Lines matching refs: r1_bio

All matches below are from drivers/md/raid1.c, the Linux kernel's RAID1 driver. Each entry shows the source line number in that file, the matching line, and the enclosing function; "local" marks a local-variable declaration and "argument" a function parameter.
132 struct r1bio *r1_bio; in r1buf_pool_alloc() local
138 r1_bio = r1bio_pool_alloc(gfp_flags, pi); in r1buf_pool_alloc()
139 if (!r1_bio) in r1buf_pool_alloc()
154 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
169 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
179 rp->raid_bio = r1_bio; in r1buf_pool_alloc()
183 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
185 return r1_bio; in r1buf_pool_alloc()
193 bio_put(r1_bio->bios[j]); in r1buf_pool_alloc()
197 rbio_pool_free(r1_bio, data); in r1buf_pool_alloc()
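
r1buf_pool_alloc() builds a resync r1bio and, on any allocation failure, unwinds everything acquired so far (the bio_put() loop at line 193 and rbio_pool_free() at line 197). A minimal user-space model of that allocate-then-unwind shape; all names here (fake_r1buf_alloc and friends) are hypothetical stand-ins for the pool helpers:

    #include <stdio.h>
    #include <stdlib.h>

    #define NMIRRORS 4

    struct fake_r1bio {
        void *bios[NMIRRORS];
    };

    static struct fake_r1bio *fake_r1buf_alloc(void)
    {
        struct fake_r1bio *r1_bio = calloc(1, sizeof(*r1_bio));
        int j;

        if (!r1_bio)
            return NULL;
        for (j = 0; j < NMIRRORS; j++) {
            r1_bio->bios[j] = malloc(64);   /* stands in for bio_alloc() */
            if (!r1_bio->bios[j])
                goto out_free_bio;          /* unwind partial allocation */
        }
        return r1_bio;

    out_free_bio:
        while (--j >= 0)
            free(r1_bio->bios[j]);          /* stands in for bio_put() */
        free(r1_bio);                       /* stands in for rbio_pool_free() */
        return NULL;
    }

    int main(void)
    {
        struct fake_r1bio *r = fake_r1buf_alloc();
        printf("alloc %s\n", r ? "ok" : "failed");
        return 0;
    }
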
220 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
225 struct bio **bio = r1_bio->bios + i; in put_all_bios()
232 static void free_r1bio(struct r1bio *r1_bio) in free_r1bio() argument
234 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
236 put_all_bios(conf, r1_bio); in free_r1bio()
237 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
240 static void put_buf(struct r1bio *r1_bio) in put_buf() argument
242 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
243 sector_t sect = r1_bio->sector; in put_buf()
247 struct bio *bio = r1_bio->bios[i]; in put_buf()
249 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
252 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
257 static void reschedule_retry(struct r1bio *r1_bio) in reschedule_retry() argument
260 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
264 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
266 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
279 static void call_bio_endio(struct r1bio *r1_bio) in call_bio_endio() argument
281 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
282 struct r1conf *conf = r1_bio->mddev->private; in call_bio_endio()
284 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
292 allow_barrier(conf, r1_bio->sector); in call_bio_endio()
295 static void raid_end_bio_io(struct r1bio *r1_bio) in raid_end_bio_io() argument
297 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
300 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
306 call_bio_endio(r1_bio); in raid_end_bio_io()
308 free_r1bio(r1_bio); in raid_end_bio_io()
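
raid_end_bio_io() guards the completion with test_and_set_bit(R1BIO_Returned, ...) (line 300; the same guard appears at line 521), so the master bio is ended exactly once even when several paths race to finish it. A sketch of that guard using C11 atomics; the flag value and names are stand-ins, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    #define R1BIO_RETURNED (1u << 0)

    struct fake_r1bio {
        _Atomic unsigned int state;
    };

    static void fake_raid_end_bio_io(struct fake_r1bio *r1_bio)
    {
        /* test_and_set_bit(): returns the old value of the bit */
        unsigned int old = atomic_fetch_or(&r1_bio->state, R1BIO_RETURNED);

        if (!(old & R1BIO_RETURNED))
            printf("completing master bio (exactly once)\n");
        else
            printf("already returned, skipping endio\n");
    }

    int main(void)
    {
        struct fake_r1bio r = { .state = 0 };

        fake_raid_end_bio_io(&r);   /* completes */
        fake_raid_end_bio_io(&r);   /* no-op */
        return 0;
    }
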
314 static inline void update_head_pos(int disk, struct r1bio *r1_bio) in update_head_pos() argument
316 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
319 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
325 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) in find_bio_disk() argument
328 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
332 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
336 update_head_pos(mirror, r1_bio); in find_bio_disk()
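
find_bio_disk() recovers the mirror index of a completed child bio by scanning bios[] for a pointer match (line 332), since the child bio carries no index of its own. A toy model of that lookup, with hypothetical names:

    #include <stdio.h>

    #define NMIRRORS 4

    struct fake_r1bio { void *bios[NMIRRORS]; };

    static int fake_find_bio_disk(struct fake_r1bio *r1_bio, void *bio)
    {
        for (int mirror = 0; mirror < NMIRRORS; mirror++)
            if (r1_bio->bios[mirror] == bio)
                return mirror;
        return -1;                  /* the kernel treats a miss as a bug */
    }

    int main(void)
    {
        int a, b, c;
        struct fake_r1bio r = { .bios = { &a, &b, &c, NULL } };

        printf("bio %p is mirror %d\n", (void *)&b,
               fake_find_bio_disk(&r, &b));
        return 0;
    }
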
344 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request() local
345 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
346 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
351 update_head_pos(r1_bio->read_disk, r1_bio); in raid1_end_read_request()
354 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
356 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_end_read_request()
367 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
368 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
375 raid_end_bio_io(r1_bio); in raid1_end_read_request()
385 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
386 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
387 reschedule_retry(r1_bio); in raid1_end_read_request()
392 static void close_write(struct r1bio *r1_bio) in close_write() argument
395 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
396 bio_free_pages(r1_bio->behind_master_bio); in close_write()
397 bio_put(r1_bio->behind_master_bio); in close_write()
398 r1_bio->behind_master_bio = NULL; in close_write()
401 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
402 r1_bio->sectors, in close_write()
403 !test_bit(R1BIO_Degraded, &r1_bio->state), in close_write()
404 test_bit(R1BIO_BehindIO, &r1_bio->state)); in close_write()
405 md_write_end(r1_bio->mddev); in close_write()
408 static void r1_bio_write_done(struct r1bio *r1_bio) in r1_bio_write_done() argument
410 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
413 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
414 reschedule_retry(r1_bio); in r1_bio_write_done()
416 close_write(r1_bio); in r1_bio_write_done()
417 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
418 reschedule_retry(r1_bio); in r1_bio_write_done()
420 raid_end_bio_io(r1_bio); in r1_bio_write_done()
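
r1_bio_write_done() is the classic last-reference-completes pattern: the submitter holds one count in r1_bio->remaining (atomic_set to 1 at line 1474), each issued write adds one (line 1534), and whichever decrement reaches zero ends the master bio. A user-space model of that pattern under those assumptions:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_r1bio { atomic_int remaining; };

    static void fake_write_done(struct fake_r1bio *r1_bio)
    {
        /* atomic_dec_and_test(): true only for the final decrement */
        if (atomic_fetch_sub(&r1_bio->remaining, 1) == 1)
            printf("last reference dropped: ending master bio\n");
    }

    int main(void)
    {
        struct fake_r1bio r;
        int nwrites = 3;

        atomic_init(&r.remaining, 1);            /* submitter's reference */
        for (int i = 0; i < nwrites; i++)
            atomic_fetch_add(&r.remaining, 1);   /* one per issued mbio */

        for (int i = 0; i < nwrites; i++)
            fake_write_done(&r);                 /* per-mirror completions */
        fake_write_done(&r);                     /* submitter's final drop */
        return 0;
    }
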
426 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request() local
427 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
428 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
430 int mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request()
449 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
459 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
462 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
479 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
491 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
494 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
496 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
497 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
503 sector_t lo = r1_bio->sector; in raid1_end_write_request()
504 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
509 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
518 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
519 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
521 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
522 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
527 call_bio_endio(r1_bio); in raid1_end_write_request()
531 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
538 r1_bio_write_done(r1_bio); in raid1_end_write_request()
577 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
579 const sector_t this_sector = r1_bio->sector; in read_balance()
598 sectors = r1_bio->sectors; in read_balance()
607 clear_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
625 if (r1_bio->bios[disk] == IO_BLOCKED in read_balance()
686 set_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
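
read_balance() picks which mirror serves a read; among much else (idle disks, head distance, bad blocks), it skips slots poisoned with IO_BLOCKED by an earlier failed attempt (line 625). A deliberately stripped-down chooser showing just that skip; the real function's policy is far richer:

    #include <stdio.h>

    #define NMIRRORS   4
    #define IO_BLOCKED ((void *)1)   /* sentinel pointer, as in raid1 */

    struct fake_r1bio { void *bios[NMIRRORS]; };

    static int fake_read_balance(const struct fake_r1bio *r1_bio,
                                 const int rdev_present[NMIRRORS])
    {
        for (int disk = 0; disk < NMIRRORS; disk++) {
            if (r1_bio->bios[disk] == IO_BLOCKED)
                continue;            /* a previous read here already failed */
            if (rdev_present[disk])
                return disk;
        }
        return -1;                   /* no mirror can serve the read */
    }

    int main(void)
    {
        struct fake_r1bio r = { .bios = { IO_BLOCKED, NULL, NULL, NULL } };
        int present[NMIRRORS] = { 1, 0, 1, 1 };

        printf("read from mirror %d\n", fake_read_balance(&r, present));
        return 0;
    }
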
1111 static void alloc_behind_master_bio(struct r1bio *r1_bio, in alloc_behind_master_bio() argument
1119 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); in alloc_behind_master_bio()
1147 r1_bio->behind_master_bio = behind_bio; in alloc_behind_master_bio()
1148 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_master_bio()
1190 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) in init_r1bio() argument
1192 r1_bio->master_bio = bio; in init_r1bio()
1193 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1194 r1_bio->state = 0; in init_r1bio()
1195 r1_bio->mddev = mddev; in init_r1bio()
1196 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1203 struct r1bio *r1_bio; in alloc_r1bio() local
1205 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1207 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1208 init_r1bio(r1_bio, mddev, bio); in alloc_r1bio()
1209 return r1_bio; in alloc_r1bio()
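
alloc_r1bio() pulls an r1bio from the mempool and clears only the bios[] array (line 1207; mempool memory may be recycled), then init_r1bio() copies the key fields from the master bio (lines 1192-1196). A stand-alone sketch of that initialization, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NMIRRORS 4

    struct fake_bio   { unsigned long long sector; unsigned int sectors; };
    struct fake_r1bio {
        struct fake_bio   *master_bio;
        unsigned long long sector;
        unsigned int       sectors;
        unsigned long      state;
        void              *bios[NMIRRORS];
    };

    static struct fake_r1bio *fake_alloc_r1bio(struct fake_bio *bio)
    {
        struct fake_r1bio *r1_bio = malloc(sizeof(*r1_bio));

        if (!r1_bio)
            return NULL;
        /* only bios[] is cleared; the rest is rewritten just below */
        memset(r1_bio->bios, 0, sizeof(r1_bio->bios));
        r1_bio->master_bio = bio;
        r1_bio->sectors    = bio->sectors;
        r1_bio->state      = 0;
        r1_bio->sector     = bio->sector;
        return r1_bio;
    }

    int main(void)
    {
        struct fake_bio bio = { .sector = 2048, .sectors = 8 };
        struct fake_r1bio *r = fake_alloc_r1bio(&bio);

        if (r)
            printf("r1bio at sector %llu, %u sectors\n", r->sector, r->sectors);
        free(r);
        return 0;
    }
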
1213 int max_read_sectors, struct r1bio *r1_bio) in raid1_read_request() argument
1223 bool print_msg = !!r1_bio; in raid1_read_request()
1231 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; in raid1_read_request()
1237 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1251 if (!r1_bio) in raid1_read_request()
1252 r1_bio = alloc_r1bio(mddev, bio); in raid1_read_request()
1254 init_r1bio(r1_bio, mddev, bio); in raid1_read_request()
1255 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1261 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1269 (unsigned long long)r1_bio->sector); in raid1_read_request()
1271 raid_end_bio_io(r1_bio); in raid1_read_request()
1279 (unsigned long long)r1_bio->sector, in raid1_read_request()
1299 r1_bio->master_bio = bio; in raid1_read_request()
1300 r1_bio->sectors = max_sectors; in raid1_read_request()
1303 r1_bio->read_disk = rdisk; in raid1_read_request()
1307 r1_bio->bios[rdisk] = read_bio; in raid1_read_request()
1309 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1315 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_read_request()
1317 read_bio->bi_private = r1_bio; in raid1_read_request()
1321 disk_devt(mddev->gendisk), r1_bio->sector); in raid1_read_request()
1330 struct r1bio *r1_bio; in raid1_write_request() local
1364 r1_bio = alloc_r1bio(mddev, bio); in raid1_write_request()
1365 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1388 max_sectors = r1_bio->sectors; in raid1_write_request()
1396 r1_bio->bios[i] = NULL; in raid1_write_request()
1399 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_write_request()
1409 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, in raid1_write_request()
1418 if (is_bad && first_bad <= r1_bio->sector) { in raid1_write_request()
1420 bad_sectors -= (r1_bio->sector - first_bad); in raid1_write_request()
1440 int good_sectors = first_bad - r1_bio->sector; in raid1_write_request()
1445 r1_bio->bios[i] = bio; in raid1_write_request()
1454 if (r1_bio->bios[j]) in raid1_write_request()
1456 r1_bio->state = 0; in raid1_write_request()
1470 r1_bio->master_bio = bio; in raid1_write_request()
1471 r1_bio->sectors = max_sectors; in raid1_write_request()
1474 atomic_set(&r1_bio->remaining, 1); in raid1_write_request()
1475 atomic_set(&r1_bio->behind_remaining, 0); in raid1_write_request()
1481 if (!r1_bio->bios[i]) in raid1_write_request()
1493 alloc_behind_master_bio(r1_bio, bio); in raid1_write_request()
1496 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, in raid1_write_request()
1497 test_bit(R1BIO_BehindIO, &r1_bio->state)); in raid1_write_request()
1501 if (r1_bio->behind_master_bio) in raid1_write_request()
1502 mbio = bio_clone_fast(r1_bio->behind_master_bio, in raid1_write_request()
1507 if (r1_bio->behind_master_bio) { in raid1_write_request()
1511 sector_t lo = r1_bio->sector; in raid1_write_request()
1512 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_write_request()
1518 atomic_inc(&r1_bio->behind_remaining); in raid1_write_request()
1521 r1_bio->bios[i] = mbio; in raid1_write_request()
1523 mbio->bi_iter.bi_sector = (r1_bio->sector + in raid1_write_request()
1532 mbio->bi_private = r1_bio; in raid1_write_request()
1534 atomic_inc(&r1_bio->remaining); in raid1_write_request()
1539 r1_bio->sector); in raid1_write_request()
1560 r1_bio_write_done(r1_bio); in raid1_write_request()
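
In the write path, lines 1409-1447 shrink the request when a known-bad range starts inside it: only the clean prefix is written now, and the tail is retried as a separate request. A model of that trimming; fake_is_badblock() only imitates the contract of the kernel's is_badblock(), and the bad range is invented for the example:

    #include <stdio.h>

    /* returns 1 and fills *first_bad if [sector, sector+sectors) overlaps
     * a bad range on this device; stand-in for is_badblock() */
    static int fake_is_badblock(unsigned long long sector, int sectors,
                                unsigned long long *first_bad)
    {
        const unsigned long long bad_start = 2052;   /* assumed bad range */

        if (sector < bad_start && sector + sectors > bad_start) {
            *first_bad = bad_start;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned long long sector = 2048, first_bad;
        int max_sectors = 16;

        if (fake_is_badblock(sector, max_sectors, &first_bad) &&
            first_bad > sector) {
            int good_sectors = (int)(first_bad - sector);

            if (good_sectors < max_sectors)
                max_sectors = good_sectors;   /* write only the clean prefix */
        }
        printf("writing %d sectors starting at %llu\n", max_sectors, sector);
        return 0;
    }
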
1879 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_read() local
1881 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
1889 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
1891 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
1892 reschedule_retry(r1_bio); in end_sync_read()
1895 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) in abort_sync_write() argument
1898 sector_t s = r1_bio->sector; in abort_sync_write()
1899 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
1909 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) in put_sync_write_buf() argument
1911 if (atomic_dec_and_test(&r1_bio->remaining)) { in put_sync_write_buf()
1912 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf()
1913 int s = r1_bio->sectors; in put_sync_write_buf()
1915 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in put_sync_write_buf()
1916 test_bit(R1BIO_WriteError, &r1_bio->state)) in put_sync_write_buf()
1917 reschedule_retry(r1_bio); in put_sync_write_buf()
1919 put_buf(r1_bio); in put_sync_write_buf()
1928 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_write() local
1929 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
1933 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1936 abort_sync_write(mddev, r1_bio); in end_sync_write()
1941 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
1942 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in end_sync_write()
1944 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1945 r1_bio->sector, in end_sync_write()
1946 r1_bio->sectors, in end_sync_write()
1949 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
1951 put_sync_write_buf(r1_bio, uptodate); in end_sync_write()
1973 static int fix_sync_read_error(struct r1bio *r1_bio) in fix_sync_read_error() argument
1986 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
1988 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
1990 sector_t sect = r1_bio->sector; in fix_sync_read_error()
1991 int sectors = r1_bio->sectors; in fix_sync_read_error()
1995 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2009 int d = r1_bio->read_disk; in fix_sync_read_error()
2016 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
2032 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
2044 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
2056 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2057 put_buf(r1_bio); in fix_sync_read_error()
2069 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2073 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2079 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
2084 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2088 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2100 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
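
fix_sync_read_error() walks the other mirrors, wrapping around, until some device supplies the block or it arrives back at the disk that failed (the `while (!success && d != r1_bio->read_disk)` condition at line 2032). The same loop shape in isolation; fake_sync_page_io() just pretends one mirror reads cleanly:

    #include <stdio.h>

    #define NMIRRORS 4

    static int fake_sync_page_io(int disk)
    {
        return disk == 2;            /* pretend only mirror 2 reads cleanly */
    }

    int main(void)
    {
        int read_disk = 0, d = read_disk, success = 0;

        do {
            if (fake_sync_page_io(d)) {
                success = 1;
            } else {
                d++;
                if (d == NMIRRORS)
                    d = 0;           /* wrap around past the last mirror */
            }
        } while (!success && d != read_disk);

        if (success)
            printf("recovered the block from mirror %d\n", d);
        else
            printf("no mirror could supply the data\n");
        return 0;
    }
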
2105 static void process_checks(struct r1bio *r1_bio) in process_checks() argument
2114 struct mddev *mddev = r1_bio->mddev; in process_checks()
2121 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
2124 struct bio *b = r1_bio->bios[i]; in process_checks()
2132 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2136 rp->raid_bio = r1_bio; in process_checks()
2140 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2143 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
2144 !r1_bio->bios[primary]->bi_status) { in process_checks()
2145 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
2149 r1_bio->read_disk = primary; in process_checks()
2152 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
2153 struct bio *sbio = r1_bio->bios[i]; in process_checks()
2179 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
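
process_checks() runs during a check/repair pass: each secondary's freshly read pages are compared against the primary's, and any difference bumps resync_mismatches (the atomic64_add at line 2179) and, when repairing, triggers a rewrite. A sketch of that comparison over plain buffers standing in for bio pages:

    #include <stdio.h>
    #include <string.h>

    #define NMIRRORS  3
    #define PAGE_SIZE 4096

    int main(void)
    {
        static unsigned char pages[NMIRRORS][PAGE_SIZE];
        int primary = 0;
        long mismatches = 0;

        pages[2][100] = 0xff;                 /* simulate a divergent copy */

        for (int i = 0; i < NMIRRORS; i++) {
            if (i == primary)
                continue;
            if (memcmp(pages[primary], pages[i], PAGE_SIZE) != 0) {
                mismatches++;                 /* atomic64_add() in the kernel */
                printf("mirror %d differs from primary\n", i);
            }
        }
        printf("%ld mismatched mirror(s)\n", mismatches);
        return 0;
    }
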
2192 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2199 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2201 if (!fix_sync_read_error(r1_bio)) in sync_request_write()
2205 process_checks(r1_bio); in sync_request_write()
2210 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2212 wbio = r1_bio->bios[i]; in sync_request_write()
2215 (i == r1_bio->read_disk || in sync_request_write()
2219 abort_sync_write(mddev, r1_bio); in sync_request_write()
2228 atomic_inc(&r1_bio->remaining); in sync_request_write()
2234 put_sync_write_buf(r1_bio, 1); in sync_request_write()
2341 static int narrow_write_error(struct r1bio *r1_bio, int i) in narrow_write_error() argument
2343 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2361 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2369 sector = r1_bio->sector; in narrow_write_error()
2380 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2381 wbio = bio_clone_fast(r1_bio->behind_master_bio, in narrow_write_error()
2385 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, in narrow_write_error()
2390 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2391 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2393 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
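
narrow_write_error() re-issues a failed write in chunks aligned to the device's bad-block granularity, so each chunk that still fails can be recorded as a bad block while the rest of the range is salvaged (bio_trim at line 2393 carves each chunk out of the clone). A model of the chunking arithmetic, with invented values and a fake_write() that imitates one persistently failing chunk:

    #include <stdio.h>

    static int fake_write(unsigned long long sector, int sectors)
    {
        (void)sectors;               /* a real write would use the length */
        return sector != 2056;       /* pretend one aligned chunk keeps failing */
    }

    int main(void)
    {
        const int block_sectors = 8;             /* assumed bad-block unit */
        unsigned long long sector = 2052;        /* r1_bio->sector */
        int sect_to_write = 16;                  /* r1_bio->sectors */
        int ok = 1;

        while (sect_to_write) {
            /* end each chunk on a block_sectors boundary */
            int sectors = block_sectors -
                          (int)(sector & (block_sectors - 1));

            if (sectors > sect_to_write)
                sectors = sect_to_write;
            if (fake_write(sector, sectors)) {
                printf("rewrote %d sectors at %llu\n", sectors, sector);
            } else {
                printf("still failing at %llu: mark bad blocks\n", sector);
                ok = 0;              /* rdev_set_badblocks() in the kernel */
            }
            sect_to_write -= sectors;
            sector += sectors;
        }
        return !ok;
    }
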
2411 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2414 int s = r1_bio->sectors; in handle_sync_write_finished()
2417 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2421 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2422 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2425 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2426 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2430 put_buf(r1_bio); in handle_sync_write_finished()
2434 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2440 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2443 r1_bio->sector, in handle_write_finished()
2444 r1_bio->sectors, 0); in handle_write_finished()
2446 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2452 if (!narrow_write_error(r1_bio, m)) { in handle_write_finished()
2456 set_bit(R1BIO_Degraded, &r1_bio->state); in handle_write_finished()
2463 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2464 idx = sector_to_idx(r1_bio->sector); in handle_write_finished()
2474 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2475 close_write(r1_bio); in handle_write_finished()
2476 raid_end_bio_io(r1_bio); in handle_write_finished()
2480 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2486 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2496 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2498 r1_bio->bios[r1_bio->read_disk] = NULL; in handle_read_error()
2500 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2504 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2505 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2510 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in handle_read_error()
2514 allow_barrier(conf, r1_bio->sector); in handle_read_error()
2515 bio = r1_bio->master_bio; in handle_read_error()
2518 r1_bio->state = 0; in handle_read_error()
2519 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2525 struct r1bio *r1_bio; in raid1d() local
2542 r1_bio = list_first_entry(&tmp, struct r1bio, in raid1d()
2544 list_del(&r1_bio->retry_list); in raid1d()
2545 idx = sector_to_idx(r1_bio->sector); in raid1d()
2548 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1d()
2549 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2550 close_write(r1_bio); in raid1d()
2551 raid_end_bio_io(r1_bio); in raid1d()
2565 r1_bio = list_entry(head->prev, struct r1bio, retry_list); in raid1d()
2567 idx = sector_to_idx(r1_bio->sector); in raid1d()
2571 mddev = r1_bio->mddev; in raid1d()
2573 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { in raid1d()
2574 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2575 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2576 handle_sync_write_finished(conf, r1_bio); in raid1d()
2578 sync_request_write(mddev, r1_bio); in raid1d()
2579 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2580 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2581 handle_write_finished(conf, r1_bio); in raid1d()
2582 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) in raid1d()
2583 handle_read_error(conf, r1_bio); in raid1d()
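
The tail of raid1d() routes each retried r1bio purely by its state bits, in exactly the order the matches above show (lines 2573-2583). A compact rendition of that dispatch; the flag values here are illustrative, not the kernel's actual bit positions:

    #include <stdio.h>

    #define R1BIO_IsSync     (1u << 0)
    #define R1BIO_MadeGood   (1u << 1)
    #define R1BIO_WriteError (1u << 2)
    #define R1BIO_ReadError  (1u << 3)

    static void dispatch(unsigned int state)
    {
        if (state & R1BIO_IsSync) {
            if (state & (R1BIO_MadeGood | R1BIO_WriteError))
                printf("handle_sync_write_finished\n");
            else
                printf("sync_request_write\n");
        } else if (state & (R1BIO_MadeGood | R1BIO_WriteError)) {
            printf("handle_write_finished\n");
        } else if (state & R1BIO_ReadError) {
            printf("handle_read_error\n");
        } else {
            printf("unexpected state\n");
        }
    }

    int main(void)
    {
        dispatch(R1BIO_IsSync);
        dispatch(R1BIO_WriteError);
        dispatch(R1BIO_ReadError);
        return 0;
    }
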
2636 struct r1bio *r1_bio; in raid1_sync_request() local
2712 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2724 r1_bio->mddev = mddev; in raid1_sync_request()
2725 r1_bio->sector = sector_nr; in raid1_sync_request()
2726 r1_bio->state = 0; in raid1_sync_request()
2727 set_bit(R1BIO_IsSync, &r1_bio->state); in raid1_sync_request()
2733 bio = r1_bio->bios[i]; in raid1_sync_request()
2796 r1_bio->read_disk = disk; in raid1_sync_request()
2804 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { in raid1_sync_request()
2812 put_buf(r1_bio); in raid1_sync_request()
2846 put_buf(r1_bio); in raid1_sync_request()
2876 bio = r1_bio->bios[i]; in raid1_sync_request()
2893 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2909 atomic_set(&r1_bio->remaining, read_targets); in raid1_sync_request()
2911 bio = r1_bio->bios[i]; in raid1_sync_request()
2921 atomic_set(&r1_bio->remaining, 1); in raid1_sync_request()
2922 bio = r1_bio->bios[r1_bio->read_disk]; in raid1_sync_request()