Lines matching refs: r1_bio (identifier cross-reference into drivers/md/raid1.c)
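
All matches below touch struct r1bio, the per-request tracking object of the MD RAID1 personality. For orientation, a hedged sketch of the fields these lines use, abridged from drivers/md/raid1.h; exact layout and comments vary by kernel version:

	struct r1bio {
		atomic_t	remaining;		/* unfinished per-mirror I/Os */
		atomic_t	behind_remaining;	/* unfinished write-behind I/Os */
		sector_t	sector;			/* start sector in the array */
		int		sectors;		/* request length */
		unsigned long	state;			/* R1BIO_* flag bits */
		struct mddev	*mddev;
		struct bio	*master_bio;		/* original bio sent to /dev/mdX */
		int		read_disk;		/* mirror a READ is served from */
		struct list_head retry_list;		/* linkage for conf->retry_list */
		struct bio	*behind_master_bio;	/* copied pages for write-behind */
		struct bio	*bios[];		/* one bio per mirror slot */
	};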

59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,  in check_and_add_serial()  argument
64 sector_t lo = r1_bio->sector; in check_and_add_serial()
65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) in wait_for_serialization() argument
86 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0); in wait_for_serialization()
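
Lines 59-93 implement per-rdev write serialization (the serialize_policy path). A minimal sketch of the wait side, assuming the surrounding interval-tree helpers; names and details drift between kernel versions:

	static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
	{
		int idx = sector_to_idx(r1_bio->sector);
		struct serial_in_rdev *serial = &rdev->serial[idx];
		struct serial_info *si =
			mempool_alloc(rdev->mddev->serial_info_pool, GFP_NOIO);

		/*
		 * check_and_add_serial() takes [r1_bio->sector,
		 * r1_bio->sector + r1_bio->sectors) and inserts it into the
		 * per-bucket interval tree; it fails while an overlapping
		 * write is still in flight, so the submitter sleeps here.
		 */
		wait_event(serial->serial_io_wait,
			   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
	}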
149 struct r1bio *r1_bio; in r1buf_pool_alloc() local
155 r1_bio = r1bio_pool_alloc(gfp_flags, pi); in r1buf_pool_alloc()
156 if (!r1_bio) in r1buf_pool_alloc()
172 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
187 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
197 rp->raid_bio = r1_bio; in r1buf_pool_alloc()
201 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
203 return r1_bio; in r1buf_pool_alloc()
211 bio_uninit(r1_bio->bios[j]); in r1buf_pool_alloc()
212 kfree(r1_bio->bios[j]); in r1buf_pool_alloc()
217 rbio_pool_free(r1_bio, data); in r1buf_pool_alloc()
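
Lines 149-217 are the resync buffer constructor. A condensed sketch of its allocate/unwind shape, with the page setup elided; the bio_kmalloc() call follows recent kernels and is illustrative, not the exact body:

	static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
	{
		struct pool_info *pi = data;
		struct r1bio *r1_bio = r1bio_pool_alloc(gfp_flags, pi);
		int j;

		if (!r1_bio)
			return NULL;

		for (j = pi->raid_disks; j--; ) {	/* one bio per mirror */
			struct bio *bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);

			if (!bio)
				goto out_free_bio;
			r1_bio->bios[j] = bio;
		}
		/* ... allocate resync pages, set rp->raid_bio = r1_bio ... */
		r1_bio->master_bio = NULL;
		return r1_bio;

	out_free_bio:
		while (++j < pi->raid_disks) {		/* free what was built */
			bio_uninit(r1_bio->bios[j]);
			kfree(r1_bio->bios[j]);
		}
		rbio_pool_free(r1_bio, data);
		return NULL;
	}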
241 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
246 struct bio **bio = r1_bio->bios + i; in put_all_bios()
253 static void free_r1bio(struct r1bio *r1_bio) in free_r1bio() argument
255 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
257 put_all_bios(conf, r1_bio); in free_r1bio()
258 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
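
The teardown pair is nearly complete in the matches above; reassembled for readability. BIO_SPECIAL() screens out NULL and the IO_BLOCKED/IO_MADE_GOOD sentinel values, which must not be bio_put():

	static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
	{
		int i;

		for (i = 0; i < conf->raid_disks * 2; i++) {
			struct bio **bio = r1_bio->bios + i;

			if (!BIO_SPECIAL(*bio))
				bio_put(*bio);
			*bio = NULL;
		}
	}

	static void free_r1bio(struct r1bio *r1_bio)
	{
		struct r1conf *conf = r1_bio->mddev->private;

		put_all_bios(conf, r1_bio);
		mempool_free(r1_bio, &conf->r1bio_pool);
	}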
261 static void put_buf(struct r1bio *r1_bio) in put_buf() argument
263 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
264 sector_t sect = r1_bio->sector; in put_buf()
268 struct bio *bio = r1_bio->bios[i]; in put_buf()
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
273 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
278 static void reschedule_retry(struct r1bio *r1_bio) in reschedule_retry() argument
281 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
285 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
287 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
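
reschedule_retry() hands a failed r1_bio to the raid1d thread. A sketch of the handoff, assuming the usual conf->device_lock discipline and per-bucket nr_queued bookkeeping:

	static void reschedule_retry(struct r1bio *r1_bio)
	{
		unsigned long flags;
		struct mddev *mddev = r1_bio->mddev;
		struct r1conf *conf = mddev->private;
		int idx = sector_to_idx(r1_bio->sector);

		spin_lock_irqsave(&conf->device_lock, flags);
		list_add(&r1_bio->retry_list, &conf->retry_list);
		atomic_inc(&conf->nr_queued[idx]);	/* barrier bookkeeping */
		spin_unlock_irqrestore(&conf->device_lock, flags);

		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);	/* kick raid1d */
	}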
300 static void call_bio_endio(struct r1bio *r1_bio) in call_bio_endio() argument
302 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
304 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
310 static void raid_end_bio_io(struct r1bio *r1_bio) in raid_end_bio_io() argument
312 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
313 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
314 sector_t sector = r1_bio->sector; in raid_end_bio_io()
317 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
323 call_bio_endio(r1_bio); in raid_end_bio_io()
326 free_r1bio(r1_bio); in raid_end_bio_io()
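
raid_end_bio_io() is the common exit for a non-resync r1_bio. The control flow implied by lines 310-326, sketched with version-drift caveats; note that sector is read before the free, since allow_barrier() still needs it:

	static void raid_end_bio_io(struct r1bio *r1_bio)
	{
		struct r1conf *conf = r1_bio->mddev->private;
		sector_t sector = r1_bio->sector;

		/* R1BIO_Returned guards against a second endio: a behind
		 * write may already have returned the master bio early. */
		if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state))
			call_bio_endio(r1_bio);	/* sets BLK_STS_IOERR if !Uptodate */

		free_r1bio(r1_bio);
		allow_barrier(conf, sector);
	}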
337 static inline void update_head_pos(int disk, struct r1bio *r1_bio) in update_head_pos() argument
339 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
342 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
348 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) in find_bio_disk() argument
351 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
355 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
359 update_head_pos(mirror, r1_bio); in find_bio_disk()
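
find_bio_disk() maps a completed per-mirror bio back to its slot index and records the head position. Reassembled from the matches above, with the loop bound (raid_disks * 2, covering replacement slots) filled in from memory:

	static inline void update_head_pos(int disk, struct r1bio *r1_bio)
	{
		struct r1conf *conf = r1_bio->mddev->private;

		conf->mirrors[disk].head_position =
			r1_bio->sector + (r1_bio->sectors);
	}

	static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
	{
		int mirror;
		struct r1conf *conf = r1_bio->mddev->private;
		int raid_disks = conf->raid_disks;

		for (mirror = 0; mirror < raid_disks * 2; mirror++)
			if (r1_bio->bios[mirror] == bio)
				break;

		BUG_ON(mirror == raid_disks * 2);	/* bio must belong here */
		update_head_pos(mirror, r1_bio);

		return mirror;
	}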
367 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request() local
368 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
369 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
374 update_head_pos(r1_bio->read_disk, r1_bio); in raid1_end_read_request()
377 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
379 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_end_read_request()
390 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
391 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
398 raid_end_bio_io(r1_bio); in raid1_end_read_request()
407 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
408 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
409 reschedule_retry(r1_bio); in raid1_end_read_request()
414 static void close_write(struct r1bio *r1_bio) in close_write() argument
417 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
418 bio_free_pages(r1_bio->behind_master_bio); in close_write()
419 bio_put(r1_bio->behind_master_bio); in close_write()
420 r1_bio->behind_master_bio = NULL; in close_write()
423 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
424 r1_bio->sectors, in close_write()
425 !test_bit(R1BIO_Degraded, &r1_bio->state), in close_write()
426 test_bit(R1BIO_BehindIO, &r1_bio->state)); in close_write()
427 md_write_end(r1_bio->mddev); in close_write()
430 static void r1_bio_write_done(struct r1bio *r1_bio) in r1_bio_write_done() argument
432 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
435 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
436 reschedule_retry(r1_bio); in r1_bio_write_done()
438 close_write(r1_bio); in r1_bio_write_done()
439 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
440 reschedule_retry(r1_bio); in r1_bio_write_done()
442 raid_end_bio_io(r1_bio); in r1_bio_write_done()
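
r1_bio_write_done() fires when the last per-mirror write drops remaining to zero; the body is essentially complete in the matches above, assembled here:

	static void r1_bio_write_done(struct r1bio *r1_bio)
	{
		if (!atomic_dec_and_test(&r1_bio->remaining))
			return;		/* other mirror writes still in flight */

		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);	/* retry as narrower writes */
		else {
			close_write(r1_bio);		/* bitmap + write-behind cleanup */
			if (test_bit(R1BIO_MadeGood, &r1_bio->state))
				reschedule_retry(r1_bio); /* record cleared badblocks */
			else
				raid_end_bio_io(r1_bio);
		}
	}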
448 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request() local
449 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
450 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
452 int mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request()
455 sector_t lo = r1_bio->sector; in raid1_end_write_request()
456 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
473 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
481 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
484 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_end_write_request()
486 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
503 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
515 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
518 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
520 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
521 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
529 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
538 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
539 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
541 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
542 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
547 call_bio_endio(r1_bio); in raid1_end_write_request()
552 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
559 r1_bio_write_done(r1_bio); in raid1_end_write_request()
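
One subtlety at lines 538-547: a write-behind r1_bio may complete the master bio before all mirrors finish. The predicate below states the condition; behind_writes_only_remain() is a hypothetical helper written for illustration, not a function in raid1.c:

	/* True once every write still in flight is a write-behind target
	 * and the data is already up to date on at least one mirror. */
	static inline bool behind_writes_only_remain(struct r1bio *r1_bio)
	{
		return atomic_read(&r1_bio->behind_remaining) >=
				atomic_read(&r1_bio->remaining) - 1 &&
		       test_bit(R1BIO_Uptodate, &r1_bio->state);
	}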
598 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
600 const sector_t this_sector = r1_bio->sector; in read_balance()
619 sectors = r1_bio->sectors; in read_balance()
628 clear_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
646 if (r1_bio->bios[disk] == IO_BLOCKED in read_balance()
707 set_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
1119 static void alloc_behind_master_bio(struct r1bio *r1_bio, in alloc_behind_master_bio() argument
1128 &r1_bio->mddev->bio_set); in alloc_behind_master_bio()
1157 r1_bio->behind_master_bio = behind_bio; in alloc_behind_master_bio()
1158 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_master_bio()
1193 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) in init_r1bio() argument
1195 r1_bio->master_bio = bio; in init_r1bio()
1196 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1197 r1_bio->state = 0; in init_r1bio()
1198 r1_bio->mddev = mddev; in init_r1bio()
1199 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1206 struct r1bio *r1_bio; in alloc_r1bio() local
1208 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1210 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1211 init_r1bio(r1_bio, mddev, bio); in alloc_r1bio()
1212 return r1_bio; in alloc_r1bio()
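
Both allocator bodies are effectively complete in the matches above; assembled here. The memset() ensures no stale IO_BLOCKED/IO_MADE_GOOD sentinel survives mempool reuse:

	static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
	{
		r1_bio->master_bio = bio;
		r1_bio->sectors = bio_sectors(bio);
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_iter.bi_sector;
	}

	static struct r1bio *alloc_r1bio(struct mddev *mddev, struct bio *bio)
	{
		struct r1conf *conf = mddev->private;
		struct r1bio *r1_bio;

		r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
		memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
		init_r1bio(r1_bio, mddev, bio);
		return r1_bio;
	}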
1216 int max_read_sectors, struct r1bio *r1_bio) in raid1_read_request() argument
1226 bool r1bio_existed = !!r1_bio; in raid1_read_request()
1234 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; in raid1_read_request()
1240 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1258 if (!r1_bio) in raid1_read_request()
1259 r1_bio = alloc_r1bio(mddev, bio); in raid1_read_request()
1261 init_r1bio(r1_bio, mddev, bio); in raid1_read_request()
1262 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1268 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1276 (unsigned long long)r1_bio->sector); in raid1_read_request()
1278 raid_end_bio_io(r1_bio); in raid1_read_request()
1286 (unsigned long long)r1_bio->sector, in raid1_read_request()
1306 r1_bio->master_bio = bio; in raid1_read_request()
1307 r1_bio->sectors = max_sectors; in raid1_read_request()
1310 r1_bio->read_disk = rdisk; in raid1_read_request()
1313 r1_bio->master_bio = bio; in raid1_read_request()
1318 r1_bio->bios[rdisk] = read_bio; in raid1_read_request()
1320 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1325 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_read_request()
1327 read_bio->bi_private = r1_bio; in raid1_read_request()
1331 r1_bio->sector); in raid1_read_request()
1340 struct r1bio *r1_bio; in raid1_write_request() local
1382 r1_bio = alloc_r1bio(mddev, bio); in raid1_write_request()
1383 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1399 max_sectors = r1_bio->sectors; in raid1_write_request()
1416 r1_bio->bios[i] = NULL; in raid1_write_request()
1419 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_write_request()
1429 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, in raid1_write_request()
1438 if (is_bad && first_bad <= r1_bio->sector) { in raid1_write_request()
1440 bad_sectors -= (r1_bio->sector - first_bad); in raid1_write_request()
1460 int good_sectors = first_bad - r1_bio->sector; in raid1_write_request()
1465 r1_bio->bios[i] = bio; in raid1_write_request()
1474 if (r1_bio->bios[j]) in raid1_write_request()
1476 free_r1bio(r1_bio); in raid1_write_request()
1504 r1_bio->master_bio = bio; in raid1_write_request()
1505 r1_bio->sectors = max_sectors; in raid1_write_request()
1509 r1_bio->master_bio = bio; in raid1_write_request()
1510 atomic_set(&r1_bio->remaining, 1); in raid1_write_request()
1511 atomic_set(&r1_bio->behind_remaining, 0); in raid1_write_request()
1518 if (!r1_bio->bios[i]) in raid1_write_request()
1530 alloc_behind_master_bio(r1_bio, bio); in raid1_write_request()
1533 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, in raid1_write_request()
1534 test_bit(R1BIO_BehindIO, &r1_bio->state)); in raid1_write_request()
1538 if (r1_bio->behind_master_bio) { in raid1_write_request()
1540 r1_bio->behind_master_bio, in raid1_write_request()
1543 wait_for_serialization(rdev, r1_bio); in raid1_write_request()
1545 atomic_inc(&r1_bio->behind_remaining); in raid1_write_request()
1551 wait_for_serialization(rdev, r1_bio); in raid1_write_request()
1554 r1_bio->bios[i] = mbio; in raid1_write_request()
1556 mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); in raid1_write_request()
1563 mbio->bi_private = r1_bio; in raid1_write_request()
1565 atomic_inc(&r1_bio->remaining); in raid1_write_request()
1569 r1_bio->sector); in raid1_write_request()
1580 r1_bio_write_done(r1_bio); in raid1_write_request()
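
The write path's completion accounting (lines 1510-1580) is the classic fan-out pattern: remaining starts at 1 as the submitter's self-reference, each cloned per-mirror bio adds one, and the trailing r1_bio_write_done() drops the self-reference so the last completion, wherever it lands, ends the master bio. A sketch; clone_master_for_mirror() is a hypothetical stand-in for the cloning done inline in raid1_write_request(), and the real code routes mbio through the plugging machinery rather than submitting directly:

	atomic_set(&r1_bio->remaining, 1);	/* submitter's self-reference */
	atomic_set(&r1_bio->behind_remaining, 0);
	for (i = 0; i < disks; i++) {		/* disks == conf->raid_disks * 2 */
		struct bio *mbio;

		if (!r1_bio->bios[i])		/* mirror skipped (failed/badblock) */
			continue;
		mbio = clone_master_for_mirror(r1_bio, i);	/* hypothetical helper */
		atomic_inc(&r1_bio->remaining);
		submit_bio_noacct(mbio);
	}
	r1_bio_write_done(r1_bio);		/* drop the self-reference */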
1910 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_read() local
1912 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
1920 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
1922 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
1923 reschedule_retry(r1_bio); in end_sync_read()
1926 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) in abort_sync_write() argument
1929 sector_t s = r1_bio->sector; in abort_sync_write()
1930 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
1940 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) in put_sync_write_buf() argument
1942 if (atomic_dec_and_test(&r1_bio->remaining)) { in put_sync_write_buf()
1943 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf()
1944 int s = r1_bio->sectors; in put_sync_write_buf()
1946 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in put_sync_write_buf()
1947 test_bit(R1BIO_WriteError, &r1_bio->state)) in put_sync_write_buf()
1948 reschedule_retry(r1_bio); in put_sync_write_buf()
1950 put_buf(r1_bio); in put_sync_write_buf()
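
put_sync_write_buf() is the resync-side analogue of r1_bio_write_done(). Reconstructed from memory around the matches above; the put_buf()/md_done_sync() ordering is hedged:

	static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
	{
		if (atomic_dec_and_test(&r1_bio->remaining)) {
			struct mddev *mddev = r1_bio->mddev;
			int s = r1_bio->sectors;

			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				reschedule_retry(r1_bio); /* raid1d records badblocks */
			else {
				put_buf(r1_bio);
				md_done_sync(mddev, s, uptodate);
			}
		}
	}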
1959 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_write() local
1960 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1967 abort_sync_write(mddev, r1_bio); in end_sync_write()
1972 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
1973 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in end_sync_write()
1975 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1976 r1_bio->sector, in end_sync_write()
1977 r1_bio->sectors, in end_sync_write()
1980 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
1982 put_sync_write_buf(r1_bio, uptodate); in end_sync_write()
2004 static int fix_sync_read_error(struct r1bio *r1_bio) in fix_sync_read_error() argument
2017 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
2019 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
2021 sector_t sect = r1_bio->sector; in fix_sync_read_error()
2022 int sectors = r1_bio->sectors; in fix_sync_read_error()
2026 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2040 int d = r1_bio->read_disk; in fix_sync_read_error()
2047 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
2063 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
2074 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
2086 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2087 put_buf(r1_bio); in fix_sync_read_error()
2099 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2103 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2109 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
2114 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2118 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2130 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
2135 static void process_checks(struct r1bio *r1_bio) in process_checks() argument
2144 struct mddev *mddev = r1_bio->mddev; in process_checks()
2151 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
2154 struct bio *b = r1_bio->bios[i]; in process_checks()
2162 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2165 rp->raid_bio = r1_bio; in process_checks()
2169 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2172 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
2173 !r1_bio->bios[primary]->bi_status) { in process_checks()
2174 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
2178 r1_bio->read_disk = primary; in process_checks()
2181 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
2182 struct bio *sbio = r1_bio->bios[i]; in process_checks()
2208 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
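
The vcnt expression at line 2151 is round-up division; an equivalent, clearer spelling:

	/* pages needed to cover r1_bio->sectors 512-byte sectors */
	int vcnt = DIV_ROUND_UP(r1_bio->sectors, PAGE_SIZE >> 9);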
2221 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2228 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2230 if (!fix_sync_read_error(r1_bio)) in sync_request_write()
2234 process_checks(r1_bio); in sync_request_write()
2239 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2241 wbio = r1_bio->bios[i]; in sync_request_write()
2244 (i == r1_bio->read_disk || in sync_request_write()
2248 abort_sync_write(mddev, r1_bio); in sync_request_write()
2257 atomic_inc(&r1_bio->remaining); in sync_request_write()
2263 put_sync_write_buf(r1_bio, 1); in sync_request_write()
2369 static int narrow_write_error(struct r1bio *r1_bio, int i) in narrow_write_error() argument
2371 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2389 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2397 sector = r1_bio->sector; in narrow_write_error()
2408 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2410 r1_bio->behind_master_bio, in narrow_write_error()
2413 wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio, in narrow_write_error()
2418 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2419 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2421 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
2438 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2441 int s = r1_bio->sectors; in handle_sync_write_finished()
2444 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2448 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2449 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2452 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2453 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2457 put_buf(r1_bio); in handle_sync_write_finished()
2461 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2467 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2470 r1_bio->sector, in handle_write_finished()
2471 r1_bio->sectors, 0); in handle_write_finished()
2473 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2479 if (!narrow_write_error(r1_bio, m)) { in handle_write_finished()
2483 set_bit(R1BIO_Degraded, &r1_bio->state); in handle_write_finished()
2490 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2491 idx = sector_to_idx(r1_bio->sector); in handle_write_finished()
2501 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2502 close_write(r1_bio); in handle_write_finished()
2503 raid_end_bio_io(r1_bio); in handle_write_finished()
2507 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2514 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2524 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2526 r1_bio->bios[r1_bio->read_disk] = NULL; in handle_read_error()
2528 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2532 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2533 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2538 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in handle_read_error()
2542 sector = r1_bio->sector; in handle_read_error()
2543 bio = r1_bio->master_bio; in handle_read_error()
2546 r1_bio->state = 0; in handle_read_error()
2547 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2554 struct r1bio *r1_bio; in raid1d() local
2571 r1_bio = list_first_entry(&tmp, struct r1bio, in raid1d()
2573 list_del(&r1_bio->retry_list); in raid1d()
2574 idx = sector_to_idx(r1_bio->sector); in raid1d()
2577 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1d()
2578 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2579 close_write(r1_bio); in raid1d()
2580 raid_end_bio_io(r1_bio); in raid1d()
2594 r1_bio = list_entry(head->prev, struct r1bio, retry_list); in raid1d()
2596 idx = sector_to_idx(r1_bio->sector); in raid1d()
2600 mddev = r1_bio->mddev; in raid1d()
2602 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { in raid1d()
2603 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2604 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2605 handle_sync_write_finished(conf, r1_bio); in raid1d()
2607 sync_request_write(mddev, r1_bio); in raid1d()
2608 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2609 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2610 handle_write_finished(conf, r1_bio); in raid1d()
2611 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) in raid1d()
2612 handle_read_error(conf, r1_bio); in raid1d()
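
raid1d's main loop (lines 2594-2612) pops one r1_bio at a time and dispatches on its state bits. A sketch of the pop, excerpted from inside the loop with declarations omitted and the exact bookkeeping hedged:

	spin_lock_irqsave(&conf->device_lock, flags);
	if (list_empty(head)) {			/* head == &conf->retry_list */
		spin_unlock_irqrestore(&conf->device_lock, flags);
		break;
	}
	r1_bio = list_entry(head->prev, struct r1bio, retry_list);
	list_del(head->prev);			/* oldest entry first */
	idx = sector_to_idx(r1_bio->sector);
	atomic_dec(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);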
2665 struct r1bio *r1_bio; in raid1_sync_request() local
2741 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2753 r1_bio->mddev = mddev; in raid1_sync_request()
2754 r1_bio->sector = sector_nr; in raid1_sync_request()
2755 r1_bio->state = 0; in raid1_sync_request()
2756 set_bit(R1BIO_IsSync, &r1_bio->state); in raid1_sync_request()
2762 bio = r1_bio->bios[i]; in raid1_sync_request()
2825 r1_bio->read_disk = disk; in raid1_sync_request()
2833 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { in raid1_sync_request()
2841 put_buf(r1_bio); in raid1_sync_request()
2875 put_buf(r1_bio); in raid1_sync_request()
2905 bio = r1_bio->bios[i]; in raid1_sync_request()
2922 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2938 atomic_set(&r1_bio->remaining, read_targets); in raid1_sync_request()
2940 bio = r1_bio->bios[i]; in raid1_sync_request()
2950 atomic_set(&r1_bio->remaining, 1); in raid1_sync_request()
2951 bio = r1_bio->bios[r1_bio->read_disk]; in raid1_sync_request()