Lines matching references to identifier "sectors" (cross-reference listing; format: original line number, code excerpt, enclosing function)
319 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
402 r1_bio->sectors, in close_write()
494 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
504 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
545 sector_t sectors) in align_to_barrier_unit_end() argument
549 WARN_ON(sectors == 0); in align_to_barrier_unit_end()
557 if (len > sectors) in align_to_barrier_unit_end()
558 len = sectors; in align_to_barrier_unit_end()
580 int sectors; in read_balance() local
598 sectors = r1_bio->sectors; in read_balance()
609 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
612 this_sector + sectors))) in read_balance()
630 rdev->recovery_offset < this_sector + sectors) in read_balance()
636 if (is_badblock(rdev, this_sector, sectors, in read_balance()
643 best_good_sectors = sectors; in read_balance()
652 if (is_badblock(rdev, this_sector, sectors, in read_balance()
663 if (choose_first && sectors > bad_sectors) in read_balance()
664 sectors = bad_sectors; in read_balance()
665 if (best_good_sectors > sectors) in read_balance()
666 best_good_sectors = sectors; in read_balance()
679 if ((sectors > best_good_sectors) && (best_disk >= 0)) in read_balance()
681 best_good_sectors = sectors; in read_balance()
759 sectors = best_good_sectors; in read_balance()
764 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
767 *max_sectors = sectors; in read_balance()
1193 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1255 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1300 r1_bio->sectors = max_sectors; in raid1_read_request()
1365 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1388 max_sectors = r1_bio->sectors; in raid1_write_request()
1471 r1_bio->sectors = max_sectors; in raid1_write_request()
1496 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, in raid1_write_request()
1512 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_write_request()
1568 sector_t sectors; in raid1_make_request() local
1582 sectors = align_to_barrier_unit_end( in raid1_make_request()
1586 raid1_read_request(mddev, bio, sectors, NULL); in raid1_make_request()
1590 raid1_write_request(mddev, bio, sectors); in raid1_make_request()
1899 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
1913 int s = r1_bio->sectors; in put_sync_write_buf()
1942 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in end_sync_write()
1946 r1_bio->sectors, in end_sync_write()
1955 int sectors, struct page *page, int rw) in r1_sync_page_io() argument
1957 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) in r1_sync_page_io()
1968 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r1_sync_page_io()
1991 int sectors = r1_bio->sectors; in fix_sync_read_error() local
2007 while(sectors) { in fix_sync_read_error()
2008 int s = sectors; in fix_sync_read_error()
2056 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2061 sectors -= s; in fix_sync_read_error()
2096 sectors -= s; in fix_sync_read_error()
2121 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
2140 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2179 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2246 sector_t sect, int sectors) in fix_read_error() argument
2249 while(sectors) { in fix_read_error()
2250 int s = sectors; in fix_read_error()
2336 sectors -= s; in fix_read_error()
2360 int sectors; in narrow_write_error() local
2361 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2370 sectors = ((sector + block_sectors) in narrow_write_error()
2376 if (sectors > sect_to_write) in narrow_write_error()
2377 sectors = sect_to_write; in narrow_write_error()
2391 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2393 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
2400 sectors, 0) in narrow_write_error()
2404 sect_to_write -= sectors; in narrow_write_error()
2405 sector += sectors; in narrow_write_error()
2406 sectors = block_sectors; in narrow_write_error()
2414 int s = r1_bio->sectors; in handle_sync_write_finished()
2444 r1_bio->sectors, 0); in handle_write_finished()
2505 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2519 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2893 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2931 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid1_size() argument
2933 if (sectors) in raid1_size()
2934 return sectors; in raid1_size()
3197 static int raid1_resize(struct mddev *mddev, sector_t sectors) in raid1_resize() argument
3206 sector_t newsize = raid1_size(mddev, sectors, 0); in raid1_resize()
3216 if (sectors > mddev->dev_sectors && in raid1_resize()
3221 mddev->dev_sectors = sectors; in raid1_resize()
3222 mddev->resync_max_sectors = sectors; in raid1_resize()