Lines matching refs: sector_nr

71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
2902 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
2933 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
2941 if (sector_nr >= max_sector) { in raid10_sync_request()
2996 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3003 return (max_sector - sector_nr) + sectors_skipped; in raid10_sync_request()
3013 max_sector > (sector_nr | chunk_mask)) in raid10_sync_request()
3014 max_sector = (sector_nr | chunk_mask) + 1; in raid10_sync_request()
3075 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3307 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3309 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid10_sync_request()
3311 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3327 conf->next_resync = sector_nr; in raid10_sync_request()
3330 r10_bio->sector = sector_nr; in raid10_sync_request()
3333 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3420 if (sector_nr + max_sync < max_sector) in raid10_sync_request()
3421 max_sector = sector_nr + max_sync; in raid10_sync_request()
3425 if (sector_nr + (len>>9) > max_sector) in raid10_sync_request()
3426 len = (max_sector - sector_nr) << 9; in raid10_sync_request()
3439 sector_nr += len>>9; in raid10_sync_request()
3446 if (conf->cluster_sync_high < sector_nr + nr_sectors) { in raid10_sync_request()
3465 sect_va1 = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3516 if (sector_nr + max_sync < max_sector) in raid10_sync_request()
3517 max_sector = sector_nr + max_sync; in raid10_sync_request()
3519 sectors_skipped += (max_sector - sector_nr); in raid10_sync_request()
3521 sector_nr = max_sector; in raid10_sync_request()
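
Several of the raid10_sync_request() matches above (lines 3013-3014 and 3333) rely on the same chunk-alignment idiom: the RAID10 chunk size is a power of two, so chunk_mask is the chunk size in sectors minus one, and OR-ing it into sector_nr yields the last sector of the chunk that sector_nr falls in. A minimal standalone sketch, using hypothetical example values (chunk size and starting sector chosen only for illustration, not taken from the kernel):

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	/* assumed example values */
	sector_t chunk_sectors = 128;            /* 64 KiB chunks in 512-byte sectors */
	sector_t chunk_mask = chunk_sectors - 1;
	sector_t sector_nr = 300;

	/* last sector of the chunk containing sector_nr (cf. lines 3013-3014) */
	sector_t chunk_end = sector_nr | chunk_mask;

	/* sectors from sector_nr up to the chunk boundary (cf. line 3333) */
	sector_t r10_sectors = chunk_end - sector_nr + 1;

	printf("chunk_end=%llu sectors_to_sync=%llu\n", chunk_end, r10_sectors);
	return 0;
}

With these example values chunk_end is 383 and the sync request covers 84 sectors, i.e. it never crosses a chunk boundary.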
4394 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4447 if (sector_nr == 0) { in reshape_request()
4451 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4455 sector_nr = conf->reshape_progress; in reshape_request()
4456 if (sector_nr) { in reshape_request()
4457 mddev->curr_resync_completed = sector_nr; in reshape_request()
4460 return sector_nr; in reshape_request()
4485 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4487 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last) in reshape_request()
4488 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512; in reshape_request()
4506 sector_nr = conf->reshape_progress; in reshape_request()
4507 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4510 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last) in reshape_request()
4511 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1; in reshape_request()
4545 r10_bio->sector = sector_nr; in reshape_request()
4547 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4580 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4584 conf->cluster_sync_low = sector_nr; in reshape_request()
4585 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS; in reshape_request()
4648 sector_nr += len >> 9; in reshape_request()
4660 if (sector_nr <= last) in reshape_request()
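
The reshape_request() matches at lines 4485-4488 and 4506-4511 compute the copy window for one pass: a forwards reshape copies from reshape_progress to the end of its chunk, while a backwards reshape works downwards from reshape_progress - 1 to a chunk-aligned start, and in both cases the window is capped at RESYNC_BLOCK_SIZE worth of sectors. A standalone sketch of the forwards case only, with assumed example values:

#include <stdio.h>

typedef unsigned long long sector_t;

#define RESYNC_BLOCK_SIZE (64 * 1024)	/* bytes; assumed 64 KiB as in the md resync code */

int main(void)
{
	/* assumed example values, not taken from the kernel */
	sector_t chunk_mask = 1024 - 1;		/* 512 KiB chunks */
	sector_t reshape_progress = 5000;

	/* forwards reshape: window runs from reshape_progress to the end
	 * of its chunk (cf. lines 4506-4507)... */
	sector_t sector_nr = reshape_progress;
	sector_t last = sector_nr | chunk_mask;

	/* ...but never more than RESYNC_BLOCK_SIZE worth of sectors
	 * (cf. lines 4510-4511) */
	if (sector_nr + RESYNC_BLOCK_SIZE / 512 <= last)
		last = sector_nr + RESYNC_BLOCK_SIZE / 512 - 1;

	printf("copy window %llu..%llu (%llu sectors)\n",
	       sector_nr, last, last - sector_nr + 1);
	return 0;
}

Here the window is 5000..5119 (120 sectors): the chunk boundary at 5119 is reached before the RESYNC_BLOCK_SIZE cap of 128 sectors would apply.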