
Searched refs:chunk_sectors (Results 1 – 19 of 19) sorted by relevance

/Linux-v4.19/drivers/md/
raid0.c
107 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
108 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
153 if ((mddev->chunk_sectors << 9) % blksize) { in create_strip_zones()
156 mddev->chunk_sectors << 9, blksize); in create_strip_zones()
321 unsigned int chunk_sects = mddev->chunk_sectors; in map_sector()
357 ~(sector_t)(mddev->chunk_sectors-1)); in raid0_size()
369 if (mddev->chunk_sectors == 0) { in raid0_run()
388 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
389 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
390 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
[all …]
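The raid0 hits show the two basic chunk calculations: each member device is rounded down to a whole number of chunks (the kernel's sector_div() divides its first argument in place and returns the remainder), and the chunk size in bytes (chunk_sectors << 9, a sector being 512 bytes) must be a multiple of the logical block size. A minimal userspace sketch of the same arithmetic, with hypothetical values and plain 64-bit division standing in for sector_div():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dev_sectors = 1000005;    /* hypothetical member size        */
        unsigned int chunk_sectors = 128;  /* 64 KiB chunks, 512-byte sectors */
        unsigned int blksize = 4096;       /* logical block size in bytes     */

        /* Round the member down to a whole number of chunks,
         * as the hits at 107-108 above do. */
        uint64_t usable = dev_sectors / chunk_sectors * chunk_sectors;

        /* The chunk size in bytes must be a multiple of the logical
         * block size, the check at hit 153 above. */
        if (((uint64_t)chunk_sectors << 9) % blksize)
            fprintf(stderr, "bad chunk size %u\n", chunk_sectors << 9);

        printf("usable sectors: %llu\n", (unsigned long long)usable);
        return 0;
    }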
md-linear.c
119 if (mddev->chunk_sectors) { in linear_conf()
121 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
122 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
310 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); in linear_status()
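Here, and in raid10_status() further down, chunk_sectors / 2 converts 512-byte sectors to KiB for display: a 128-sector chunk prints as "64k rounding".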
dm-zoned-target.c
875 unsigned int chunk_sectors = dmz->dev->zone_nr_sectors; in dmz_io_hints() local
885 limits->max_discard_sectors = chunk_sectors; in dmz_io_hints()
886 limits->max_hw_discard_sectors = chunk_sectors; in dmz_io_hints()
887 limits->max_write_zeroes_sectors = chunk_sectors; in dmz_io_hints()
890 limits->chunk_sectors = chunk_sectors; in dmz_io_hints()
891 limits->max_sectors = chunk_sectors; in dmz_io_hints()
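dm-zoned uses the zone size as its chunk limit and also caps max_sectors, max_discard_sectors, max_hw_discard_sectors and max_write_zeroes_sectors to the same value, so that no request of any kind can span a zone boundary.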
raid5.c
754 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
2719 : conf->chunk_sectors; in raid5_compute_sector()
2915 : conf->chunk_sectors; in raid5_compute_blocknr()
3254 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3329 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
5107 unsigned int chunk_sectors; in in_chunk_boundary() local
5112 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5113 return chunk_sectors >= in in_chunk_boundary()
5114 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
5285 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
[all …]
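The in_chunk_boundary() hit is the standard power-of-two trick: because chunk_sectors is a power of two, sector & (chunk_sectors - 1) is the offset within the chunk, and the bio stays inside one chunk iff that offset plus the bio length does not exceed chunk_sectors. A small sketch with hypothetical values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the test at hits 5113-5114 above; only valid when
     * chunk_sectors is a power of two. */
    static bool fits_in_chunk(uint64_t sector, unsigned int bio_sectors,
                              unsigned int chunk_sectors)
    {
        return chunk_sectors >=
               ((sector & (chunk_sectors - 1)) + bio_sectors);
    }

    int main(void)
    {
        printf("%d\n", fits_in_chunk(1000, 24, 128)); /* 1: 104 + 24 = 128, fits    */
        printf("%d\n", fits_in_chunk(1000, 25, 128)); /* 0: 104 + 25 = 129, crosses */
        return 0;
    }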
dm-raid.c
713 mddev->new_chunk_sectors = mddev->chunk_sectors; in rs_set_cur()
726 mddev->chunk_sectors = mddev->new_chunk_sectors; in rs_set_new()
975 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
1154 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
1484 if (rs->md.chunk_sectors) in parse_raid_params()
1485 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
1534 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2; in rs_set_raid456_stripe_cache()
1908 mddev->new_chunk_sectors != mddev->chunk_sectors || in rs_reshape_requested()
2136 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); in super_sync()
2248 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); in super_init_validation()
[all …]
raid5-ppl.c
335 (data_sector >> ilog2(conf->chunk_sectors) == in ppl_log_stripe()
336 data_sector_last >> ilog2(conf->chunk_sectors)) && in ppl_log_stripe()
829 if ((pp_size >> 9) < conf->chunk_sectors) { in ppl_recover_entry()
838 (data_disks - 1) * conf->chunk_sectors + in ppl_recover_entry()
842 strip_sectors = conf->chunk_sectors; in ppl_recover_entry()
876 (disk * conf->chunk_sectors); in ppl_recover_entry()
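ppl_log_stripe() uses the complementary trick: shifting right by ilog2(chunk_sectors) yields the chunk index, so two sectors share a chunk iff the shifted values are equal. A sketch, with the kernel's ilog2() modeled by GCC's __builtin_ctz() and hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sectors = 128;
        /* for a power of two, the count of trailing zeros is its log2 */
        unsigned int shift = __builtin_ctz(chunk_sectors);
        uint64_t a = 1000, b = 1023, c = 1024;

        printf("%d\n", (a >> shift) == (b >> shift)); /* 1: both in chunk 7 */
        printf("%d\n", (a >> shift) == (c >> shift)); /* 0: c opens chunk 8 */
        return 0;
    }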
dm-unstripe.c
175 limits->chunk_sectors = uc->chunk_size; in unstripe_io_hints()
raid5.h
574 int chunk_sectors; member
md.c
1148 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1177 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1325 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1644 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1702 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1818 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
2430 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
3763 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3786 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
3978 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
[all …]
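The v0.90 superblock stores the chunk size in bytes, hence the >> 9 / << 9 conversions in super_90_validate() and super_90_sync() (65536 bytes >> 9 = 128 sectors); the v1.x superblock already stores it in 512-byte sectors, so super_1_validate() copies it through unshifted.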
raid10.c
1594 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
2891 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3614 chunk = mddev->chunk_sectors; in setup_geo()
3794 chunk_size = mddev->chunk_sectors << 9; in raid10_run()
3797 mddev->chunk_sectors); in raid10_run()
3922 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid10_run()
4056 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4714 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4863 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
md.h
299 int chunk_sectors; member
dm-table.c
1541 zone_sectors = ti_limits.chunk_sectors; in dm_calculate_queue_limits()
1599 zone_sectors = limits->chunk_sectors; in dm_calculate_queue_limits()
raid5-cache.c
209 offset = sector_div(sect, conf->chunk_sectors); in r5c_tree_index()
376 conf->chunk_sectors >> STRIPE_SHIFT)) in r5c_check_cached_full_stripe()
raid1.c
3206 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3209 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
/Linux-v4.19/block/
blk-settings.c
99 lim->chunk_sectors = 0; in blk_set_default_limits()
269 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) in blk_queue_chunk_sectors() argument
271 BUG_ON(!is_power_of_2(chunk_sectors)); in blk_queue_chunk_sectors()
272 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
666 if (b->chunk_sectors) in blk_stack_limits()
667 t->chunk_sectors = min_not_zero(t->chunk_sectors, in blk_stack_limits()
668 b->chunk_sectors); in blk_stack_limits()
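blk_queue_chunk_sectors() insists on a power of two, which is what makes the mask and shift tricks above valid, and blk_stack_limits() propagates the smallest non-zero chunk limit up through a stack of devices. A sketch of that stacking rule, with min_not_zero() modeled locally and hypothetical component limits:

    #include <stdio.h>

    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
        if (!a) return b;
        if (!b) return a;
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int top = 0;                     /* top device, unset so far */
        unsigned int bottoms[] = { 0, 256, 128 }; /* component limits         */

        for (int i = 0; i < 3; i++)
            if (bottoms[i])                       /* only stack set limits    */
                top = min_not_zero(top, bottoms[i]);

        printf("stacked chunk_sectors: %u\n", top); /* -> 128 */
        return 0;
    }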
blk-sysfs.c
157 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
/Linux-v4.19/drivers/char/
ps3flash.c
38 u64 chunk_sectors; member
50 start_sector, priv->chunk_sectors, in ps3flash_read_write_sectors()
130 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_read()
163 sector += priv->chunk_sectors; in ps3flash_read()
199 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_write()
238 sector += priv->chunk_sectors; in ps3flash_write()
388 priv->chunk_sectors = dev->bounce_size / dev->blk_size; in ps3flash_probe()
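ps3flash is unrelated to the request-queue limit of the same name: its chunk_sectors is simply the bounce-buffer size expressed in device sectors (bounce_size / blk_size), used to step through the flash one buffer at a time.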
/Linux-v4.19/include/linux/
blkdev.h
371 unsigned int chunk_sectors; member
798 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; in blk_queue_zone_sectors()
807 return sector >> ilog2(q->limits.chunk_sectors); in blk_queue_zone_no()
1116 if (!q->limits.chunk_sectors) in blk_max_size_offset()
1119 return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - in blk_max_size_offset()
1120 (offset & (q->limits.chunk_sectors - 1)))); in blk_max_size_offset()
1131 if (!q->limits.chunk_sectors || in blk_rq_get_max_sectors()
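blk_max_size_offset() is where the limit bites: an I/O starting at a given offset may be no larger than the distance to the next chunk boundary, further capped by max_sectors. A userspace sketch with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the logic at hits 1116-1120 above; chunk_sectors must
     * be a power of two, zero meaning "no chunk limit". */
    static unsigned int max_size_at(uint64_t offset, unsigned int max_sectors,
                                    unsigned int chunk_sectors)
    {
        unsigned int to_boundary;

        if (!chunk_sectors)
            return max_sectors;

        to_boundary = chunk_sectors - (offset & (chunk_sectors - 1));
        return max_sectors < to_boundary ? max_sectors : to_boundary;
    }

    int main(void)
    {
        printf("%u\n", max_size_at(120, 256, 128)); /* 8: boundary at 128  */
        printf("%u\n", max_size_at(128, 256, 128)); /* 128: starts a chunk */
        return 0;
    }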
/Linux-v4.19/Documentation/ABI/testing/
sysfs-block
262 What: /sys/block/<disk>/queue/chunk_sectors
266 chunk_sectors has different meaning depending on the type
267 of the disk. For a RAID device (dm-raid), chunk_sectors
270 host-aware or host-managed, chunk_sectors indicates the
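The hit list truncates the description mid-sentence; as the blkdev.h hits above show, for a zoned device blk_queue_zone_sectors() simply returns q->limits.chunk_sectors, i.e. the attribute reports the zone size in 512-byte sectors, while for RAID it reports the stripe chunk size.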