
Searched refs:chunk_sectors (Results 1 – 20 of 20) sorted by relevance

/Linux-v5.4/drivers/md/
raid0.c
103 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
104 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
165 if ((mddev->chunk_sectors << 9) % blksize) { in create_strip_zones()
168 mddev->chunk_sectors << 9, blksize); in create_strip_zones()
333 unsigned int chunk_sects = mddev->chunk_sectors; in map_sector()
369 ~(sector_t)(mddev->chunk_sectors-1)); in raid0_size()
381 if (mddev->chunk_sectors == 0) { in raid0_run()
400 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
401 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
402 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
[all …]
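The first two raid0.c hits round each member device's size down to a whole number of chunks: sector_div() divides a sector count in place and the quotient is multiplied back by chunk_sectors. A minimal userspace restatement of that rounding (plain 64-bit division stands in for the sector_div() kernel macro; the example numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Round a size in 512-byte sectors down to a whole number of chunks,
 * mirroring the divide-then-multiply seen in create_strip_zones(). */
static uint64_t round_to_chunks(uint64_t sectors, uint32_t chunk_sectors)
{
        return (sectors / chunk_sectors) * chunk_sectors;
}

int main(void)
{
        /* A 1000005-sector member with 128-sector (64 KiB) chunks is
         * trimmed to 999936 sectors; the trailing 69 sectors are unused. */
        printf("%llu\n", (unsigned long long)round_to_chunks(1000005, 128));
        return 0;
}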
md-linear.c
111 if (mddev->chunk_sectors) { in linear_conf()
113 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
114 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
307 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); in linear_status()
dm-zoned-target.c
876 unsigned int chunk_sectors = dmz->dev->zone_nr_sectors; in dmz_io_hints() local
886 limits->max_discard_sectors = chunk_sectors; in dmz_io_hints()
887 limits->max_hw_discard_sectors = chunk_sectors; in dmz_io_hints()
888 limits->max_write_zeroes_sectors = chunk_sectors; in dmz_io_hints()
891 limits->chunk_sectors = chunk_sectors; in dmz_io_hints()
892 limits->max_sectors = chunk_sectors; in dmz_io_hints()
raid5.c
749 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
2726 : conf->chunk_sectors; in raid5_compute_sector()
2922 : conf->chunk_sectors; in raid5_compute_blocknr()
3261 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3336 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
5121 unsigned int chunk_sectors; in in_chunk_boundary() local
5126 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5127 return chunk_sectors >= in in_chunk_boundary()
5128 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
5298 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
[all …]
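The in_chunk_boundary() hits above depend on chunk_sectors being a power of two: sector & (chunk_sectors - 1) is the offset within the current chunk, and the read is chunk-aligned only if the whole bio fits in what remains of that chunk. A standalone sketch of the same test, with a worked example (the values are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if bio_sectors starting at sector stay within one chunk; assumes
 * chunk_sectors is a power of two, as the kernel check does. */
static bool in_chunk_boundary(uint64_t sector, unsigned int bio_sectors,
                              unsigned int chunk_sectors)
{
        return chunk_sectors >=
               ((sector & (chunk_sectors - 1)) + bio_sectors);
}

int main(void)
{
        /* chunk_sectors = 128, sector = 250, bio_sectors = 8:
         * 250 & 127 = 122, and 122 + 8 = 130 > 128, so the bio straddles
         * a chunk boundary and the check fails (prints 0). */
        printf("%d\n", in_chunk_boundary(250, 8, 128));
        return 0;
}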
dm-raid.c
713 mddev->new_chunk_sectors = mddev->chunk_sectors; in rs_set_cur()
726 mddev->chunk_sectors = mddev->new_chunk_sectors; in rs_set_new()
975 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
1154 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
1484 if (rs->md.chunk_sectors) in parse_raid_params()
1485 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
1534 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2; in rs_set_raid456_stripe_cache()
1908 mddev->new_chunk_sectors != mddev->chunk_sectors || in rs_reshape_requested()
2136 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); in super_sync()
2248 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); in super_init_validation()
[all …]
raid5-ppl.c
328 (data_sector >> ilog2(conf->chunk_sectors) == in ppl_log_stripe()
329 data_sector_last >> ilog2(conf->chunk_sectors)) && in ppl_log_stripe()
824 if ((pp_size >> 9) < conf->chunk_sectors) { in ppl_recover_entry()
833 (data_disks - 1) * conf->chunk_sectors + in ppl_recover_entry()
837 strip_sectors = conf->chunk_sectors; in ppl_recover_entry()
871 (disk * conf->chunk_sectors); in ppl_recover_entry()
dm-unstripe.c
175 limits->chunk_sectors = uc->chunk_size; in unstripe_io_hints()
raid5.h
571 int chunk_sectors; member
md.c
1209 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1238 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1388 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1710 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1768 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1895 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
2509 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
3842 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3865 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
4057 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
[all …]
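The md.c superblock hits convert between the on-disk chunk size in bytes and the in-memory chunk_sectors: a sector is 512 bytes, so >> 9 turns bytes into sectors and << 9 turns sectors back into bytes, while the / 2 seen in the *_status() hits turns sectors into KiB. A small worked example with an assumed 64 KiB chunk:

#include <stdio.h>

int main(void)
{
        unsigned int chunk_size = 65536;              /* on-disk value, bytes */
        unsigned int chunk_sectors = chunk_size >> 9; /* 65536 / 512 = 128    */

        /* Prints "65536 bytes = 128 sectors = 64K". */
        printf("%u bytes = %u sectors = %uK\n",
               chunk_size, chunk_sectors, chunk_sectors / 2);
        return 0;
}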
raid10.c
1561 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
2859 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3583 chunk = mddev->chunk_sectors; in setup_geo()
3763 chunk_size = mddev->chunk_sectors << 9; in raid10_run()
3766 mddev->chunk_sectors); in raid10_run()
3891 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid10_run()
4027 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4744 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4905 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
md.h
305 int chunk_sectors; member
dm-table.c
1535 zone_sectors = ti_limits.chunk_sectors; in dm_calculate_queue_limits()
1593 zone_sectors = limits->chunk_sectors; in dm_calculate_queue_limits()
raid5-cache.c
200 offset = sector_div(sect, conf->chunk_sectors); in r5c_tree_index()
367 conf->chunk_sectors >> STRIPE_SHIFT)) in r5c_check_cached_full_stripe()
raid1.c
3252 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3255 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
/Linux-v5.4/block/
blk-settings.c
48 lim->chunk_sectors = 0; in blk_set_default_limits()
215 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) in blk_queue_chunk_sectors() argument
217 BUG_ON(!is_power_of_2(chunk_sectors)); in blk_queue_chunk_sectors()
218 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
613 if (b->chunk_sectors) in blk_stack_limits()
614 t->chunk_sectors = min_not_zero(t->chunk_sectors, in blk_stack_limits()
615 b->chunk_sectors); in blk_stack_limits()
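blk_queue_chunk_sectors() records the boundary that requests may not cross; the v5.4 code above insists the value be a power of two, and blk_stack_limits() keeps the smaller non-zero chunk_sectors when one queue is stacked on another. A userspace restatement of that stacking rule (min_not_zero() is re-implemented here as a plain function; the 0 and 256 are illustrative):

#include <stdio.h>

/* Pick the smaller of two limits, treating 0 as "no limit set", like the
 * kernel's min_not_zero() macro. */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

int main(void)
{
        /* Top device has no chunk limit (0); the bottom device reports 256
         * sectors, so the stacked queue inherits 256. */
        printf("%u\n", min_not_zero(0, 256));
        return 0;
}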
blk-sysfs.c
150 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
/Linux-v5.4/drivers/char/
ps3flash.c
26 u64 chunk_sectors; member
38 start_sector, priv->chunk_sectors, in ps3flash_read_write_sectors()
118 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_read()
151 sector += priv->chunk_sectors; in ps3flash_read()
187 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_write()
226 sector += priv->chunk_sectors; in ps3flash_write()
376 priv->chunk_sectors = dev->bounce_size / dev->blk_size; in ps3flash_probe()
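ps3flash derives chunk_sectors at probe time from the bounce-buffer size and the device block size, then maps a file position to a starting sector by counting whole bounce-buffer-sized chunks. A sketch of that mapping with made-up numbers (the real sizes come from the device):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bounce_size = 256 * 1024;                /* bytes             */
        uint64_t blk_size = 512;                          /* bytes per sector  */
        uint64_t chunk_sectors = bounce_size / blk_size;  /* 512 sectors       */
        uint64_t pos = 300000;                            /* file offset       */

        /* 300000 / 262144 = 1 (integer division), so the access starts at
         * sector 512, the beginning of the second chunk. */
        uint64_t sector = pos / bounce_size * chunk_sectors;

        printf("start sector = %llu\n", (unsigned long long)sector);
        return 0;
}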
/Linux-v5.4/include/linux/
blkdev.h
327 unsigned int chunk_sectors; member
708 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; in blk_queue_zone_sectors()
722 return sector >> ilog2(q->limits.chunk_sectors); in blk_queue_zone_no()
1016 if (!q->limits.chunk_sectors) in blk_max_size_offset()
1019 return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - in blk_max_size_offset()
1020 (offset & (q->limits.chunk_sectors - 1)))); in blk_max_size_offset()
1031 if (!q->limits.chunk_sectors || in blk_rq_get_max_sectors()
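blk_max_size_offset() in blkdev.h caps a request so it never crosses a chunk_sectors boundary: the room left in the current chunk is chunk_sectors - (offset & (chunk_sectors - 1)), and the result is the smaller of that and max_sectors. A userspace restatement with a worked example (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* How many sectors may a request starting at offset cover without crossing
 * a chunk boundary?  Assumes chunk_sectors is a power of two. */
static unsigned int max_size_offset(uint64_t offset, unsigned int max_sectors,
                                    unsigned int chunk_sectors)
{
        unsigned int room;

        if (!chunk_sectors)            /* no chunk boundary configured */
                return max_sectors;

        room = chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
        return max_sectors < room ? max_sectors : room;
}

int main(void)
{
        /* chunk_sectors = 8, offset = 5: only 3 sectors remain before the
         * next boundary, so the request is capped at 3 even though
         * max_sectors would allow 128. */
        printf("%u\n", max_size_offset(5, 128, 8));
        return 0;
}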
/Linux-v5.4/Documentation/ABI/testing/
sysfs-block
270 What: /sys/block/<disk>/queue/chunk_sectors
274 chunk_sectors has different meaning depending on the type
275 of the disk. For a RAID device (dm-raid), chunk_sectors
278 host-aware or host-managed, chunk_sectors indicates the
/Linux-v5.4/Documentation/block/
queue-sysfs.rst
18 chunk_sectors (RO)
21 For a RAID device (dm-raid), chunk_sectors indicates the size in 512B sectors
23 or host-managed, chunk_sectors indicates the size in 512B sectors of the zones
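Both documentation hits describe the same sysfs attribute: for a striped RAID device it is the chunk size in 512-byte sectors, and for host-aware or host-managed zoned disks it is the zone size. A minimal sketch of reading it from userspace (the device name "sda" is only an example):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/sda/queue/chunk_sectors", "r");
        unsigned int chunk_sectors = 0;

        if (!f) {
                perror("chunk_sectors");
                return 1;
        }
        if (fscanf(f, "%u", &chunk_sectors) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /* 0 means the queue has no chunk boundary configured. */
        printf("%u sectors (%u KiB)\n", chunk_sectors, chunk_sectors / 2);
        return 0;
}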