Lines Matching refs:limits (usage sketches follow the listing)
470 struct queue_limits limits; member
630 return q->limits.zoned; in blk_queue_zoned_model()
655 return sector >> ilog2(disk->queue->limits.chunk_sectors); in disk_zone_no()
912 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
914 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1087 return q->limits.seg_boundary_mask; in queue_segment_boundary()
1092 return q->limits.virt_boundary_mask; in queue_virt_boundary()
1097 return q->limits.max_sectors; in queue_max_sectors()
1107 return q->limits.max_hw_sectors; in queue_max_hw_sectors()
1112 return q->limits.max_segments; in queue_max_segments()
1117 return q->limits.max_discard_segments; in queue_max_discard_segments()
1122 return q->limits.max_segment_size; in queue_max_segment_size()
1128 const struct queue_limits *l = &q->limits; in queue_max_zone_append_sectors()
1148 if (q && q->limits.logical_block_size) in queue_logical_block_size()
1149 retval = q->limits.logical_block_size; in queue_logical_block_size()
1161 return q->limits.physical_block_size; in queue_physical_block_size()
1171 return q->limits.io_min; in queue_io_min()
1181 return q->limits.io_opt; in queue_io_opt()
1192 return q->limits.zone_write_granularity; in queue_zone_write_granularity()
1206 return bdev_get_queue(bdev)->limits.max_discard_sectors; in bdev_max_discard_sectors()
1211 return bdev_get_queue(bdev)->limits.discard_granularity; in bdev_discard_granularity()
1217 return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; in bdev_max_secure_erase_sectors()
1225 return q->limits.max_write_zeroes_sectors; in bdev_write_zeroes_sectors()
1295 return q->limits.chunk_sectors; in bdev_zone_sectors()
1312 return q ? q->limits.dma_alignment : 511; in queue_dma_alignment()
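
The matches above are the struct queue_limits member embedded in struct request_queue, together with the inline accessors that read its fields (the listing appears to come from include/linux/blkdev.h). Below is a minimal sketch of querying a few of those limits for an open block device, assuming a kernel of roughly the same vintage as this listing; my_dump_limits is an illustrative name, not part of the kernel:

/*
 * Sketch only: dump a handful of queue_limits-derived values for a
 * block device, using nothing but the accessors matched above.
 */
#include <linux/blkdev.h>
#include <linux/printk.h>

static void my_dump_limits(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	pr_info("logical/physical block size: %u/%u\n",
		queue_logical_block_size(q),
		queue_physical_block_size(q));
	pr_info("io_min/io_opt: %u/%u bytes\n",
		queue_io_min(q), queue_io_opt(q));
	pr_info("max_sectors/max_hw_sectors: %u/%u\n",
		queue_max_sectors(q), queue_max_hw_sectors(q));
	pr_info("max discard sectors: %u (granularity %u)\n",
		bdev_max_discard_sectors(bdev),
		bdev_discard_granularity(bdev));
	pr_info("write-zeroes sectors: %u, dma_alignment: %d\n",
		bdev_write_zeroes_sectors(bdev),
		queue_dma_alignment(q));
}

Note that the sector-based accessors report 512-byte sectors regardless of the device's logical block size, while io_min and io_opt are in bytes.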
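
The two blk_limits_* declarations above take a bare struct queue_limits rather than a request queue, which is what stacking drivers use when they build up limits before they are applied to a queue. Here is a hedged sketch in the style of a device-mapper target's io_hints hook; my_io_hints and the 4 KiB/64 KiB values are purely illustrative, not taken from the listing:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Sketch of an io_hints callback: advertise a preferred I/O geometry by
 * adjusting the limits the core hands us.  Both helpers take bytes.
 */
static void my_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	blk_limits_io_min(limits, 4096);	/* minimum efficient I/O size */
	blk_limits_io_opt(limits, 64 * 1024);	/* optimal (e.g. stripe) size */
}

A target would wire this up through the io_hints member of its struct target_type; drivers that own a queue directly would typically use the blk_queue_io_min()/blk_queue_io_opt() wrappers instead.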
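
disk_zone_no() and bdev_zone_sectors() above show that zoned devices expose their zone size through limits.chunk_sectors and that the block layer relies on it being a power of two (hence the ilog2() shift). Under that assumption, mapping a sector to its zone is a shift and a mask; my_zone_of and my_zone_offset are illustrative names only:

#include <linux/blkdev.h>
#include <linux/log2.h>

/* Sketch: zone number containing @sector, mirroring disk_zone_no(). */
static unsigned int my_zone_of(struct block_device *bdev, sector_t sector)
{
	return sector >> ilog2(bdev_zone_sectors(bdev));
}

/* Sketch: offset of @sector from the start of its zone. */
static sector_t my_zone_offset(struct block_device *bdev, sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

Both helpers only make sense when the device is actually zoned; on a non-zoned device bdev_zone_sectors() returns 0 and ilog2(0) is undefined.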