Lines Matching full:limits — all hits are in the Linux block layer's block/blk-settings.c; each entry reads <source line number> <matched line> in <enclosing function>().
28 * blk_set_default_limits - reset limits to default values
64 * blk_set_stacking_limits - set default limits for stacking devices
69 * by stacking drivers like DM that have no internal limits.
75 /* Inherit limits from component devices */ in blk_set_stacking_limits()
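A rough, hypothetical sketch (driver and function names invented, not part of the listing) of how a DM-style stacking driver would use this entry point: it starts from the permissive stacking defaults so that component limits merged in later can only narrow them.

#include <linux/blkdev.h>

/* Hypothetical stacking driver: begin with the permissive stacking
 * defaults; blk_stack_limits() narrows them per component device later. */
static void example_init_stacked_queue(struct request_queue *q)
{
        blk_set_stacking_limits(&q->limits);
}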
99 q->limits.bounce = bounce; in blk_queue_bounce_limit()
124 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors() local
134 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
135 limits->max_hw_sectors = max_hw_sectors; in blk_queue_max_hw_sectors()
137 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); in blk_queue_max_hw_sectors()
140 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
141 limits->max_sectors = max_sectors; in blk_queue_max_hw_sectors()
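A minimal sketch of a driver calling this setter; per the fragments above, the value is rounded down to whole logical blocks, and max_sectors is then derived as min_not_zero(max_hw_sectors, max_dev_sectors). The function name and the 1 MiB figure are illustrative.

#include <linux/blkdev.h>

static void example_set_transfer_limit(struct request_queue *q)
{
        /* Controller moves at most 1 MiB per request: 2048 sectors of
         * 512 bytes. Rounded down internally to whole logical blocks. */
        blk_queue_max_hw_sectors(q, 2048);
}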
163 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
175 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
176 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
188 q->limits.max_secure_erase_sectors = max_sectors; in blk_queue_max_secure_erase_sectors()
201 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
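A hedged sketch of advertising these optional commands (values illustrative): a limit of 0 leaves the operation unsupported, and, as the two lines at 175-176 show, blk_queue_max_discard_sectors() sets both the hardware and the user-visible discard limit.

#include <linux/blkdev.h>
#include <linux/limits.h>

static void example_set_optional_cmd_limits(struct request_queue *q)
{
        blk_queue_max_discard_sectors(q, UINT_MAX);     /* no driver cap */
        blk_queue_max_secure_erase_sectors(q, 0);       /* unsupported */
        blk_queue_max_write_zeroes_sectors(q, UINT_MAX);
}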
218 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); in blk_queue_max_zone_append_sectors()
219 max_sectors = min(q->limits.chunk_sectors, max_sectors); in blk_queue_max_zone_append_sectors()
228 q->limits.max_zone_append_sectors = max_sectors; in blk_queue_max_zone_append_sectors()
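Per the two min() lines above, the zone-append limit is clamped to both max_hw_sectors and chunk_sectors (the zone size), so a hypothetical zoned driver can pass UINT_MAX to mean "as much as hardware and zone geometry allow". This sketch assumes the queue was already marked zoned.

#include <linux/blkdev.h>
#include <linux/limits.h>

static void example_set_zoned_limits(struct request_queue *q,
                                     unsigned int zone_sectors)
{
        /* Assumes the queue is already zoned (see disk_set_zoned()
         * further down the listing). */
        blk_queue_chunk_sectors(q, zone_sectors);
        blk_queue_max_zone_append_sectors(q, UINT_MAX);
}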
249 q->limits.max_segments = max_segments; in blk_queue_max_segments()
265 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
287 WARN_ON_ONCE(q->limits.virt_boundary_mask); in blk_queue_max_segment_size()
289 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
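A sketch of typical scatter-gather limits (numbers illustrative). The WARN_ON_ONCE fragment above fires if a virt boundary mask is already set, because a virt boundary forces max_segment_size to UINT_MAX (see blk_queue_virt_boundary() further down).

#include <linux/blkdev.h>

static void example_set_sg_limits(struct request_queue *q)
{
        blk_queue_max_segments(q, 128);         /* SG entries per request */
        blk_queue_max_discard_segments(q, 1);   /* single-range discards */
        blk_queue_max_segment_size(q, 65536);   /* bytes per SG entry */
}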
305 struct queue_limits *limits = &q->limits; in blk_queue_logical_block_size() local
307 limits->logical_block_size = size; in blk_queue_logical_block_size()
309 if (limits->physical_block_size < size) in blk_queue_logical_block_size()
310 limits->physical_block_size = size; in blk_queue_logical_block_size()
312 if (limits->io_min < limits->physical_block_size) in blk_queue_logical_block_size()
313 limits->io_min = limits->physical_block_size; in blk_queue_logical_block_size()
315 limits->max_hw_sectors = in blk_queue_logical_block_size()
316 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
317 limits->max_sectors = in blk_queue_logical_block_size()
318 round_down(limits->max_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
334 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
336 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
337 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
339 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
340 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
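The two setters above enforce an ordering: physical_block_size is raised to at least logical_block_size, io_min to at least physical_block_size, and max_hw_sectors/max_sectors are rounded down to whole logical blocks. A sketch for a hypothetical 512e drive (512-byte logical, 4096-byte physical sectors):

#include <linux/blkdev.h>

static void example_set_block_sizes(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);    /* addressable unit */
        blk_queue_physical_block_size(q, 4096);  /* internal atom */
}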
359 q->limits.zone_write_granularity = size; in blk_queue_zone_write_granularity()
361 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
362 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
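A small sketch: as the fragment shows, the granularity is raised to at least the logical block size; it also assumes (not visible in the listing) that the queue is already zoned when this setter runs.

#include <linux/blkdev.h>

static void example_set_zone_write_granularity(struct request_queue *q)
{
        /* Many zoned devices can simply report their logical block size. */
        blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}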
379 q->limits.alignment_offset = in blk_queue_alignment_offset()
380 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
381 q->limits.misaligned = 0; in blk_queue_alignment_offset()
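Only the offset within one physical block is kept (offset & (physical_block_size - 1)), and the misaligned flag is cleared. A sketch for the classic 512e case where LBA 0 sits seven 512-byte sectors into a physical block (value illustrative):

#include <linux/blkdev.h>

static void example_report_alignment(struct request_queue *q)
{
        blk_queue_alignment_offset(q, 7 * 512);  /* 3584 bytes */
}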
401 * @limits: the queue limits
410 void blk_limits_io_min(struct queue_limits *limits, unsigned int min) in blk_limits_io_min() argument
412 limits->io_min = min; in blk_limits_io_min()
414 if (limits->io_min < limits->logical_block_size) in blk_limits_io_min()
415 limits->io_min = limits->logical_block_size; in blk_limits_io_min()
417 if (limits->io_min < limits->physical_block_size) in blk_limits_io_min()
418 limits->io_min = limits->physical_block_size; in blk_limits_io_min()
438 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
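io_min is clamped up to both the logical and the physical block size, so an undersized hint is silently raised; blk_queue_io_min() shown above is just the queue-based wrapper. A sketch with an illustrative RAID chunk size:

#include <linux/blkdev.h>

static void example_set_min_io(struct request_queue *q)
{
        /* Preferred minimum I/O, e.g. a RAID chunk; raised internally
         * to at least the physical block size. */
        blk_queue_io_min(q, 64 * 1024);
}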
444 * @limits: the queue limits
455 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) in blk_limits_io_opt() argument
457 limits->io_opt = opt; in blk_limits_io_opt()
476 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
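Unlike io_min, io_opt is stored as given, with no clamping against the block sizes. As a hypothetical example, a four-disk RAID5 driver might report its chunk as io_min and the full stripe (three data chunks) as io_opt:

#include <linux/blkdev.h>

static void example_set_io_hints(struct request_queue *q)
{
        blk_queue_io_min(q, 64 * 1024);        /* chunk size */
        blk_queue_io_opt(q, 3 * 64 * 1024);    /* full stripe width */
}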
528 * @t: the stacking driver limits (top device)
529 * @b: the underlying queue limits (bottom, component device)
688 * disk_stack_limits - adjust queue limits for stacked drivers
694 * Merges the limits for a top level gendisk and a bottom level
702 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
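Per the fragment, disk_stack_limits() reduces to blk_stack_limits() on the two queue_limits plus the component's start offset, which feeds the alignment math. A sketch calling the lower-level form directly (wrapper name invented):

#include <linux/blkdev.h>

static int example_stack_component(struct queue_limits *top,
                                   struct block_device *component,
                                   sector_t start)
{
        /* Returns -1 (and marks top->misaligned) if the combination
         * cannot be aligned; 0 otherwise. */
        return blk_stack_limits(top, &bdev_get_queue(component)->limits,
                                start);
}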
741 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
752 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
761 q->limits.max_segment_size = UINT_MAX; in blk_queue_virt_boundary()
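Setting a virt boundary (e.g. NVMe's rule that middle SG elements be page-aligned) also lifts max_segment_size to UINT_MAX, as the fragment shows: such devices describe I/O with fixed-size descriptors rather than arbitrary-length segments. A sketch with an assumed page-sized boundary:

#include <linux/blkdev.h>

static void example_set_boundaries(struct request_queue *q)
{
        blk_queue_segment_boundary(q, 0xffffffff);   /* 4 GiB DMA boundary */
        blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}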
777 q->limits.dma_alignment = mask; in blk_queue_dma_alignment()
799 if (mask > q->limits.dma_alignment) in blk_queue_update_dma_alignment()
800 q->limits.dma_alignment = mask; in blk_queue_update_dma_alignment()
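blk_queue_dma_alignment() sets the mask outright, while the update variant only ever tightens it (the mask can only grow), which suits a transport layering its own requirement over the queue default. A sketch requiring full sector alignment of user buffers:

#include <linux/blkdev.h>

static void example_tighten_dma_alignment(struct request_queue *q)
{
        blk_queue_update_dma_alignment(q, 511);  /* 512-byte alignment */
}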
941 q->limits.zoned = model; in disk_set_zoned()
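A sketch marking a gendisk host-managed; the zone-append and zone-write-granularity setters above expect this to have happened first (model names are from enum blk_zoned_model):

#include <linux/blkdev.h>

static void example_mark_host_managed(struct gendisk *disk)
{
        disk_set_zoned(disk, BLK_ZONED_HM);  /* host-managed zoned model */
}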
959 if (q->limits.misaligned) in bdev_alignment_offset()
962 return queue_limit_alignment_offset(&q->limits, in bdev_alignment_offset()
964 return q->limits.alignment_offset; in bdev_alignment_offset()
973 return queue_limit_discard_alignment(&q->limits, in bdev_discard_alignment()
975 return q->limits.discard_alignment; in bdev_discard_alignment()