Lines Matching +full:set +full:- +full:top
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/backing-dev-defs.h>
16 #include <linux/dma-mapping.h>
19 #include "blk-rq-qos.h"
20 #include "blk-wbt.h"
24 q->rq_timeout = timeout; in blk_queue_rq_timeout()
29 * blk_set_default_limits - reset limits to default values
37 lim->max_segments = BLK_MAX_SEGMENTS; in blk_set_default_limits()
38 lim->max_discard_segments = 1; in blk_set_default_limits()
39 lim->max_integrity_segments = 0; in blk_set_default_limits()
40 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; in blk_set_default_limits()
41 lim->virt_boundary_mask = 0; in blk_set_default_limits()
42 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; in blk_set_default_limits()
43 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; in blk_set_default_limits()
44 lim->max_user_sectors = lim->max_dev_sectors = 0; in blk_set_default_limits()
45 lim->chunk_sectors = 0; in blk_set_default_limits()
46 lim->max_write_zeroes_sectors = 0; in blk_set_default_limits()
47 lim->max_zone_append_sectors = 0; in blk_set_default_limits()
48 lim->max_discard_sectors = 0; in blk_set_default_limits()
49 lim->max_hw_discard_sectors = 0; in blk_set_default_limits()
50 lim->max_secure_erase_sectors = 0; in blk_set_default_limits()
51 lim->discard_granularity = 0; in blk_set_default_limits()
52 lim->discard_alignment = 0; in blk_set_default_limits()
53 lim->discard_misaligned = 0; in blk_set_default_limits()
54 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; in blk_set_default_limits()
55 lim->bounce = BLK_BOUNCE_NONE; in blk_set_default_limits()
56 lim->alignment_offset = 0; in blk_set_default_limits()
57 lim->io_opt = 0; in blk_set_default_limits()
58 lim->misaligned = 0; in blk_set_default_limits()
59 lim->zoned = BLK_ZONED_NONE; in blk_set_default_limits()
60 lim->zone_write_granularity = 0; in blk_set_default_limits()
61 lim->dma_alignment = 511; in blk_set_default_limits()
65 * blk_set_stacking_limits - set default limits for stacking devices
77 lim->max_segments = USHRT_MAX; in blk_set_stacking_limits()
78 lim->max_discard_segments = USHRT_MAX; in blk_set_stacking_limits()
79 lim->max_hw_sectors = UINT_MAX; in blk_set_stacking_limits()
80 lim->max_segment_size = UINT_MAX; in blk_set_stacking_limits()
81 lim->max_sectors = UINT_MAX; in blk_set_stacking_limits()
82 lim->max_dev_sectors = UINT_MAX; in blk_set_stacking_limits()
83 lim->max_write_zeroes_sectors = UINT_MAX; in blk_set_stacking_limits()
84 lim->max_zone_append_sectors = UINT_MAX; in blk_set_stacking_limits()
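Because blk_stack_limits() can only tighten limits, a stacking driver starts from these most-permissive defaults before merging in its component devices. A minimal sketch, with an invented mytgt_ name (a fuller stacking example follows blk_stack_limits() further below):

#include <linux/blkdev.h>

/* Hypothetical MD/DM-style target: reset to the widest limits first. */
static void mytgt_reset_limits(struct queue_limits *lim)
{
	blk_set_stacking_limits(lim);
}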
89 * blk_queue_bounce_limit - set bounce buffer limit for queue
100 q->limits.bounce = bounce; in blk_queue_bounce_limit()
105 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
110 * Enables a low level driver to set a hard upper limit,
111 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
116 * READ/WRITE requests. It is set by the disk driver.
120 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
125 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors()
129 max_hw_sectors = 1 << (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
130 printk(KERN_INFO "%s: set to minimum %d\n", in blk_queue_max_hw_sectors()
135 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
136 limits->max_hw_sectors = max_hw_sectors; in blk_queue_max_hw_sectors()
138 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); in blk_queue_max_hw_sectors()
140 if (limits->max_user_sectors) in blk_queue_max_hw_sectors()
141 max_sectors = min(max_sectors, limits->max_user_sectors); in blk_queue_max_hw_sectors()
146 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
147 limits->max_sectors = max_sectors; in blk_queue_max_hw_sectors()
149 if (!q->disk) in blk_queue_max_hw_sectors()
151 q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
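A low-level driver normally calls this once at probe time with whatever its controller can actually move per command; max_sectors is then derived from it, further capped by max_dev_sectors and the user setting in max_sectors_kb. A hedged sketch, assuming a hypothetical controller limited to 1 MiB per request:

#include <linux/blkdev.h>

/* Illustrative only: 2048 sectors of 512 bytes = 1 MiB per command. */
static void mydev_set_transfer_limit(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);
}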
156 * blk_queue_chunk_sectors - set size of the chunk for this queue
161 * If a driver doesn't want IOs to cross a given chunk size, it can set
169 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
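For example, a hypothetical striping driver with a 256 KiB stripe unit could keep bios from straddling a stripe boundary like this (the 512-sector value is purely illustrative):

#include <linux/blkdev.h>

/* Hypothetical striping driver: 256 KiB stripe unit = 512 sectors. */
static void mystripe_set_chunk(struct request_queue *q)
{
	blk_queue_chunk_sectors(q, 512);
}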
174 * blk_queue_max_discard_sectors - set max sectors for a single discard
181 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
182 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
187 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
194 q->limits.max_secure_erase_sectors = max_sectors; in blk_queue_max_secure_erase_sectors()
199 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
207 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
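Discard, secure-erase and write-zeroes limits are usually advertised together at probe time. A sketch with placeholder values for a hypothetical device; note that the discard setter above fills in both max_hw_discard_sectors and max_discard_sectors:

#include <linux/blkdev.h>

static void mydev_set_discard_limits(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, 4 * 1024 * 1024);	/* 2 GiB per discard */
	blk_queue_max_write_zeroes_sectors(q, 4 * 1024 * 1024);
	q->limits.discard_granularity = 4096;	/* device trims in 4 KiB granules */
}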
212 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
224 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); in blk_queue_max_zone_append_sectors()
225 max_sectors = min(q->limits.chunk_sectors, max_sectors); in blk_queue_max_zone_append_sectors()
229 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set, in blk_queue_max_zone_append_sectors()
230 * or the max_hw_sectors limit not set. in blk_queue_max_zone_append_sectors()
234 q->limits.max_zone_append_sectors = max_sectors; in blk_queue_max_zone_append_sectors()
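Note the ordering the WARN_ON_ONCE() guards: max_hw_sectors and chunk_sectors (the zone size) must already be set, or the computed zone-append limit collapses to 0. A sketch for a hypothetical zoned driver with 256 MiB zones:

#include <linux/blkdev.h>

static void myzoned_set_append_limit(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 1024);		/* controller: 512 KiB per command */
	blk_queue_chunk_sectors(q, 524288);		/* zone size: 256 MiB */
	blk_queue_max_zone_append_sectors(q, 524288);	/* clamped to the minimum above */
}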
239 * blk_queue_max_segments - set max hw segments for a request for this queue
244 * Enables a low level driver to set an upper limit on the number of
251 printk(KERN_INFO "%s: set to minimum %d\n", in blk_queue_max_segments()
255 q->limits.max_segments = max_segments; in blk_queue_max_segments()
260 * blk_queue_max_discard_segments - set max segments for discard requests
265 * Enables a low level driver to set an upper limit on the number of
271 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
276 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
281 * Enables a low level driver to set an upper limit on the size of a
288 printk(KERN_INFO "%s: set to minimum %d\n", in blk_queue_max_segment_size()
293 WARN_ON_ONCE(q->limits.virt_boundary_mask); in blk_queue_max_segment_size()
295 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
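Segment limits usually mirror the controller's scatter-gather capabilities; per the WARN_ON_ONCE() above, a driver that relies on a virt_boundary_mask must not also set a segment size. An illustrative sketch for a hypothetical DMA engine with a 128-entry SG table and 64 KiB per entry:

#include <linux/blkdev.h>

static void mydma_set_segment_limits(struct request_queue *q)
{
	blk_queue_max_segments(q, 128);			/* SG table entries per request */
	blk_queue_max_segment_size(q, 64 * 1024);	/* bytes per SG entry */
}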
300 * blk_queue_logical_block_size - set logical block size for the queue
305 * This should be set to the lowest possible block size that the
311 struct queue_limits *limits = &q->limits; in blk_queue_logical_block_size()
313 limits->logical_block_size = size; in blk_queue_logical_block_size()
315 if (limits->physical_block_size < size) in blk_queue_logical_block_size()
316 limits->physical_block_size = size; in blk_queue_logical_block_size()
318 if (limits->io_min < limits->physical_block_size) in blk_queue_logical_block_size()
319 limits->io_min = limits->physical_block_size; in blk_queue_logical_block_size()
321 limits->max_hw_sectors = in blk_queue_logical_block_size()
322 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
323 limits->max_sectors = in blk_queue_logical_block_size()
324 round_down(limits->max_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
329 * blk_queue_physical_block_size - set physical block size for the queue
334 * This should be set to the lowest possible sector size that the
335 * hardware can operate on without reverting to read-modify-write
340 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
342 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
343 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
345 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
346 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
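A 512-emulated drive reports the two sizes separately, and the helpers then pull io_min up to the physical block size automatically. Illustrative only:

#include <linux/blkdev.h>

static void mydisk_set_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* internal sector size */
}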
351 * blk_queue_zone_write_granularity - set zone write granularity for the queue
356 * This should be set to the lowest possible size allowing to write in
365 q->limits.zone_write_granularity = size; in blk_queue_zone_write_granularity()
367 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
368 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
373 * blk_queue_alignment_offset - set physical block alignment offset
379 * the legacy DOS partition table 63-sector offset. Low-level drivers
385 q->limits.alignment_offset = in blk_queue_alignment_offset()
386 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
387 q->limits.misaligned = 0; in blk_queue_alignment_offset()
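As a concrete reading of the 63-sector remark: a 512-emulated drive with 4 KiB physical sectors that shifts its layout so legacy partitions starting at LBA 63 come out aligned has its first naturally aligned logical block at LBA 7, i.e. 3584 bytes in, and would report that with blk_queue_alignment_offset(q, 3584). queue_limit_alignment_offset() below then returns 0 for a partition starting at sector 63 (63 mod 8 == 7) and a non-zero misalignment for one starting at sector 0.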
393 struct request_queue *q = disk->queue; in disk_update_readahead()
396 * For read-ahead of large files to be effective, we need to read ahead in disk_update_readahead()
399 disk->bdi->ra_pages = in disk_update_readahead()
401 disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9); in disk_update_readahead()
406 * blk_limits_io_min - set minimum request size for a device
418 limits->io_min = min; in blk_limits_io_min()
420 if (limits->io_min < limits->logical_block_size) in blk_limits_io_min()
421 limits->io_min = limits->logical_block_size; in blk_limits_io_min()
423 if (limits->io_min < limits->physical_block_size) in blk_limits_io_min()
424 limits->io_min = limits->physical_block_size; in blk_limits_io_min()
429 * blk_queue_io_min - set minimum request size for the queue
444 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
449 * blk_limits_io_opt - set optimal request size for a device
463 limits->io_opt = opt; in blk_limits_io_opt()
468 * blk_queue_io_opt - set optimal request size for the queue
482 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
483 if (!q->disk) in blk_queue_io_opt()
485 q->disk->bdi->ra_pages = in blk_queue_io_opt()
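RAID-style drivers are the typical users of these hints: io_min is usually the stripe chunk size and io_opt a full stripe. A sketch for a hypothetical four-drive array with 64 KiB chunks and three data drives per stripe:

#include <linux/blkdev.h>

static void myraid_set_io_hints(struct request_queue *q)
{
	unsigned int chunk_bytes = 64 * 1024;

	blk_queue_io_min(q, chunk_bytes);	/* stripe chunk */
	blk_queue_io_opt(q, chunk_bytes * 3);	/* full stripe */
}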
493 unsigned int granularity = max(lim->physical_block_size, lim->io_min); in queue_limit_alignment_offset()
497 return (granularity + lim->alignment_offset - alignment) % granularity; in queue_limit_alignment_offset()
505 if (!lim->max_discard_sectors) in queue_limit_discard_alignment()
509 alignment = lim->discard_alignment >> SECTOR_SHIFT; in queue_limit_discard_alignment()
510 granularity = lim->discard_granularity >> SECTOR_SHIFT; in queue_limit_discard_alignment()
518 offset = (granularity + alignment - offset) % granularity; in queue_limit_discard_alignment()
533 * blk_stack_limits - adjust queue_limits for stacked devices
534 * @t: the stacking driver limits (top device)
542 * struct (top) and then iteratively call the stacking function for
546 * Returns 0 if the top and bottom queue_limits are compatible. The
547 * top device's block sizes and alignment offsets may be adjusted to
549 * and alignments exist, -1 is returned and the resulting top
550 * queue_limits will have the misaligned flag set to indicate that
556 unsigned int top, bottom, alignment, ret = 0; in blk_stack_limits() local
558 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); in blk_stack_limits()
559 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); in blk_stack_limits()
560 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); in blk_stack_limits()
561 t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, in blk_stack_limits()
562 b->max_write_zeroes_sectors); in blk_stack_limits()
563 t->max_zone_append_sectors = min(t->max_zone_append_sectors, in blk_stack_limits()
564 b->max_zone_append_sectors); in blk_stack_limits()
565 t->bounce = max(t->bounce, b->bounce); in blk_stack_limits()
567 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, in blk_stack_limits()
568 b->seg_boundary_mask); in blk_stack_limits()
569 t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask, in blk_stack_limits()
570 b->virt_boundary_mask); in blk_stack_limits()
572 t->max_segments = min_not_zero(t->max_segments, b->max_segments); in blk_stack_limits()
573 t->max_discard_segments = min_not_zero(t->max_discard_segments, in blk_stack_limits()
574 b->max_discard_segments); in blk_stack_limits()
575 t->max_integrity_segments = min_not_zero(t->max_integrity_segments, in blk_stack_limits()
576 b->max_integrity_segments); in blk_stack_limits()
578 t->max_segment_size = min_not_zero(t->max_segment_size, in blk_stack_limits()
579 b->max_segment_size); in blk_stack_limits()
581 t->misaligned |= b->misaligned; in blk_stack_limits()
586 * compatible with the current top alignment. in blk_stack_limits()
588 if (t->alignment_offset != alignment) { in blk_stack_limits()
590 top = max(t->physical_block_size, t->io_min) in blk_stack_limits()
591 + t->alignment_offset; in blk_stack_limits()
592 bottom = max(b->physical_block_size, b->io_min) + alignment; in blk_stack_limits()
594 /* Verify that top and bottom intervals line up */ in blk_stack_limits()
595 if (max(top, bottom) % min(top, bottom)) { in blk_stack_limits()
596 t->misaligned = 1; in blk_stack_limits()
597 ret = -1; in blk_stack_limits()
601 t->logical_block_size = max(t->logical_block_size, in blk_stack_limits()
602 b->logical_block_size); in blk_stack_limits()
604 t->physical_block_size = max(t->physical_block_size, in blk_stack_limits()
605 b->physical_block_size); in blk_stack_limits()
607 t->io_min = max(t->io_min, b->io_min); in blk_stack_limits()
608 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); in blk_stack_limits()
609 t->dma_alignment = max(t->dma_alignment, b->dma_alignment); in blk_stack_limits()
611 /* Set non-power-of-2 compatible chunk_sectors boundary */ in blk_stack_limits()
612 if (b->chunk_sectors) in blk_stack_limits()
613 t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors); in blk_stack_limits()
616 if (t->physical_block_size & (t->logical_block_size - 1)) { in blk_stack_limits()
617 t->physical_block_size = t->logical_block_size; in blk_stack_limits()
618 t->misaligned = 1; in blk_stack_limits()
619 ret = -1; in blk_stack_limits()
623 if (t->io_min & (t->physical_block_size - 1)) { in blk_stack_limits()
624 t->io_min = t->physical_block_size; in blk_stack_limits()
625 t->misaligned = 1; in blk_stack_limits()
626 ret = -1; in blk_stack_limits()
630 if (t->io_opt & (t->physical_block_size - 1)) { in blk_stack_limits()
631 t->io_opt = 0; in blk_stack_limits()
632 t->misaligned = 1; in blk_stack_limits()
633 ret = -1; in blk_stack_limits()
637 if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { in blk_stack_limits()
638 t->chunk_sectors = 0; in blk_stack_limits()
639 t->misaligned = 1; in blk_stack_limits()
640 ret = -1; in blk_stack_limits()
643 t->raid_partial_stripes_expensive = in blk_stack_limits()
644 max(t->raid_partial_stripes_expensive, in blk_stack_limits()
645 b->raid_partial_stripes_expensive); in blk_stack_limits()
648 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) in blk_stack_limits()
649 % max(t->physical_block_size, t->io_min); in blk_stack_limits()
652 if (t->alignment_offset & (t->logical_block_size - 1)) { in blk_stack_limits()
653 t->misaligned = 1; in blk_stack_limits()
654 ret = -1; in blk_stack_limits()
657 t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); in blk_stack_limits()
658 t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); in blk_stack_limits()
659 t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); in blk_stack_limits()
662 if (b->discard_granularity) { in blk_stack_limits()
665 if (t->discard_granularity != 0 && in blk_stack_limits()
666 t->discard_alignment != alignment) { in blk_stack_limits()
667 top = t->discard_granularity + t->discard_alignment; in blk_stack_limits()
668 bottom = b->discard_granularity + alignment; in blk_stack_limits()
670 /* Verify that top and bottom intervals line up */ in blk_stack_limits()
671 if ((max(top, bottom) % min(top, bottom)) != 0) in blk_stack_limits()
672 t->discard_misaligned = 1; in blk_stack_limits()
675 t->max_discard_sectors = min_not_zero(t->max_discard_sectors, in blk_stack_limits()
676 b->max_discard_sectors); in blk_stack_limits()
677 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors, in blk_stack_limits()
678 b->max_hw_discard_sectors); in blk_stack_limits()
679 t->discard_granularity = max(t->discard_granularity, in blk_stack_limits()
680 b->discard_granularity); in blk_stack_limits()
681 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % in blk_stack_limits()
682 t->discard_granularity; in blk_stack_limits()
684 t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors, in blk_stack_limits()
685 b->max_secure_erase_sectors); in blk_stack_limits()
686 t->zone_write_granularity = max(t->zone_write_granularity, in blk_stack_limits()
687 b->zone_write_granularity); in blk_stack_limits()
688 t->zoned = max(t->zoned, b->zoned); in blk_stack_limits()
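blk_stack_limits() only merges two limit sets; a hypothetical MD/DM-style driver would reset the top-level limits and then fold in each component, treating the -1 return as a warning rather than a failure. The names are invented and the message is modeled on disk_stack_limits() below:

#include <linux/blkdev.h>

static void mytgt_stack_limits(struct gendisk *top, struct block_device **bottom,
			       int count, sector_t *start)
{
	struct queue_limits *lim = &top->queue->limits;
	int i;

	blk_set_stacking_limits(lim);
	for (i = 0; i < count; i++) {
		/* start[i]: offset of this component within the top device, in sectors */
		if (blk_stack_limits(lim, &bdev_get_queue(bottom[i])->limits,
				     start[i]) < 0)
			pr_warn("%s: device %pg is misaligned\n",
				top->disk_name, bottom[i]);
	}
}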
694 * disk_stack_limits - adjust queue limits for stacked drivers
695 * @disk: MD/DM gendisk (top)
700 * Merges the limits for a top level gendisk and a bottom level
706 struct request_queue *t = disk->queue; in disk_stack_limits()
708 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
711 disk->disk_name, bdev); in disk_stack_limits()
718 * blk_queue_update_dma_pad - update pad mask
729 if (mask > q->dma_pad_mask) in blk_queue_update_dma_pad()
730 q->dma_pad_mask = mask; in blk_queue_update_dma_pad()
735 * blk_queue_segment_boundary - set boundary rules for segment merging
741 if (mask < PAGE_SIZE - 1) { in blk_queue_segment_boundary()
742 mask = PAGE_SIZE - 1; in blk_queue_segment_boundary()
743 printk(KERN_INFO "%s: set to minimum %lx\n", in blk_queue_segment_boundary()
747 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
752 * blk_queue_virt_boundary - set boundary rules for bio merging
758 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
767 q->limits.max_segment_size = UINT_MAX; in blk_queue_virt_boundary()
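Controllers with PRP-style DMA, where every data buffer element after the first must start on a page boundary, are the classic users; note from the fragment above that setting a virt boundary also lifts max_segment_size to UINT_MAX, so the two limits are mutually exclusive. Illustrative only:

#include <linux/blkdev.h>

static void myctrl_set_virt_boundary(struct request_queue *q)
{
	blk_queue_virt_boundary(q, 4096 - 1);	/* buffer gaps at 4 KiB boundaries force a split */
}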
772 * blk_queue_dma_alignment - set dma length and memory alignment
777 * set required memory and length alignment for direct dma transactions.
783 q->limits.dma_alignment = mask; in blk_queue_dma_alignment()
788 * blk_queue_update_dma_alignment - update dma length and memory alignment
797 * (driver, device, transport etc) to set their respective
805 if (mask > q->limits.dma_alignment) in blk_queue_update_dma_alignment()
806 q->limits.dma_alignment = mask; in blk_queue_update_dma_alignment()
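The mask is the required alignment minus one, so a hypothetical HBA that can only DMA from 4-byte-aligned buffers would pass 3; stacked code uses the _update_ variant above, which only ever grows the mask. Illustrative only:

#include <linux/blkdev.h>

static void myhba_set_dma_alignment(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 4 - 1);	/* user buffers must be 4-byte aligned */
}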
811 * blk_set_queue_depth - tell the block layer about the device queue depth
818 q->queue_depth = depth; in blk_set_queue_depth()
824 * blk_queue_write_cache - configure queue's write cache
845 wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); in blk_queue_write_cache()
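A driver reports whether its hardware has a volatile write-back cache (so the block layer issues flushes) and whether it honors FUA writes. A hedged sketch with hypothetical capability flags:

#include <linux/blkdev.h>

static void mydev_set_cache_policy(struct request_queue *q, bool volatile_cache,
				   bool supports_fua)
{
	blk_queue_write_cache(q, volatile_cache, supports_fua);
}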
850 * blk_queue_required_elevator_features - Set a queue required elevator features
855 * only elevators that can be used are those that implement at least the set of
861 q->required_elevator_features = features; in blk_queue_required_elevator_features()
866 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
894 xa_for_each(&disk->part_tbl, idx, part) { in disk_has_partitions()
906 * disk_set_zoned - configure the zoned model for a disk
908 * @model: the zoned model to set
910 * Set the zoned model of @disk to @model.
920 struct request_queue *q = disk->queue; in disk_set_zoned()
921 unsigned int old_model = q->limits.zoned; in disk_set_zoned()
935 * devices to take advantage of the zone command set, similarly in disk_set_zoned()
951 q->limits.zoned = model; in disk_set_zoned()
954 * Set the zone write granularity to the device logical block in disk_set_zoned()
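A host-managed zoned driver would typically establish the block and zone sizes before declaring the model, since disk_set_zoned() derives the default zone write granularity from the logical block size as noted above. A sketch with invented values:

#include <linux/blkdev.h>

static void myzoned_declare_model(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	blk_queue_logical_block_size(q, 4096);
	blk_queue_chunk_sectors(q, 524288);	/* 256 MiB zones */
	disk_set_zoned(disk, BLK_ZONED_HM);	/* host-managed model */
}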
969 if (q->limits.misaligned) in bdev_alignment_offset()
970 return -1; in bdev_alignment_offset()
972 return queue_limit_alignment_offset(&q->limits, in bdev_alignment_offset()
973 bdev->bd_start_sect); in bdev_alignment_offset()
974 return q->limits.alignment_offset; in bdev_alignment_offset()
983 return queue_limit_discard_alignment(&q->limits, in bdev_discard_alignment()
984 bdev->bd_start_sect); in bdev_discard_alignment()
985 return q->limits.discard_alignment; in bdev_discard_alignment()