Lines Matching +full:1 +full:q

49 #define BLK_MQ_POLL_CLASSIC -1
64 #define RQF_STARTED ((__force req_flags_t)(1 << 1))
66 #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
68 #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
70 #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
72 #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
74 #define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
76 #define RQF_FAILED ((__force req_flags_t)(1 << 10))
78 #define RQF_QUIET ((__force req_flags_t)(1 << 11))
80 #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
82 #define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
84 #define RQF_PM ((__force req_flags_t)(1 << 15))
86 #define RQF_HASHED ((__force req_flags_t)(1 << 16))
88 #define RQF_STATS ((__force req_flags_t)(1 << 17))
91 #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
93 #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
95 #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
97 #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
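These RQF_* bits live in rq->rq_flags. Most are managed by the block core, but RQF_DONTPREP is commonly tested and set by drivers themselves so that a requeued request is not prepared twice. A minimal sketch of that pattern (the command-building step is left as a placeholder):

#include <linux/blkdev.h>

static blk_status_t example_prep_rq(struct request *rq)
{
	/* Prepare only once, even if the request is later requeued. */
	if (!(rq->rq_flags & RQF_DONTPREP)) {
		/* ... build the driver-specific command for rq here ... */
		rq->rq_flags |= RQF_DONTPREP;
	}
	return BLK_STS_OK;
}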
108 MQ_RQ_IN_FLIGHT = 1,
119 struct request_queue *q; /* member */
271 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
335 #define BLK_ALL_ZONES ((unsigned int)-1)
560 #define QUEUE_FLAG_DYING 1 /* queue being torn down */
590 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
591 (1 << QUEUE_FLAG_SAME_COMP) | \
592 (1 << QUEUE_FLAG_NOWAIT))
594 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
595 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
596 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
598 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
599 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
600 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
601 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
602 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
603 #define blk_queue_noxmerges(q) \
604 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
605 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
606 #define blk_queue_stable_writes(q) \
607 test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
608 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
609 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
610 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
611 #define blk_queue_zone_resetall(q) \
612 test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
613 #define blk_queue_secure_erase(q) \
614 (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
615 #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
616 #define blk_queue_scsi_passthrough(q) \
617 test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
618 #define blk_queue_pci_p2pdma(q) \
619 test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
621 #define blk_queue_rq_alloc_time(q) \
622 test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
624 #define blk_queue_rq_alloc_time(q) false
630 #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
631 #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
632 #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
633 #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
634 #define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
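blk_queue_flag_set()/blk_queue_flag_clear() are how a driver publishes queue properties that the blk_queue_*() test macros above read back. A hedged sketch of how an SSD-style driver might mark its queue during setup (queue allocation itself is elided):

#include <linux/blkdev.h>

static void example_mark_queue(struct request_queue *q)
{
	/* Non-rotational device: no seek penalty, not an entropy source. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	if (blk_queue_nonrot(q))
		pr_debug("queue flagged as non-rotational\n");
}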
636 extern void blk_set_pm_only(struct request_queue *q);
637 extern void blk_clear_pm_only(struct request_queue *q);
650 static inline bool queue_is_mq(struct request_queue *q)
652 return q->mq_ops;
656 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
658 return q->rpm_status;
661 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
668 blk_queue_zoned_model(struct request_queue *q)
671 return q->limits.zoned;
675 static inline bool blk_queue_is_zoned(struct request_queue *q)
677 switch (blk_queue_zoned_model(q)) {
686 static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
688 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
692 static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
694 return blk_queue_is_zoned(q) ? q->nr_zones : 0;
697 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
700 if (!blk_queue_is_zoned(q))
702 return sector >> ilog2(q->limits.chunk_sectors);
705 static inline bool blk_queue_zone_is_seq(struct request_queue *q,
708 if (!blk_queue_is_zoned(q))
710 if (!q->conv_zones_bitmap)
712 return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
715 static inline void blk_queue_max_open_zones(struct request_queue *q,
718 q->max_open_zones = max_open_zones;
721 static inline unsigned int queue_max_open_zones(const struct request_queue *q)
723 return q->max_open_zones;
726 static inline void blk_queue_max_active_zones(struct request_queue *q,
729 q->max_active_zones = max_active_zones;
732 static inline unsigned int queue_max_active_zones(const struct request_queue *q)
734 return q->max_active_zones;
737 static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
741 static inline bool blk_queue_zone_is_seq(struct request_queue *q,
746 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
751 static inline unsigned int queue_max_open_zones(const struct request_queue *q)
755 static inline unsigned int queue_max_active_zones(const struct request_queue *q)
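Putting the zoned helpers together: given a starting sector, a caller can check whether the queue is zoned, which zone the sector falls in, and whether that zone is sequential. A sketch that only does something useful when CONFIG_BLK_DEV_ZONED is enabled (otherwise the stubs above reduce it to a no-op):

#include <linux/blkdev.h>

static void example_classify_sector(struct request_queue *q, sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return;

	pr_debug("sector %llu: zone %u of %u, %s zone\n",
		 (unsigned long long)sector,
		 blk_queue_zone_no(q, sector),
		 blk_queue_nr_zones(q),
		 blk_queue_zone_is_seq(q, sector) ? "sequential" : "conventional");
}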
797 static inline unsigned int blk_queue_depth(struct request_queue *q)
799 if (q->queue_depth)
800 return q->queue_depth;
802 return q->nr_requests;
858 extern void blk_rq_init(struct request_queue *q, struct request *rq);
862 extern int blk_lld_busy(struct request_queue *q);
868 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
872 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
873 extern void blk_queue_exit(struct request_queue *q);
874 extern void blk_sync_queue(struct request_queue *q);
895 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
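blk_queue_enter()/blk_queue_exit() bracket code that must not race with the queue being frozen or torn down; with BLK_MQ_REQ_NOWAIT the enter fails instead of sleeping. A hedged sketch of the usual pattern:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int example_touch_queue(struct request_queue *q)
{
	int ret;

	/* Fail fast if the queue is frozen, dying or pm-only. */
	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;

	/* ... safe to look at queue state here ... */

	blk_queue_exit(q);
	return 0;
}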
912 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
916 #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
917 #define SECTOR_MASK (PAGE_SECTORS - 1)
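SECTOR_SHIFT/SECTOR_SIZE fix the block layer's unit at 512-byte sectors regardless of the device's logical block size, and PAGE_SECTORS relates that to pages (8 sectors per 4 KiB page). A trivial conversion helper as illustration:

#include <linux/blkdev.h>

static inline sector_t example_bytes_to_sectors(loff_t bytes)
{
	/* Always 512-byte units, independent of the logical block size. */
	return bytes >> SECTOR_SHIFT;
}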
979 return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); /* in blk_rq_zone_no() */
984 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); /* in blk_rq_zone_is_seq() */
1012 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1016 return min(q->limits.max_discard_sectors,
1020 return q->limits.max_write_same_sectors;
1023 return q->limits.max_write_zeroes_sectors;
1025 return q->limits.max_sectors;
1032 static inline unsigned int blk_max_size_offset(struct request_queue *q,
1037 if (q->limits.chunk_sectors)
1038 chunk_sectors = q->limits.chunk_sectors;
1040 return q->limits.max_sectors;
1044 chunk_sectors -= offset & (chunk_sectors - 1);
1048 return min(q->limits.max_sectors, chunk_sectors);
1054 struct request_queue *q = rq->q; /* local, in blk_rq_get_max_sectors() */
1057 return q->limits.max_hw_sectors;
1059 if (!q->limits.chunk_sectors ||
1062 return blk_queue_get_max_sectors(q, req_op(rq));
1064 return min(blk_max_size_offset(q, offset, 0),
1065 blk_queue_get_max_sectors(q, req_op(rq)));
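For intuition, a hedged worked example of the clamp in blk_max_size_offset() on the power-of-two chunk_sectors path shown above:

/*
 * chunk_sectors = 1024, offset = 100:
 *   chunk_sectors -= offset & (chunk_sectors - 1)  ->  1024 - 100 = 924
 *   return min(q->limits.max_sectors, 924)
 * i.e. the I/O size is clamped so it never crosses the next chunk boundary.
 */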
1096 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
1103 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1105 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1107 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1110 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1113 void blk_queue_zone_write_granularity(struct request_queue *q,
1115 extern void blk_queue_alignment_offset(struct request_queue *q,
1119 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1121 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1122 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1135 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1136 extern void blk_queue_required_elevator_features(struct request_queue *q,
1138 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
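These setters are normally called once from a driver's probe path, before the disk is added, to describe the hardware to the block layer. A hedged sketch using only helpers declared above (all values are made up):

#include <linux/blkdev.h>

static void example_set_limits(struct request_queue *q)
{
	blk_queue_io_min(q, 4096);			/* preferred minimum I/O size */
	blk_queue_io_opt(q, 64 * 1024);			/* optimal I/O size */
	blk_queue_max_discard_sectors(q, 8 * 1024);	/* 4 MiB discards, in sectors */
	blk_queue_write_cache(q, true, true);		/* volatile cache + FUA support */
	blk_set_queue_depth(q, 64);
}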
1147 * own special payload. In that case we still return 1 here so that this
1153 return 1; /* in blk_rq_nr_phys_segments() */
1163 return max_t(unsigned short, rq->nr_phys_segments, 1); /* in blk_rq_nr_discard_segments() */
1166 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1168 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1173 return __blk_rq_map_sg(q, rq, sglist, &last_sg);
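blk_rq_map_sg() flattens a request's bios into a scatterlist for DMA; the table needs room for blk_rq_nr_phys_segments() entries and the return value is the number of entries actually used. A hedged sketch of typical .queue_rq-side usage (EXAMPLE_MAX_SEGS is a hypothetical driver limit):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define EXAMPLE_MAX_SEGS	128	/* hypothetical per-command segment limit */

static int example_map_request(struct request *rq, struct scatterlist *sgl)
{
	int nents;

	sg_init_table(sgl, EXAMPLE_MAX_SEGS);
	nents = blk_rq_map_sg(rq->q, rq, sgl);
	if (!nents)
		return 0;	/* no data to map, e.g. a flush */

	/* ... hand sgl[0..nents-1] to the DMA engine ... */
	return nents;
}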
1286 #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
1294 #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
1295 #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
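The BLKDEV_ZERO_* bits above are passed as the flags argument of blkdev_issue_zeroout(): NOUNMAP forces real zeroes to be written rather than deallocating blocks, and NOFALLBACK refuses the explicit-write fallback when the device cannot offload zeroing. A hedged sketch:

#include <linux/blkdev.h>

static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	/* Offloaded zeroing only; fails if the device cannot do it. */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}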
1337 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1339 return q->limits.seg_boundary_mask;
1342 static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1344 return q->limits.virt_boundary_mask;
1347 static inline unsigned int queue_max_sectors(const struct request_queue *q)
1349 return q->limits.max_sectors;
1352 static inline unsigned int queue_max_bytes(struct request_queue *q)
1354 return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1357 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1359 return q->limits.max_hw_sectors;
1362 static inline unsigned short queue_max_segments(const struct request_queue *q)
1364 return q->limits.max_segments;
1367 static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1369 return q->limits.max_discard_segments;
1372 static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1374 return q->limits.max_segment_size;
1377 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1380 const struct queue_limits *l = &q->limits;
1385 static inline unsigned queue_logical_block_size(const struct request_queue *q)
1389 if (q && q->limits.logical_block_size)
1390 retval = q->limits.logical_block_size;
1400 static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1402 return q->limits.physical_block_size;
1410 static inline unsigned int queue_io_min(const struct request_queue *q)
1412 return q->limits.io_min;
1420 static inline unsigned int queue_io_opt(const struct request_queue *q)
1422 return q->limits.io_opt;
1431 queue_zone_write_granularity(const struct request_queue *q)
1433 return q->limits.zone_write_granularity;
1442 static inline int queue_alignment_offset(const struct request_queue *q)
1444 if (q->limits.misaligned)
1445 return -1;
1447 return q->limits.alignment_offset;
1461 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_alignment_offset() */
1463 if (q->limits.misaligned)
1464 return -1;
1466 return queue_limit_alignment_offset(&q->limits,
1468 return q->limits.alignment_offset;
1471 static inline int queue_discard_alignment(const struct request_queue *q)
1473 if (q->limits.discard_misaligned)
1474 return -1;
1476 return q->limits.discard_alignment;
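The getters above are what stacking drivers and upper layers use instead of poking q->limits directly. A small sketch that logs the common geometry values:

#include <linux/blkdev.h>

static void example_dump_limits(struct request_queue *q)
{
	pr_info("lbs=%u pbs=%u io_min=%u io_opt=%u max_sectors=%u\n",
		queue_logical_block_size(q),
		queue_physical_block_size(q),
		queue_io_min(q),
		queue_io_opt(q),
		queue_max_sectors(q));
}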
1504 * If max_discard_segments > 1, the driver takes every bio
1513 queue_max_discard_segments(req->q) > 1) /* in blk_discard_mergable() */
1520 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_discard_alignment() */
1523 return queue_limit_discard_alignment(&q->limits,
1525 return q->limits.discard_alignment;
1530 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_write_same() */
1532 if (q)
1533 return q->limits.max_write_same_sectors;
1540 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_write_zeroes_sectors() */
1542 if (q)
1543 return q->limits.max_write_zeroes_sectors;
1550 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_zoned_model() */
1552 if (q)
1553 return blk_queue_zoned_model(q);
1560 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_is_zoned() */
1562 if (q)
1563 return blk_queue_is_zoned(q);
1570 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_zone_sectors() */
1572 if (q)
1573 return blk_queue_zone_sectors(q);
1579 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_max_open_zones() */
1581 if (q)
1582 return queue_max_open_zones(q);
1588 struct request_queue *q = bdev_get_queue(bdev); /* local, in bdev_max_active_zones() */
1590 if (q)
1591 return queue_max_active_zones(q);
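The bdev_*() wrappers above resolve a block_device to its request_queue and fall back to safe defaults when there is none, which suits callers such as filesystems that hold a bdev rather than a queue. A hedged sketch:

#include <linux/blkdev.h>

static void example_probe_zoned(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return;

	pr_info("zoned bdev: %llu sectors/zone, max open %u, max active %u\n",
		(unsigned long long)bdev_zone_sectors(bdev),
		bdev_max_open_zones(bdev),
		bdev_max_active_zones(bdev));
}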
1595 static inline int queue_dma_alignment(const struct request_queue *q)
1597 return q ? q->dma_alignment : 511;
1600 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1603 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1613 size >>= 1; /* in blksize_bits() */
1620 return 1 << bdev->bd_inode->i_blkbits; /* in block_size() */
1634 BLK_INTEGRITY_VERIFY = 1 << 0,
1635 BLK_INTEGRITY_GENERATE = 1 << 1,
1636 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
1637 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
1685 blk_integrity_queue_supports_integrity(struct request_queue *q)
1687 return q->integrity.profile;
1695 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1698 q->limits.max_integrity_segments = segs;
1702 queue_max_integrity_segments(const struct request_queue *q)
1704 return q->limits.max_integrity_segments;
1735 if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) /* in rq_integrity_vec() */
1751 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1756 static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1771 blk_integrity_queue_supports_integrity(struct request_queue *q)
1786 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1790 static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
1816 bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1818 void blk_ksm_unregister(struct request_queue *q);
1823 struct request_queue *q)
1828 static inline void blk_ksm_unregister(struct request_queue *q) { }
1892 return rq->q->seq_zones_wlock &&
1893 test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); /* in blk_req_zone_is_write_locked() */