Lines matching refs: request_queue
33 struct request_queue;
131 struct request_queue *q;
399 struct request_queue { struct
633 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
634 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
635 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
675 extern void blk_set_pm_only(struct request_queue *q);
676 extern void blk_clear_pm_only(struct request_queue *q);
694 static inline bool queue_is_mq(struct request_queue *q) in queue_is_mq()
700 blk_queue_zoned_model(struct request_queue *q) in blk_queue_zoned_model()
707 static inline bool blk_queue_is_zoned(struct request_queue *q) in blk_queue_is_zoned()
718 static inline sector_t blk_queue_zone_sectors(struct request_queue *q) in blk_queue_zone_sectors()
724 static inline unsigned int blk_queue_nr_zones(struct request_queue *q) in blk_queue_nr_zones()
729 static inline unsigned int blk_queue_zone_no(struct request_queue *q, in blk_queue_zone_no()
737 static inline bool blk_queue_zone_is_seq(struct request_queue *q, in blk_queue_zone_is_seq()
747 static inline void blk_queue_max_open_zones(struct request_queue *q, in blk_queue_max_open_zones()
753 static inline unsigned int queue_max_open_zones(const struct request_queue *q) in queue_max_open_zones()
758 static inline void blk_queue_max_active_zones(struct request_queue *q, in blk_queue_max_active_zones()
764 static inline unsigned int queue_max_active_zones(const struct request_queue *q) in queue_max_active_zones()
769 static inline unsigned int blk_queue_nr_zones(struct request_queue *q) in blk_queue_nr_zones()
773 static inline bool blk_queue_zone_is_seq(struct request_queue *q, in blk_queue_zone_is_seq()
778 static inline unsigned int blk_queue_zone_no(struct request_queue *q, in blk_queue_zone_no()
783 static inline unsigned int queue_max_open_zones(const struct request_queue *q) in queue_max_open_zones()
787 static inline unsigned int queue_max_active_zones(const struct request_queue *q) in queue_max_active_zones()
829 static inline unsigned int blk_queue_depth(struct request_queue *q) in blk_queue_depth()
908 extern void blk_rq_init(struct request_queue *q, struct request *rq);
910 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
912 extern int blk_lld_busy(struct request_queue *q);
918 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
925 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
927 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
932 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
933 extern void blk_queue_exit(struct request_queue *q);
934 extern void blk_sync_queue(struct request_queue *q);
935 extern int blk_rq_map_user(struct request_queue *, struct request *,
939 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
940 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
943 extern void blk_execute_rq(struct request_queue *, struct gendisk *,
945 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
954 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
956 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
1055 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors()
1075 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset()
1097 struct request_queue *q = rq->q; in blk_rq_get_max_sectors()
1138 extern void blk_cleanup_queue(struct request_queue *);
1139 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1140 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1141 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1142 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1143 extern void blk_queue_max_discard_segments(struct request_queue *,
1145 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1146 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1148 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1150 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1152 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
1153 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1155 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1156 extern void blk_queue_alignment_offset(struct request_queue *q,
1158 void blk_queue_update_readahead(struct request_queue *q);
1160 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1162 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1163 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1170 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1171 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1172 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1173 extern void blk_queue_dma_alignment(struct request_queue *, int);
1174 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1175 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1176 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1177 extern void blk_queue_required_elevator_features(struct request_queue *q,
1179 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1207 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1209 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg()
1218 bool __must_check blk_get_queue(struct request_queue *);
1219 struct request_queue *blk_alloc_queue(int node_id);
1220 extern void blk_put_queue(struct request_queue *);
1221 extern void blk_set_queue_dying(struct request_queue *);
1381 static inline unsigned long queue_segment_boundary(const struct request_queue *q) in queue_segment_boundary()
1386 static inline unsigned long queue_virt_boundary(const struct request_queue *q) in queue_virt_boundary()
1391 static inline unsigned int queue_max_sectors(const struct request_queue *q) in queue_max_sectors()
1396 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors()
1401 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments()
1406 static inline unsigned short queue_max_discard_segments(const struct request_queue *q) in queue_max_discard_segments()
1411 static inline unsigned int queue_max_segment_size(const struct request_queue *q) in queue_max_segment_size()
1416 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) in queue_max_zone_append_sectors()
1424 static inline unsigned queue_logical_block_size(const struct request_queue *q) in queue_logical_block_size()
1439 static inline unsigned int queue_physical_block_size(const struct request_queue *q) in queue_physical_block_size()
1449 static inline unsigned int queue_io_min(const struct request_queue *q) in queue_io_min()
1459 static inline unsigned int queue_io_opt(const struct request_queue *q) in queue_io_opt()
1469 static inline int queue_alignment_offset(const struct request_queue *q) in queue_alignment_offset()
1488 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1498 static inline int queue_discard_alignment(const struct request_queue *q) in queue_discard_alignment()
1531 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1541 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1551 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1561 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
1571 struct request_queue *q = bdev_get_queue(bdev); in bdev_is_zoned()
1581 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
1590 struct request_queue *q = bdev_get_queue(bdev); in bdev_max_open_zones()
1599 struct request_queue *q = bdev_get_queue(bdev); in bdev_max_active_zones()
1606 static inline int queue_dma_alignment(const struct request_queue *q) in queue_dma_alignment()
1611 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned()
1675 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1677 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1696 blk_integrity_queue_supports_integrity(struct request_queue *q) in blk_integrity_queue_supports_integrity()
1706 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1713 queue_max_integrity_segments(const struct request_queue *q) in queue_max_integrity_segments()
1762 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg()
1767 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg()
1782 blk_integrity_queue_supports_integrity(struct request_queue *q) in blk_integrity_queue_supports_integrity()
1797 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1801 static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) in queue_max_integrity_segments()
1827 bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1829 void blk_ksm_unregister(struct request_queue *q);
1834 struct request_queue *q) in blk_ksm_register()
1839 static inline void blk_ksm_unregister(struct request_queue *q) { } in blk_ksm_unregister()