Lines matching refs: request_queue

30 struct request_queue;
140 struct request_queue *queue;
395 struct request_queue {
588 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
589 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
590 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
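
These three helpers atomically set, clear, and test-and-set QUEUE_FLAG_* bits on a queue. A minimal sketch of typical use; the wrapper function is hypothetical, while the flag names and helpers are the kernel's own:

    #include <linux/blkdev.h>

    /* Mark a queue non-rotational, as an SSD-backed driver would. */
    static void example_mark_nonrot(struct request_queue *q)
    {
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

            /* blk_queue_flag_test_and_set() returns the previous value,
             * so one-time setup can be gated on the first transition. */
            if (!blk_queue_flag_test_and_set(QUEUE_FLAG_REGISTERED, q)) {
                    /* runs only on the first transition to "registered" */
            }
    }
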
624 extern void blk_set_pm_only(struct request_queue *q);
625 extern void blk_clear_pm_only(struct request_queue *q);
633 static inline bool queue_is_mq(struct request_queue *q) in queue_is_mq()
639 static inline enum rpm_status queue_rpm_status(struct request_queue *q) in queue_rpm_status()
644 static inline enum rpm_status queue_rpm_status(struct request_queue *q) in queue_rpm_status()
651 blk_queue_zoned_model(struct request_queue *q) in blk_queue_zoned_model()
658 static inline bool blk_queue_is_zoned(struct request_queue *q) in blk_queue_is_zoned()
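
blk_queue_zoned_model() reports BLK_ZONED_NONE, BLK_ZONED_HA, or BLK_ZONED_HM, and blk_queue_is_zoned() is true for the latter two. A sketch of a caller branching on the model (the function name is illustrative):

    #include <linux/blkdev.h>

    /* Host-managed zoned devices require sequential writes within each
     * zone; host-aware ones merely prefer them. */
    static bool example_needs_strict_zone_writes(struct request_queue *q)
    {
            if (!blk_queue_is_zoned(q))        /* BLK_ZONED_NONE */
                    return false;
            return blk_queue_zoned_model(q) == BLK_ZONED_HM;
    }
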
737 static inline unsigned int blk_queue_depth(struct request_queue *q) in blk_queue_depth()
870 extern int blk_lld_busy(struct request_queue *q);
871 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
872 extern void blk_queue_exit(struct request_queue *q);
873 extern void blk_sync_queue(struct request_queue *q);
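
blk_queue_enter() takes a usage reference that keeps the queue alive, and fails once the queue is dying; every success must be paired with blk_queue_exit(). A sketch of the pattern, assuming no special blk_mq_req_flags_t flags are needed:

    #include <linux/blkdev.h>

    static int example_with_queue_ref(struct request_queue *q)
    {
            int ret = blk_queue_enter(q, 0);   /* 0: block until entered */

            if (ret)                           /* e.g. queue is dying */
                    return ret;
            /* ... the queue is safe to use here ... */
            blk_queue_exit(q);
            return 0;
    }
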
889 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
921 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
922 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
923 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
924 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
925 extern void blk_queue_max_discard_segments(struct request_queue *,
927 void blk_queue_max_secure_erase_sectors(struct request_queue *q,
929 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
930 extern void blk_queue_max_discard_sectors(struct request_queue *q,
932 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
934 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
935 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
937 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
938 void blk_queue_zone_write_granularity(struct request_queue *q,
940 extern void blk_queue_alignment_offset(struct request_queue *q,
944 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
946 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
947 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
953 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
954 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
955 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
956 extern void blk_queue_dma_alignment(struct request_queue *, int);
957 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
958 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
959 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
972 extern void blk_queue_required_elevator_features(struct request_queue *q,
974 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
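
The setters above (lines 921-959) are typically called once while configuring a queue, before I/O starts. A sketch of probe-time setup; every value here is an illustrative assumption, not a recommendation:

    #include <linux/blkdev.h>

    static void example_set_limits(struct request_queue *q)
    {
            blk_queue_logical_block_size(q, 4096);
            blk_queue_physical_block_size(q, 4096);
            blk_queue_max_hw_sectors(q, 2048);     /* 2048 * 512 B = 1 MiB */
            blk_queue_max_segments(q, 128);
            blk_queue_max_segment_size(q, 64 * 1024);
            blk_queue_io_min(q, 4096);
            blk_queue_io_opt(q, 128 * 1024);
            blk_queue_write_cache(q, true, true);  /* volatile cache + FUA */
    }
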
977 bool __must_check blk_get_queue(struct request_queue *);
978 extern void blk_put_queue(struct request_queue *);
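
blk_get_queue() bumps the queue's reference count and is __must_check because it fails once teardown has begun; blk_put_queue() drops the reference. A sketch:

    #include <linux/blkdev.h>

    /* Take a long-lived reference; the caller later balances it with
     * blk_put_queue(q). Returns NULL if the queue is already dying. */
    static struct request_queue *example_hold_queue(struct request_queue *q)
    {
            if (!blk_get_queue(q))
                    return NULL;
            return q;
    }
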
1116 static inline unsigned long queue_segment_boundary(const struct request_queue *q) in queue_segment_boundary()
1121 static inline unsigned long queue_virt_boundary(const struct request_queue *q) in queue_virt_boundary()
1126 static inline unsigned int queue_max_sectors(const struct request_queue *q) in queue_max_sectors()
1131 static inline unsigned int queue_max_bytes(struct request_queue *q) in queue_max_bytes()
1136 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors()
1141 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments()
1146 static inline unsigned short queue_max_discard_segments(const struct request_queue *q) in queue_max_discard_segments()
1151 static inline unsigned int queue_max_segment_size(const struct request_queue *q) in queue_max_segment_size()
1156 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) in queue_max_zone_append_sectors()
1175 static inline unsigned queue_logical_block_size(const struct request_queue *q) in queue_logical_block_size()
1190 static inline unsigned int queue_physical_block_size(const struct request_queue *q) in queue_physical_block_size()
1200 static inline unsigned int queue_io_min(const struct request_queue *q) in queue_io_min()
1210 static inline unsigned int queue_io_opt(const struct request_queue *q) in queue_io_opt()
1221 queue_zone_write_granularity(const struct request_queue *q) in queue_zone_write_granularity()
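
The read-side accessors (lines 1116-1221) mirror the setters. A sketch that clamps a transfer length to the queue's limits; the helper name is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/minmax.h>

    static unsigned int example_clamp_len(const struct request_queue *q,
                                          unsigned int len)
    {
            /* Stay within one segment and keep block alignment; the
             * logical block size is always a power of two. */
            len = min(len, queue_max_segment_size(q));
            return round_down(len, queue_logical_block_size(q));
    }
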
1253 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1289 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
1299 struct request_queue *q = bdev_get_queue(bdev); in bdev_is_zoned()
1318 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
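
As the call sites above show, the bdev_*() wrappers first resolve the queue with bdev_get_queue() and then consult the queue-level helpers. A sketch computing the start of the zone containing a sector (the function name is illustrative; zone sizes are a power-of-two number of sectors):

    #include <linux/blkdev.h>

    static sector_t example_zone_start(struct block_device *bdev,
                                       sector_t sect)
    {
            if (!bdev_is_zoned(bdev))
                    return 0;
            return sect & ~(bdev_zone_sectors(bdev) - 1);
    }
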
1325 static inline int queue_dma_alignment(const struct request_queue *q) in queue_dma_alignment()
1342 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned()
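
queue_dma_alignment() returns an alignment mask (511 by default), and blk_rq_aligned() checks both a buffer address and its length against that mask. A sketch of gating zero-copy mapping on the check (the bounce fallback is elided):

    #include <linux/blkdev.h>

    static bool example_can_map_directly(struct request_queue *q,
                                         unsigned long addr,
                                         unsigned int len)
    {
            /* Non-zero means both addr and len satisfy the queue's DMA
             * alignment; otherwise a bounce copy would be needed. */
            return blk_rq_aligned(q, addr, len);
    }
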
1376 struct request_queue *q);
1381 struct request_queue *q) in blk_crypto_register()
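
Lines 1376 and 1381 are the CONFIG_BLK_INLINE_ENCRYPTION declaration of blk_crypto_register() and its inline stub, which lets callers compile either way. A hedged sketch, assuming the bool-returning form that takes a blk_crypto_profile:

    #include <linux/blk-crypto-profile.h>
    #include <linux/blkdev.h>

    /* Attach an inline-encryption profile to a queue; profile setup is
     * elided here. Under !CONFIG_BLK_INLINE_ENCRYPTION the stub at line
     * 1381 makes this a no-op that reports success. */
    static bool example_register_crypto(struct blk_crypto_profile *profile,
                                        struct request_queue *q)
    {
            return blk_crypto_register(profile, q);
    }
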