Lines matching refs: request_queue in include/linux/blkdev.h

29 struct request_queue;
146 struct request_queue *queue;
378 struct request_queue {
563 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
564 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
565 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
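
The flag helpers at 563-565 are how drivers toggle queue state bits, typically once at probe time. A minimal sketch, assuming a probe-time caller (the function name is hypothetical; the two flags are real queue flags from this header):

    #include <linux/blkdev.h>

    /* Hypothetical probe-time helper: mark the queue non-rotational
     * and stop it contributing to the entropy pool. */
    static void example_setup_flags(struct request_queue *q)
    {
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
            blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
    }
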
600 extern void blk_set_pm_only(struct request_queue *q);
601 extern void blk_clear_pm_only(struct request_queue *q);
609 static inline bool queue_is_mq(struct request_queue *q) in queue_is_mq()
615 static inline enum rpm_status queue_rpm_status(struct request_queue *q) in queue_rpm_status()
620 static inline enum rpm_status queue_rpm_status(struct request_queue *q) in queue_rpm_status()
627 static inline enum blk_zoned_model blk_queue_zoned_model(struct request_queue *q) in blk_queue_zoned_model()
634 static inline bool blk_queue_is_zoned(struct request_queue *q) in blk_queue_is_zoned()
713 static inline unsigned int blk_queue_depth(struct request_queue *q) in blk_queue_depth()
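
The inline predicates at 609-713 are cheap reads of queue state. A sketch of how a caller might combine two of them (the helper name is hypothetical; the predicates are the ones listed above):

    #include <linux/blkdev.h>

    /* Hypothetical check: in this sketch, only blk-mq queues on
     * zoned devices need zone-aware submission. */
    static bool example_needs_zone_handling(struct request_queue *q)
    {
            return queue_is_mq(q) && blk_queue_is_zoned(q);
    }
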
840 extern int blk_lld_busy(struct request_queue *q);
841 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
842 extern void blk_queue_exit(struct request_queue *q);
843 extern void blk_sync_queue(struct request_queue *q);
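
blk_queue_enter()/blk_queue_exit() at 841-842 pin the queue against freezing and teardown. A sketch of the canonical pairing for a non-blocking caller (the function and the work inside it are hypothetical; BLK_MQ_REQ_NOWAIT is the real flag from linux/blk-mq.h):

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>

    /* Sketch: pin the queue, do some work, drop the reference. */
    static int example_touch_queue(struct request_queue *q)
    {
            int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);

            if (ret)
                    return ret;     /* queue frozen, dying, or pm-only */
            /* ... the queue cannot be torn down here ... */
            blk_queue_exit(q);
            return 0;
    }
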
858 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
890 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
891 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
892 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
893 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
894 extern void blk_queue_max_discard_segments(struct request_queue *, unsigned short);
896 void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors);
898 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
899 extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors);
901 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors);
903 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
904 extern void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors);
906 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
907 void blk_queue_zone_write_granularity(struct request_queue *q, unsigned int size);
909 extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment);
913 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
915 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
916 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
922 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
923 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
924 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
925 extern void blk_queue_dma_alignment(struct request_queue *, int);
926 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
927 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
928 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
941 extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features);
943 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev);
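
The setters at 890-943 are how a driver publishes its device's limits, normally once at probe time. A sketch for an imaginary 4 KiB-sector device (the setter calls are the ones listed above; every value is made up):

    #include <linux/blkdev.h>

    /* Sketch: probe-time limits for a hypothetical device. */
    static void example_set_limits(struct request_queue *q)
    {
            blk_queue_logical_block_size(q, 4096);
            blk_queue_physical_block_size(q, 4096);
            blk_queue_max_hw_sectors(q, 256);    /* 128 KiB in 512-byte sectors */
            blk_queue_max_segments(q, 32);
            blk_queue_io_min(q, 4096);
            blk_queue_io_opt(q, 128 * 1024);
    }
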
946 bool __must_check blk_get_queue(struct request_queue *);
947 extern void blk_put_queue(struct request_queue *);
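
blk_get_queue() at 946 is __must_check because it fails once the queue is dying. A hypothetical wrapper that surfaces that as NULL:

    #include <linux/blkdev.h>

    /* Sketch: take a queue reference, or NULL if the queue is dying. */
    static struct request_queue *example_hold_queue(struct request_queue *q)
    {
            if (!blk_get_queue(q))
                    return NULL;
            return q;       /* balance with blk_put_queue() when done */
    }
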
1085 static inline unsigned long queue_segment_boundary(const struct request_queue *q) in queue_segment_boundary()
1090 static inline unsigned long queue_virt_boundary(const struct request_queue *q) in queue_virt_boundary()
1095 static inline unsigned int queue_max_sectors(const struct request_queue *q) in queue_max_sectors()
1100 static inline unsigned int queue_max_bytes(struct request_queue *q) in queue_max_bytes()
1105 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors()
1110 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments()
1115 static inline unsigned short queue_max_discard_segments(const struct request_queue *q) in queue_max_discard_segments()
1120 static inline unsigned int queue_max_segment_size(const struct request_queue *q) in queue_max_segment_size()
1125 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) in queue_max_zone_append_sectors()
1144 static inline unsigned queue_logical_block_size(const struct request_queue *q) in queue_logical_block_size()
1159 static inline unsigned int queue_physical_block_size(const struct request_queue *q) in queue_physical_block_size()
1169 static inline unsigned int queue_io_min(const struct request_queue *q) in queue_io_min()
1179 static inline unsigned int queue_io_opt(const struct request_queue *q) in queue_io_opt()
1190 static inline unsigned int queue_zone_write_granularity(const struct request_queue *q) in queue_zone_write_granularity()
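
The const getters at 1085-1190 are the read side of the setters above. A sketch that derives a buffer granularity from the published limits, via the bdev_get_queue() helper from line 858 (the wrapper itself is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/minmax.h>

    /* Sketch: pick the coarser of the logical block size and the
     * minimum I/O size as an allocation granularity. */
    static unsigned int example_buf_granularity(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            return max(queue_logical_block_size(q), queue_io_min(q));
    }
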
1222 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1291 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
1310 static inline int queue_dma_alignment(const struct request_queue *q) in queue_dma_alignment()
1327 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, unsigned int len) in blk_rq_aligned()
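
queue_dma_alignment() and blk_rq_aligned() at 1310-1327 let a driver vet a buffer before mapping it for DMA. A hypothetical check; a real driver would follow a failure with a bounce-buffer fallback:

    #include <linux/blkdev.h>

    /* Sketch: reject a buffer the device cannot address directly. */
    static int example_check_user_buf(struct request_queue *q,
                                      unsigned long addr, unsigned int len)
    {
            if (!blk_rq_aligned(q, addr, len))
                    return -EINVAL;     /* needs a bounce copy */
            return 0;
    }
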
1356 bool blk_crypto_register(struct blk_crypto_profile *profile, struct request_queue *q);
1361 static inline bool blk_crypto_register(struct blk_crypto_profile *profile, struct request_queue *q) in blk_crypto_register()
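
The two blk_crypto_register() entries at 1356 and 1361 are the CONFIG_BLK_INLINE_ENCRYPTION declaration and its always-true stub. A sketch of a caller, with the driver-specific profile initialisation elided:

    #include <linux/blkdev.h>

    /* Hypothetical caller: attach an inline-encryption profile to the
     * queue; profile setup is driver-specific and elided here. */
    static int example_enable_inline_crypto(struct request_queue *q,
                                            struct blk_crypto_profile *profile)
    {
            if (!blk_crypto_register(profile, q))
                    return -EINVAL;     /* queue rejected the profile */
            return 0;
    }
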