Lines Matching refs:request_queue
34 struct request_queue;
133 struct request_queue *q;
290 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
400 struct request_queue {
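The make_request_fn typedef above (together with blk_queue_make_request() further down in this listing) is the hook a bio-based driver registers to receive I/O. As a purely illustrative sketch, assuming a hypothetical handler named null_make_request() that completes every bio on the spot:

#include <linux/blkdev.h>

/*
 * Illustrative bio-based handler matching the make_request_fn typedef.
 * A real driver would remap or queue the bio rather than completing it
 * immediately.
 */
static blk_qc_t null_make_request(struct request_queue *q, struct bio *bio)
{
	/* Split the bio so it honours the queue's limits. */
	blk_queue_split(q, &bio);

	/* Report immediate, successful completion. */
	bio_endio(bio);

	return BLK_QC_T_NONE;
}

It would be wired up with blk_queue_make_request(q, null_make_request); see the setup sketch after the queue-allocation entries below.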
626 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
627 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
628 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
665 extern void blk_set_pm_only(struct request_queue *q);
666 extern void blk_clear_pm_only(struct request_queue *q);
684 static inline bool queue_is_mq(struct request_queue *q) in queue_is_mq()
690 blk_queue_zoned_model(struct request_queue *q) in blk_queue_zoned_model()
695 static inline bool blk_queue_is_zoned(struct request_queue *q) in blk_queue_is_zoned()
706 static inline sector_t blk_queue_zone_sectors(struct request_queue *q) in blk_queue_zone_sectors()
712 static inline unsigned int blk_queue_nr_zones(struct request_queue *q) in blk_queue_nr_zones()
717 static inline unsigned int blk_queue_zone_no(struct request_queue *q, in blk_queue_zone_no()
725 static inline bool blk_queue_zone_is_seq(struct request_queue *q, in blk_queue_zone_is_seq()
733 static inline unsigned int blk_queue_nr_zones(struct request_queue *q) in blk_queue_nr_zones()
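The zoned-device helpers above (blk_queue_zoned_model(), blk_queue_is_zoned(), blk_queue_zone_sectors(), blk_queue_nr_zones()) are the usual way to branch on zoned support. A small sketch; the reporting helper name report_zone_geometry() is an assumption:

#include <linux/blkdev.h>

/* Illustrative only: log basic zone geometry for a queue. */
static void report_zone_geometry(struct request_queue *q)
{
	if (!blk_queue_is_zoned(q)) {
		pr_info("queue is not zoned\n");
		return;
	}

	pr_info("zoned queue: %u zones of %llu sectors each\n",
		blk_queue_nr_zones(q),
		(unsigned long long)blk_queue_zone_sectors(q));
}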
772 static inline unsigned int blk_queue_depth(struct request_queue *q) in blk_queue_depth()
852 extern void blk_rq_init(struct request_queue *q, struct request *rq);
854 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
856 extern int blk_lld_busy(struct request_queue *q);
862 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
865 extern void blk_queue_split(struct request_queue *, struct bio **);
869 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
871 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
874 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
875 extern void blk_queue_exit(struct request_queue *q);
876 extern void blk_sync_queue(struct request_queue *q);
877 extern int blk_rq_map_user(struct request_queue *, struct request *,
881 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
882 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
885 extern void blk_execute_rq(struct request_queue *, struct gendisk *,
887 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
896 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
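Several declarations above are cut off by the match listing because their continuation lines do not mention request_queue. The sketch below fills in the missing parameters as assumptions based on the in-tree signatures of this era (blk_get_request() takes blk_mq_req_flags_t as its third argument; blk_execute_rq() takes a gendisk pointer and an at_head flag); the helper name issue_internal_request(), the REQ_OP_DRV_IN opcode choice and the use of blk_put_request() (absent from this listing since it only takes the request) are likewise illustrative:

#include <linux/blkdev.h>
#include <linux/err.h>

/* Hedged sketch: allocate, execute and release a passthrough request. */
static int issue_internal_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Driver-specific setup of the request payload would go here. */

	/* Synchronous execution; NULL gendisk, tail insertion. */
	blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return 0;
}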
898 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
993 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors()
1013 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset()
1026 struct request_queue *q = rq->q; in blk_rq_get_max_sectors()
1068 extern void blk_cleanup_queue(struct request_queue *);
1069 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1070 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1071 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1072 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1073 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1074 extern void blk_queue_max_discard_segments(struct request_queue *,
1076 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1077 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1079 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1081 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1083 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1084 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1085 extern void blk_queue_alignment_offset(struct request_queue *q,
1088 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1090 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1091 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1100 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1101 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1102 extern int blk_queue_dma_drain(struct request_queue *q,
1105 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1106 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1107 extern void blk_queue_dma_alignment(struct request_queue *, int);
1108 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1109 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1110 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1111 extern void blk_queue_required_elevator_features(struct request_queue *q,
1113 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1141 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
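blk_rq_map_sg() above is how a request-based driver flattens a request into a scatterlist for DMA. A hedged sketch, where map_request_for_dma() is a made-up name and the scatterlist is assumed to be preallocated with at least queue_max_segments() entries:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hedged sketch: map a request onto a driver-owned scatterlist. */
static int map_request_for_dma(struct request_queue *q, struct request *rq,
			       struct scatterlist *sgl)
{
	int nents;

	sg_init_table(sgl, queue_max_segments(q));

	nents = blk_rq_map_sg(q, rq, sgl);
	if (!nents)
		return -EIO;

	/* nents scatterlist entries now describe the request's data. */
	return nents;
}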
1145 bool __must_check blk_get_queue(struct request_queue *);
1146 struct request_queue *blk_alloc_queue(gfp_t);
1147 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
1148 extern void blk_put_queue(struct request_queue *);
1149 extern void blk_set_queue_dying(struct request_queue *);
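Taken together, the allocation and limit-setting entry points above cover the usual bio-based queue setup of this era. A hedged sketch that reuses the hypothetical null_make_request() handler from earlier; the limit values (512-byte logical blocks, 4 KiB physical blocks, 256-sector transfers) and the non-rotational flag are example choices, not taken from the source:

#include <linux/blkdev.h>

/* Defined in the earlier make_request sketch. */
static blk_qc_t null_make_request(struct request_queue *q, struct bio *bio);

/* Hedged sketch of bio-based queue setup with example limits. */
static struct request_queue *example_init_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return NULL;

	blk_queue_make_request(q, null_make_request);

	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 256);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	return q;
}

Teardown goes through blk_cleanup_queue(), with blk_get_queue()/blk_put_queue() managing any additional references.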
1262 static inline unsigned long queue_segment_boundary(const struct request_queue *q) in queue_segment_boundary()
1267 static inline unsigned long queue_virt_boundary(const struct request_queue *q) in queue_virt_boundary()
1272 static inline unsigned int queue_max_sectors(const struct request_queue *q) in queue_max_sectors()
1277 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors()
1282 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments()
1287 static inline unsigned short queue_max_discard_segments(const struct request_queue *q) in queue_max_discard_segments()
1292 static inline unsigned int queue_max_segment_size(const struct request_queue *q) in queue_max_segment_size()
1297 static inline unsigned short queue_logical_block_size(const struct request_queue *q) in queue_logical_block_size()
1312 static inline unsigned int queue_physical_block_size(const struct request_queue *q) in queue_physical_block_size()
1322 static inline unsigned int queue_io_min(const struct request_queue *q) in queue_io_min()
1332 static inline unsigned int queue_io_opt(const struct request_queue *q) in queue_io_opt()
1342 static inline int queue_alignment_offset(const struct request_queue *q) in queue_alignment_offset()
1361 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1372 static inline int queue_discard_alignment(const struct request_queue *q) in queue_discard_alignment()
1405 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1415 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1425 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1435 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
1445 struct request_queue *q = bdev_get_queue(bdev); in bdev_is_zoned()
1455 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
1462 static inline int queue_dma_alignment(const struct request_queue *q) in queue_dma_alignment()
1467 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned()
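The const accessors above, combined with bdev_get_queue(), are how upper layers read a device's limits without poking at the queue structure directly. A hedged sketch; dump_bdev_limits() is a made-up name:

#include <linux/blkdev.h>

/* Hedged sketch: log a few limits of an underlying block device. */
static void dump_bdev_limits(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	pr_info("lbs=%u pbs=%u max_hw_sectors=%u io_opt=%u dma_align=%d zoned=%d\n",
		queue_logical_block_size(q),
		queue_physical_block_size(q),
		queue_max_hw_sectors(q),
		queue_io_opt(q),
		queue_dma_alignment(q),
		bdev_is_zoned(bdev));
}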
1541 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1543 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1544 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1546 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1570 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1577 queue_max_integrity_segments(const struct request_queue *q) in queue_max_integrity_segments()
1626 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg()
1631 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg()
1656 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1660 static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) in queue_max_integrity_segments()
1664 static inline bool blk_integrity_merge_rq(struct request_queue *rq, in blk_integrity_merge_rq()
1670 static inline bool blk_integrity_merge_bio(struct request_queue *rq, in blk_integrity_merge_bio()
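The integrity helpers at the end mirror the data-path mapping above: a driver that supports protection information counts a request's integrity segments and maps them onto its own scatterlist, roughly as the in-tree SCSI and NVMe drivers do. A hedged sketch; map_integrity_metadata() and the preallocated sgl are assumptions:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch: map a request's integrity (protection) metadata.
 * "sgl" is assumed to hold queue_max_integrity_segments(q) entries.
 */
static int map_integrity_metadata(struct request *rq, struct scatterlist *sgl)
{
	struct request_queue *q = rq->q;
	int count;

	if (blk_rq_count_integrity_sg(q, rq->bio) >
	    queue_max_integrity_segments(q))
		return -EINVAL;

	sg_init_table(sgl, queue_max_integrity_segments(q));

	count = blk_rq_map_integrity_sg(q, rq->bio, sgl);
	if (!count)
		return -EIO;

	return count;
}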