Lines matching refs: request_queue (include/linux/blkdev.h)
34 struct request_queue;
65 struct request_queue *q; /* the queue this rl belongs to */
151 struct request_queue *q;
314 typedef void (request_fn_proc) (struct request_queue *q);
315 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
316 typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
317 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
318 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
323 typedef int (lld_busy_fn) (struct request_queue *q);
325 typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
326 typedef void (exit_rq_fn)(struct request_queue *, struct request *);
433 struct request_queue {
711 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
712 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
713 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
714 bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
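A minimal usage sketch for the blk_queue_flag_* helpers above, assuming a driver that already owns a live queue; the function name example_mark_ssd is hypothetical, while QUEUE_FLAG_NONROT, QUEUE_FLAG_ADD_RANDOM and QUEUE_FLAG_NOMERGES are flag constants defined elsewhere in this header.

#include <linux/blkdev.h>

/* Advertise a non-rotational device and drop it from entropy accounting. */
static void example_mark_ssd(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/* test_and_set returns whether the flag was already set */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
		pr_debug("merging was already disabled\n");
}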
743 extern int blk_set_preempt_only(struct request_queue *q);
744 extern void blk_clear_preempt_only(struct request_queue *q);
746 static inline int queue_in_flight(struct request_queue *q) in queue_in_flight()
769 static inline bool queue_is_rq_based(struct request_queue *q) in queue_is_rq_based()
774 static inline unsigned int blk_queue_cluster(struct request_queue *q) in blk_queue_cluster()
780 blk_queue_zoned_model(struct request_queue *q) in blk_queue_zoned_model()
785 static inline bool blk_queue_is_zoned(struct request_queue *q) in blk_queue_is_zoned()
796 static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) in blk_queue_zone_sectors()
802 static inline unsigned int blk_queue_zone_no(struct request_queue *q, in blk_queue_zone_no()
810 static inline bool blk_queue_zone_is_seq(struct request_queue *q, in blk_queue_zone_is_seq()
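A short sketch using the zoned-queue getters above to decide whether a write at a given sector targets a sequential-write-required zone; example_needs_sequential_write is a hypothetical helper name.

#include <linux/blkdev.h>

static bool example_needs_sequential_write(struct request_queue *q, sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;

	pr_debug("sector in zone %u (%u sectors per zone)\n",
		 blk_queue_zone_no(q, sector),
		 blk_queue_zone_sectors(q));

	/* true for sequential-write-required zones, false for conventional ones */
	return blk_queue_zone_is_seq(q, sector);
}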
873 static inline unsigned int blk_queue_depth(struct request_queue *q) in blk_queue_depth()
959 extern void blk_rq_init(struct request_queue *q, struct request *rq);
962 extern void __blk_put_request(struct request_queue *, struct request *);
963 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
965 extern void blk_requeue_request(struct request_queue *, struct request *);
966 extern int blk_lld_busy(struct request_queue *q);
972 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
975 extern void blk_delay_queue(struct request_queue *, unsigned long);
976 extern void blk_queue_split(struct request_queue *, struct bio **);
977 extern void blk_recount_segments(struct request_queue *, struct bio *);
981 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
983 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
986 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
987 extern void blk_queue_exit(struct request_queue *q);
988 extern void blk_start_queue(struct request_queue *q);
989 extern void blk_start_queue_async(struct request_queue *q);
990 extern void blk_stop_queue(struct request_queue *q);
991 extern void blk_sync_queue(struct request_queue *q);
992 extern void __blk_stop_queue(struct request_queue *q);
993 extern void __blk_run_queue(struct request_queue *q);
994 extern void __blk_run_queue_uncond(struct request_queue *q);
995 extern void blk_run_queue(struct request_queue *);
996 extern void blk_run_queue_async(struct request_queue *q);
997 extern int blk_rq_map_user(struct request_queue *, struct request *,
1001 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
1002 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
1005 extern void blk_execute_rq(struct request_queue *, struct gendisk *,
1007 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
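A sketch of the allocate/execute/release pattern behind blk_get_request() and blk_execute_rq() above, assuming a sleepable context; example_issue_drv_cmd is hypothetical, and blk_put_request() is the matching release helper (it takes only the request, so it does not appear in this match list).

#include <linux/blkdev.h>

/* Synchronously issue an empty driver-private command and wait for it. */
static int example_issue_drv_cmd(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = 10 * HZ;

	/* at_head = 0: queue behind requests that are already pending */
	blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return 0;	/* real drivers inspect their completion data here */
}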
1013 bool blk_poll(struct request_queue *q, blk_qc_t cookie);
1015 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue()
1093 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors()
1113 static inline unsigned int blk_max_size_offset(struct request_queue *q, in blk_max_size_offset()
1126 struct request_queue *q = rq->q; in blk_rq_get_max_sectors()
1154 extern struct request *blk_peek_request(struct request_queue *q);
1156 extern struct request *blk_fetch_request(struct request_queue *q);
1192 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
1194 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
1195 extern int blk_init_allocated_queue(struct request_queue *);
1196 extern void blk_cleanup_queue(struct request_queue *);
1197 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1198 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1199 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1200 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1201 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1202 extern void blk_queue_max_discard_segments(struct request_queue *,
1204 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1205 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1207 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1209 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1211 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1212 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1213 extern void blk_queue_alignment_offset(struct request_queue *q,
1216 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1218 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1219 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1228 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1229 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1230 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1231 extern int blk_queue_dma_drain(struct request_queue *q,
1234 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1235 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1236 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1237 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1238 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1239 extern void blk_queue_dma_alignment(struct request_queue *, int);
1240 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1241 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1242 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1243 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1244 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1245 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
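A sketch that ties the allocation, make_request and limit setters above together for a bio-based driver; example_make_request, example_setup_queue and the particular limit values are illustrative only.

#include <linux/bio.h>
#include <linux/blkdev.h>

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	/* ... hand the bio to the driver's own machinery ... */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static struct request_queue *example_setup_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return NULL;

	blk_queue_make_request(q, example_make_request);
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 1024);
	blk_queue_write_cache(q, true, false);	/* volatile cache, no FUA */

	return q;	/* torn down later with blk_cleanup_queue(q) */
}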
1272 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1276 bool __must_check blk_get_queue(struct request_queue *);
1277 struct request_queue *blk_alloc_queue(gfp_t);
1278 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
1280 extern void blk_put_queue(struct request_queue *);
1281 extern void blk_set_queue_dying(struct request_queue *);
1287 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1288 extern int blk_pre_runtime_suspend(struct request_queue *q);
1289 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1290 extern void blk_pre_runtime_resume(struct request_queue *q);
1291 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1292 extern void blk_set_runtime_active(struct request_queue *q);
1294 static inline void blk_pm_runtime_init(struct request_queue *q, in blk_pm_runtime_init()
1296 static inline int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend()
1300 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} in blk_post_runtime_suspend()
1301 static inline void blk_pre_runtime_resume(struct request_queue *q) {} in blk_pre_runtime_resume()
1302 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} in blk_post_runtime_resume()
1303 static inline void blk_set_runtime_active(struct request_queue *q) {} in blk_set_runtime_active()
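A sketch of the runtime-PM hooks above as a driver might wire them up: blk_pm_runtime_init() at probe time, and the pre/post pair inside the device's runtime-suspend callback. struct example_dev, example_setup_rpm and example_runtime_suspend are hypothetical, and the 5-second autosuspend delay is arbitrary.

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

struct example_dev {
	struct request_queue *queue;
	/* ... */
};

/* Called from the driver's probe routine once the queue exists. */
static void example_setup_rpm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_allow(dev);
}

static int example_runtime_suspend(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(edev->queue);
	if (err)
		return err;		/* queue still busy */

	/* ... put the hardware to sleep, setting err on failure ... */

	blk_post_runtime_suspend(edev->queue, err);
	return err;
}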
1368 extern int blk_queue_start_tag(struct request_queue *, struct request *);
1369 extern struct request *blk_queue_find_tag(struct request_queue *, int);
1370 extern void blk_queue_end_tag(struct request_queue *, struct request *);
1371 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1372 extern void blk_queue_free_tags(struct request_queue *);
1373 extern int blk_queue_resize_tags(struct request_queue *, int);
1437 static inline unsigned long queue_segment_boundary(struct request_queue *q) in queue_segment_boundary()
1442 static inline unsigned long queue_virt_boundary(struct request_queue *q) in queue_virt_boundary()
1447 static inline unsigned int queue_max_sectors(struct request_queue *q) in queue_max_sectors()
1452 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) in queue_max_hw_sectors()
1457 static inline unsigned short queue_max_segments(struct request_queue *q) in queue_max_segments()
1462 static inline unsigned short queue_max_discard_segments(struct request_queue *q) in queue_max_discard_segments()
1467 static inline unsigned int queue_max_segment_size(struct request_queue *q) in queue_max_segment_size()
1472 static inline unsigned short queue_logical_block_size(struct request_queue *q) in queue_logical_block_size()
1487 static inline unsigned int queue_physical_block_size(struct request_queue *q) in queue_physical_block_size()
1497 static inline unsigned int queue_io_min(struct request_queue *q) in queue_io_min()
1507 static inline unsigned int queue_io_opt(struct request_queue *q) in queue_io_opt()
1517 static inline int queue_alignment_offset(struct request_queue *q) in queue_alignment_offset()
1536 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1547 static inline int queue_discard_alignment(struct request_queue *q) in queue_discard_alignment()
1580 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1590 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1600 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1610 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
1620 struct request_queue *q = bdev_get_queue(bdev); in bdev_is_zoned()
1630 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
1637 static inline int queue_dma_alignment(struct request_queue *q) in queue_dma_alignment()
1642 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, in blk_rq_aligned()
1665 static inline bool queue_flush_queueable(struct request_queue *q) in queue_flush_queueable()
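A sketch combining bdev_get_queue() with the getters above to validate a raw buffer before mapping it with blk_rq_map_user(); example_buffer_ok is a hypothetical helper.

#include <linux/blkdev.h>

static bool example_buffer_ok(struct block_device *bdev,
			      unsigned long addr, unsigned int len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* queue_max_sectors() is in 512-byte sectors, so convert to bytes */
	if (len > (queue_max_sectors(q) << 9))
		return false;

	/* address and length must respect the queue's DMA alignment/padding */
	return blk_rq_aligned(q, addr, len);
}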
1679 static inline bool __bvec_gap_to_prev(struct request_queue *q, in __bvec_gap_to_prev()
1690 static inline bool bvec_gap_to_prev(struct request_queue *q, in bvec_gap_to_prev()
1703 static inline bool bios_segs_mergeable(struct request_queue *q, in bios_segs_mergeable()
1717 static inline bool bio_will_gap(struct request_queue *q, in bio_will_gap()
1805 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1807 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1808 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1810 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1834 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1841 queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments()
1899 static inline int blk_rq_count_integrity_sg(struct request_queue *q, in blk_rq_count_integrity_sg()
1904 static inline int blk_rq_map_integrity_sg(struct request_queue *q, in blk_rq_map_integrity_sg()
1929 static inline void blk_queue_max_integrity_segments(struct request_queue *q, in blk_queue_max_integrity_segments()
1933 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) in queue_max_integrity_segments()
1937 static inline bool blk_integrity_merge_rq(struct request_queue *rq, in blk_integrity_merge_rq()
1943 static inline bool blk_integrity_merge_bio(struct request_queue *rq, in blk_integrity_merge_bio()
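A hedged sketch of the integrity helpers in a driver's scatter-list setup path; example_map_prot is hypothetical, the prot_sg table is assumed to be sized by the caller, and error handling is trimmed.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Map a request's protection information into a driver-owned sg table. */
static int example_map_prot(struct request *rq, struct scatterlist *prot_sg)
{
	struct request_queue *q = rq->q;

	if (!blk_integrity_rq(rq))
		return 0;	/* no protection data attached */

	if (blk_rq_count_integrity_sg(q, rq->bio) > queue_max_integrity_segments(q))
		return -EIO;	/* more segments than the queue allows */

	/* returns the number of sg entries actually used */
	return blk_rq_map_integrity_sg(q, rq->bio, prot_sg);
}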