Lines matching refs:request_queue. Every hit below appears to come from the block layer's queue-settings code (block/blk-settings.c): the setters a block driver calls to register per-queue callbacks and to describe its device's limits. Short usage sketches follow the listing.
34 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) in blk_queue_prep_rq()
51 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) in blk_queue_unprep_rq()
57 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) in blk_queue_softirq_done()
63 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) in blk_queue_rq_timeout()
69 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) in blk_queue_rq_timed_out()
76 void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) in blk_queue_lld_busy()
163 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) in blk_queue_make_request()
190 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) in blk_queue_bounce_limit()
237 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) in blk_queue_max_hw_sectors()
269 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) in blk_queue_chunk_sectors()
281 void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors) in blk_queue_max_discard_sectors()
294 void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors) in blk_queue_max_write_same_sectors()
307 void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_zeroes_sectors) in blk_queue_max_write_zeroes_sectors()
323 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) in blk_queue_max_segments()
344 void blk_queue_max_discard_segments(struct request_queue *q, unsigned short max_segments) in blk_queue_max_discard_segments()
360 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) in blk_queue_max_segment_size()
382 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) in blk_queue_logical_block_size()
404 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) in blk_queue_physical_block_size()
427 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) in blk_queue_alignment_offset()
472 void blk_queue_io_min(struct request_queue *q, unsigned int min) in blk_queue_io_min()
510 void blk_queue_io_opt(struct request_queue *q, unsigned int opt) in blk_queue_io_opt()
521 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) in blk_queue_stack_limits()
688 struct request_queue *bq = bdev_get_queue(bdev); in bdev_stack_limits()
709 struct request_queue *t = disk->queue; in disk_stack_limits()
733 void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) in blk_queue_dma_pad()
749 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) in blk_queue_update_dma_pad()
777 int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size) in blk_queue_dma_drain()
798 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) in blk_queue_segment_boundary()
815 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask) in blk_queue_virt_boundary()
831 void blk_queue_dma_alignment(struct request_queue *q, int mask) in blk_queue_dma_alignment()
851 void blk_queue_update_dma_alignment(struct request_queue *q, int mask) in blk_queue_update_dma_alignment()
860 void blk_queue_flush_queueable(struct request_queue *q, bool queueable) in blk_queue_flush_queueable()
875 void blk_set_queue_depth(struct request_queue *q, unsigned int depth) in blk_set_queue_depth()
890 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua) in blk_queue_write_cache()
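
The first group of hits, blk_queue_prep_rq() through blk_queue_lld_busy(), registers per-queue callbacks for the legacy single-queue request path. A minimal sketch of how a driver might wire them up, assuming a v4.13-era tree where completions carry a blk_status_t; every mydev_* name is hypothetical:

#include <linux/blkdev.h>

/* Build the device-specific command for a request before dispatch. */
static int mydev_prep_rq(struct request_queue *q, struct request *rq)
{
        /* translate rq into a hardware command here */
        return BLKPREP_OK;
}

/* Per-request completion work, run in softirq context after the
 * driver's IRQ handler calls blk_complete_request(). */
static void mydev_softirq_done(struct request *rq)
{
        __blk_end_request_all(rq, BLK_STS_OK);
}

/* Called when a request has been outstanding longer than the queue
 * timeout set below. */
static enum blk_eh_timer_return mydev_timed_out(struct request *rq)
{
        return BLK_EH_RESET_TIMER;      /* grant one more timeout period */
}

static void mydev_wire_callbacks(struct request_queue *q)
{
        blk_queue_prep_rq(q, mydev_prep_rq);
        blk_queue_softirq_done(q, mydev_softirq_done);
        blk_queue_rq_timeout(q, 30 * HZ);
        blk_queue_rq_timed_out(q, mydev_timed_out);
}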
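
blk_queue_make_request() at line 163 serves the other driver style: bio-based devices that bypass request queueing altogether, as brd and device-mapper do. A sketch under the same kernel-version assumption; mydev_make_request() is hypothetical and completes every bio immediately instead of doing real work:

#include <linux/blkdev.h>

static blk_qc_t mydev_make_request(struct request_queue *q, struct bio *bio)
{
        /* a real driver would transfer the bio's data here */
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static void mydev_init_queue(struct request_queue *q)
{
        blk_queue_make_request(q, mydev_make_request);
}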
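
The hits from blk_queue_bounce_limit() through blk_queue_io_opt(), together with blk_set_queue_depth() and blk_queue_write_cache() at the end of the listing, are the limit setters a driver typically calls once at probe time. A sketch for an imagined device with 512-byte logical and 4 KiB physical sectors; all values and the mydev_ prefix are illustrative, not taken from any real driver:

#include <linux/blkdev.h>

static void mydev_set_limits(struct request_queue *q)
{
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);      /* can DMA anywhere */
        blk_queue_max_hw_sectors(q, 2048);              /* 1 MiB per request */
        blk_queue_max_segments(q, 128);
        blk_queue_max_segment_size(q, 64 * 1024);
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_io_min(q, 4096);                      /* discourage sub-4KiB I/O */
        blk_queue_io_opt(q, 128 * 1024);                /* preferred I/O size */
        blk_set_queue_depth(q, 64);
        blk_queue_write_cache(q, true, true);           /* volatile cache + FUA */
}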
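
blk_queue_stack_limits(), bdev_stack_limits() and disk_stack_limits() exist for stacking drivers (MD/DM style), which must fold each member device's limits into the top-level queue rather than invent their own. A sketch of the usual disk_stack_limits() call; the function and parameter names are hypothetical:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Merge a member device's queue limits into the stacked gendisk.
 * data_offset is in 512-byte sectors; disk_stack_limits() takes a
 * byte offset, hence the << 9. */
static void stacked_adopt_member(struct gendisk *disk,
                                 struct block_device *member,
                                 sector_t data_offset)
{
        disk_stack_limits(disk, member, data_offset << 9);
}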
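
The DMA-related hits, blk_queue_dma_pad() through blk_queue_update_dma_alignment(), express controller addressing quirks: buffer alignment, transfer padding, segment boundaries, and a drain buffer appended to awkward transfers, much as libata does for ATAPI devices. An NVMe-style controller would instead call blk_queue_virt_boundary(q, PAGE_SIZE - 1) to forbid gaps in the scatter list. A sketch; the drain heuristic and the mydev_ names are hypothetical:

#include <linux/blkdev.h>

/* Ask for a drain after any transfer that is not a multiple of 4 bytes. */
static int mydev_drain_needed(struct request *rq)
{
        return blk_rq_bytes(rq) & 3;
}

static int mydev_set_dma_constraints(struct request_queue *q, void *drain_buf)
{
        blk_queue_update_dma_alignment(q, 512 - 1);     /* 512-byte aligned buffers */
        blk_queue_update_dma_pad(q, 4 - 1);             /* pad transfers to 4 bytes */
        blk_queue_segment_boundary(q, 0xffffUL);        /* no segment crosses 64 KiB */

        /* reserves one scatterlist entry for the drain buffer; fails
         * with -EINVAL if the queue allows fewer than two segments */
        return blk_queue_dma_drain(q, mydev_drain_needed, drain_buf, 256);
}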