Lines matching refs: rq
Identifier cross-reference listing: each entry gives the source line number, the matching line of code, and the function (or scope) in which rq is referenced.
44 static int blk_mq_poll_stats_bkt(const struct request *rq) in blk_mq_poll_stats_bkt() argument
48 ddir = rq_data_dir(rq); in blk_mq_poll_stats_bkt()
49 bytes = blk_rq_bytes(rq); in blk_mq_poll_stats_bkt()
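
The entries above point at the helper that hybrid polling uses to group completion-latency samples by data direction and request size. As a hedged illustration only, here is a small standalone C model of that bucketing idea (two buckets per power-of-two size starting at 512 bytes, one per direction); the bucket count and clamping are assumptions for the sketch, not the kernel's constants.

/* Standalone model of the poll-stats bucketing suggested by the lines above.
 * Assumption: buckets are indexed as ddir + 2 * (log2(bytes) - 9). */
#include <stdio.h>

#define MODEL_POLL_STATS_BKTS 16   /* hypothetical bucket count */

static int ilog2_u32(unsigned int v)
{
    int log = -1;
    while (v) { v >>= 1; log++; }
    return log;
}

/* ddir: 0 = read, 1 = write; bytes: request payload size */
static int model_poll_stats_bkt(int ddir, unsigned int bytes)
{
    int bucket = ddir + 2 * (ilog2_u32(bytes) - 9);

    if (bucket < 0)
        return -1;                                /* too small to track   */
    if (bucket >= MODEL_POLL_STATS_BKTS)
        return ddir + MODEL_POLL_STATS_BKTS - 2;  /* clamp to last pair   */
    return bucket;
}

int main(void)
{
    printf("4KiB read   -> bucket %d\n", model_poll_stats_bkt(0, 4096));
    printf("4KiB write  -> bucket %d\n", model_poll_stats_bkt(1, 4096));
    printf("64KiB read  -> bucket %d\n", model_poll_stats_bkt(0, 65536));
    return 0;
}
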
93 struct request *rq, void *priv, in blk_mq_check_inflight() argument
103 if (rq->part == mi->part) in blk_mq_check_inflight()
119 struct request *rq, void *priv, in blk_mq_check_inflight_rw() argument
124 if (rq->part == mi->part) in blk_mq_check_inflight_rw()
125 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight_rw()
281 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init() local
285 rq->tag = -1; in blk_mq_rq_ctx_init()
286 rq->internal_tag = tag; in blk_mq_rq_ctx_init()
292 rq->tag = tag; in blk_mq_rq_ctx_init()
293 rq->internal_tag = -1; in blk_mq_rq_ctx_init()
294 data->hctx->tags->rqs[rq->tag] = rq; in blk_mq_rq_ctx_init()
298 rq->q = data->q; in blk_mq_rq_ctx_init()
299 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
300 rq->rq_flags = rq_flags; in blk_mq_rq_ctx_init()
301 rq->cpu = -1; in blk_mq_rq_ctx_init()
302 rq->cmd_flags = op; in blk_mq_rq_ctx_init()
304 rq->rq_flags |= RQF_PREEMPT; in blk_mq_rq_ctx_init()
306 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
307 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
308 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
309 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
310 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
311 rq->part = NULL; in blk_mq_rq_ctx_init()
312 rq->start_time_ns = ktime_get_ns(); in blk_mq_rq_ctx_init()
313 rq->io_start_time_ns = 0; in blk_mq_rq_ctx_init()
314 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
316 rq->nr_integrity_segments = 0; in blk_mq_rq_ctx_init()
318 rq->special = NULL; in blk_mq_rq_ctx_init()
320 rq->extra_len = 0; in blk_mq_rq_ctx_init()
321 rq->__deadline = 0; in blk_mq_rq_ctx_init()
323 INIT_LIST_HEAD(&rq->timeout_list); in blk_mq_rq_ctx_init()
324 rq->timeout = 0; in blk_mq_rq_ctx_init()
326 rq->end_io = NULL; in blk_mq_rq_ctx_init()
327 rq->end_io_data = NULL; in blk_mq_rq_ctx_init()
328 rq->next_rq = NULL; in blk_mq_rq_ctx_init()
331 rq->rl = NULL; in blk_mq_rq_ctx_init()
335 refcount_set(&rq->ref, 1); in blk_mq_rq_ctx_init()
336 return rq; in blk_mq_rq_ctx_init()
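
The init references above show the two-tag scheme: when an I/O scheduler owns the tag, it is stored in rq->internal_tag and rq->tag stays -1 until dispatch; without a scheduler the tag is already a driver tag and is published in tags->rqs[]. A minimal sketch of that bookkeeping, with invented names (init_model_rq, has_sched) and the assumption that -1 means "no tag assigned":

/* Toy model of the driver-tag / scheduler-tag split seen in the init path.
 * Names are illustrative, not kernel API. */
struct init_model_rq {
    int tag;            /* driver (hardware) tag, -1 until dispatch      */
    int internal_tag;   /* scheduler tag, -1 when no scheduler is used   */
};

static void init_model_rq_ctx_init(struct init_model_rq *rq, int tag,
                                   int has_sched)
{
    if (has_sched) {
        rq->tag = -1;            /* driver tag acquired later, at dispatch */
        rq->internal_tag = tag;  /* tag came from the scheduler's tag set  */
    } else {
        rq->tag = tag;           /* tag is already a driver tag            */
        rq->internal_tag = -1;
    }
}
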
344 struct request *rq; in blk_mq_get_request() local
384 rq = blk_mq_rq_ctx_init(data, tag, op); in blk_mq_get_request()
386 rq->elv.icq = NULL; in blk_mq_get_request()
389 blk_mq_sched_assign_ioc(rq, bio); in blk_mq_get_request()
391 e->type->ops.mq.prepare_request(rq, bio); in blk_mq_get_request()
392 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_get_request()
396 return rq; in blk_mq_get_request()
403 struct request *rq; in blk_mq_alloc_request() local
410 rq = blk_mq_get_request(q, NULL, op, &alloc_data); in blk_mq_alloc_request()
413 if (!rq) in blk_mq_alloc_request()
418 rq->__data_len = 0; in blk_mq_alloc_request()
419 rq->__sector = (sector_t) -1; in blk_mq_alloc_request()
420 rq->bio = rq->biotail = NULL; in blk_mq_alloc_request()
421 return rq; in blk_mq_alloc_request()
429 struct request *rq; in blk_mq_alloc_request_hctx() local
461 rq = blk_mq_get_request(q, NULL, op, &alloc_data); in blk_mq_alloc_request_hctx()
464 if (!rq) in blk_mq_alloc_request_hctx()
467 return rq; in blk_mq_alloc_request_hctx()
471 static void __blk_mq_free_request(struct request *rq) in __blk_mq_free_request() argument
473 struct request_queue *q = rq->q; in __blk_mq_free_request()
474 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_free_request()
476 const int sched_tag = rq->internal_tag; in __blk_mq_free_request()
478 if (rq->tag != -1) in __blk_mq_free_request()
479 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
486 void blk_mq_free_request(struct request *rq) in blk_mq_free_request() argument
488 struct request_queue *q = rq->q; in blk_mq_free_request()
490 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_request()
493 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
495 e->type->ops.mq.finish_request(rq); in blk_mq_free_request()
496 if (rq->elv.icq) { in blk_mq_free_request()
497 put_io_context(rq->elv.icq->ioc); in blk_mq_free_request()
498 rq->elv.icq = NULL; in blk_mq_free_request()
502 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_request()
503 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
506 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) in blk_mq_free_request()
509 rq_qos_done(q, rq); in blk_mq_free_request()
511 if (blk_rq_rl(rq)) in blk_mq_free_request()
512 blk_put_rl(blk_rq_rl(rq)); in blk_mq_free_request()
514 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_free_request()
515 if (refcount_dec_and_test(&rq->ref)) in blk_mq_free_request()
516 __blk_mq_free_request(rq); in blk_mq_free_request()
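
The free path above marks the request idle and only calls __blk_mq_free_request() once the last reference is dropped, because the timeout scan (see the refcount_inc_not_zero()/refcount_dec_and_test() pair further down) may still be holding a temporary reference. A hedged userspace model of that "free on last reference" pattern using C11 atomics; the names are illustrative stand-ins, not kernel API:

/* Model of "drop a reference, free only when it hits zero". */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ref_model_rq {
    atomic_int ref;
};

static void ref_model_free(struct ref_model_rq *rq)
{
    printf("last reference dropped, freeing request\n");
    free(rq);
}

static void ref_model_put(struct ref_model_rq *rq)
{
    /* atomic_fetch_sub returns the old value, so 1 means we were last */
    if (atomic_fetch_sub(&rq->ref, 1) == 1)
        ref_model_free(rq);
}

int main(void)
{
    struct ref_model_rq *rq = malloc(sizeof(*rq));
    atomic_init(&rq->ref, 2);   /* e.g. owner + a concurrent timeout scan */
    ref_model_put(rq);          /* timeout scan drops its reference       */
    ref_model_put(rq);          /* owner drops the final reference        */
    return 0;
}
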
520 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) in __blk_mq_end_request() argument
524 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
525 blk_mq_poll_stats_start(rq->q); in __blk_mq_end_request()
526 blk_stat_add(rq, now); in __blk_mq_end_request()
529 blk_account_io_done(rq, now); in __blk_mq_end_request()
531 if (rq->end_io) { in __blk_mq_end_request()
532 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
533 rq->end_io(rq, error); in __blk_mq_end_request()
535 if (unlikely(blk_bidi_rq(rq))) in __blk_mq_end_request()
536 blk_mq_free_request(rq->next_rq); in __blk_mq_end_request()
537 blk_mq_free_request(rq); in __blk_mq_end_request()
542 void blk_mq_end_request(struct request *rq, blk_status_t error) in blk_mq_end_request() argument
544 if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
546 __blk_mq_end_request(rq, error); in blk_mq_end_request()
552 struct request *rq = data; in __blk_mq_complete_request_remote() local
554 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
557 static void __blk_mq_complete_request(struct request *rq) in __blk_mq_complete_request() argument
559 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_complete_request()
563 if (!blk_mq_mark_complete(rq)) in __blk_mq_complete_request()
565 if (rq->internal_tag != -1) in __blk_mq_complete_request()
566 blk_mq_sched_completed_request(rq); in __blk_mq_complete_request()
568 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in __blk_mq_complete_request()
569 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request()
574 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in __blk_mq_complete_request()
578 rq->csd.func = __blk_mq_complete_request_remote; in __blk_mq_complete_request()
579 rq->csd.info = rq; in __blk_mq_complete_request()
580 rq->csd.flags = 0; in __blk_mq_complete_request()
581 smp_call_function_single_async(ctx->cpu, &rq->csd); in __blk_mq_complete_request()
583 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request()
616 void blk_mq_complete_request(struct request *rq) in blk_mq_complete_request() argument
618 if (unlikely(blk_should_fake_timeout(rq->q))) in blk_mq_complete_request()
620 __blk_mq_complete_request(rq); in blk_mq_complete_request()
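
The completion references show the choice between running softirq_done_fn() locally and bouncing the completion to the submitting CPU with an async IPI (rq->csd plus smp_call_function_single_async()) when QUEUE_FLAG_SAME_COMP asks for CPU affinity. The decision logic, as inferred from the listed lines, can be sketched as a small helper; the function itself is illustrative only:

/* Decision model for where a completion runs.  Flag semantics follow the
 * listed code; the helper and its parameters are illustrative. */
enum complete_where { COMPLETE_LOCALLY, COMPLETE_VIA_IPI };

static enum complete_where model_complete_where(int same_comp, int same_force,
                                                int this_cpu, int submit_cpu,
                                                int cpus_share_cache,
                                                int submit_cpu_online)
{
    /* caches count as "shared" only when SAME_FORCE is not set */
    int shared = !same_force && cpus_share_cache;

    if (!same_comp)
        return COMPLETE_LOCALLY;          /* no CPU affinity requested     */
    if (this_cpu != submit_cpu && !shared && submit_cpu_online)
        return COMPLETE_VIA_IPI;          /* csd + single async IPI        */
    return COMPLETE_LOCALLY;              /* already close enough, run here */
}
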
624 int blk_mq_request_started(struct request *rq) in blk_mq_request_started() argument
626 return blk_mq_rq_state(rq) != MQ_RQ_IDLE; in blk_mq_request_started()
630 void blk_mq_start_request(struct request *rq) in blk_mq_start_request() argument
632 struct request_queue *q = rq->q; in blk_mq_start_request()
634 blk_mq_sched_started_request(rq); in blk_mq_start_request()
636 trace_block_rq_issue(q, rq); in blk_mq_start_request()
639 rq->io_start_time_ns = ktime_get_ns(); in blk_mq_start_request()
641 rq->throtl_size = blk_rq_sectors(rq); in blk_mq_start_request()
643 rq->rq_flags |= RQF_STATS; in blk_mq_start_request()
644 rq_qos_issue(q, rq); in blk_mq_start_request()
647 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); in blk_mq_start_request()
649 blk_add_timer(rq); in blk_mq_start_request()
650 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); in blk_mq_start_request()
652 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
658 rq->nr_phys_segments++; in blk_mq_start_request()
663 static void __blk_mq_requeue_request(struct request *rq) in __blk_mq_requeue_request() argument
665 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
667 blk_mq_put_driver_tag(rq); in __blk_mq_requeue_request()
669 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
670 rq_qos_requeue(q, rq); in __blk_mq_requeue_request()
672 if (blk_mq_request_started(rq)) { in __blk_mq_requeue_request()
673 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in __blk_mq_requeue_request()
674 rq->rq_flags &= ~RQF_TIMED_OUT; in __blk_mq_requeue_request()
675 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
676 rq->nr_phys_segments--; in __blk_mq_requeue_request()
680 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) in blk_mq_requeue_request() argument
682 __blk_mq_requeue_request(rq); in blk_mq_requeue_request()
685 blk_mq_sched_requeue_request(rq); in blk_mq_requeue_request()
687 BUG_ON(blk_queued_rq(rq)); in blk_mq_requeue_request()
688 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); in blk_mq_requeue_request()
697 struct request *rq, *next; in blk_mq_requeue_work() local
703 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
704 if (!(rq->rq_flags & RQF_SOFTBARRIER)) in blk_mq_requeue_work()
707 rq->rq_flags &= ~RQF_SOFTBARRIER; in blk_mq_requeue_work()
708 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
709 blk_mq_sched_insert_request(rq, true, false, false); in blk_mq_requeue_work()
713 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
714 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
715 blk_mq_sched_insert_request(rq, false, false, false); in blk_mq_requeue_work()
721 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, in blk_mq_add_to_requeue_list() argument
724 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list()
731 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); in blk_mq_add_to_requeue_list()
735 rq->rq_flags |= RQF_SOFTBARRIER; in blk_mq_add_to_requeue_list()
736 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
738 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
787 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) in blk_mq_req_expired() argument
791 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) in blk_mq_req_expired()
793 if (rq->rq_flags & RQF_TIMED_OUT) in blk_mq_req_expired()
796 deadline = blk_rq_deadline(rq); in blk_mq_req_expired()
808 struct request *rq, void *priv, bool reserved) in blk_mq_check_expired() argument
816 if (!blk_mq_req_expired(rq, next)) in blk_mq_check_expired()
828 if (!refcount_inc_not_zero(&rq->ref)) in blk_mq_check_expired()
837 if (blk_mq_req_expired(rq, next)) in blk_mq_check_expired()
838 blk_mq_rq_timed_out(rq, reserved); in blk_mq_check_expired()
839 if (refcount_dec_and_test(&rq->ref)) in blk_mq_check_expired()
840 __blk_mq_free_request(rq); in blk_mq_check_expired()
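
The expiry references capture the pattern of taking a temporary reference (refcount_inc_not_zero()) before touching a possibly-completing request, re-checking the deadline, timing it out, and then dropping the reference. The per-request expiry test itself can be modelled as below: a request only counts as expired while in flight, not already marked timed out, and past its deadline; otherwise the scan records the nearest pending deadline. Names mirror the listed code but the helper is a sketch, not kernel API:

/* Model of the per-request expiry test used by the timeout scan. */
#include <stdbool.h>

enum model_rq_state { MODEL_RQ_IDLE, MODEL_RQ_IN_FLIGHT, MODEL_RQ_COMPLETE };

struct timeout_model_rq {
    enum model_rq_state state;
    bool timed_out;              /* stands in for RQF_TIMED_OUT */
    unsigned long deadline;      /* jiffies-like ticks          */
};

static bool model_req_expired(const struct timeout_model_rq *rq,
                              unsigned long now, unsigned long *next)
{
    if (rq->state != MODEL_RQ_IN_FLIGHT)
        return false;
    if (rq->timed_out)
        return false;
    if (now >= rq->deadline)     /* hedged: kernel uses time_after_eq() */
        return true;

    /* not expired: remember the earliest pending deadline for rearming */
    if (*next == 0 || rq->deadline < *next)
        *next = rq->deadline;
    return false;
}
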
922 struct request *rq; member
934 dispatch_data->rq = list_entry_rq(ctx->rq_list.next); in dispatch_rq_from_ctx()
935 list_del_init(&dispatch_data->rq->queuelist); in dispatch_rq_from_ctx()
941 return !dispatch_data->rq; in dispatch_rq_from_ctx()
950 .rq = NULL, in blk_mq_dequeue_from_ctx()
956 return data.rq; in blk_mq_dequeue_from_ctx()
967 bool blk_mq_get_driver_tag(struct request *rq) in blk_mq_get_driver_tag() argument
970 .q = rq->q, in blk_mq_get_driver_tag()
971 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), in blk_mq_get_driver_tag()
976 if (rq->tag != -1) in blk_mq_get_driver_tag()
979 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) in blk_mq_get_driver_tag()
983 rq->tag = blk_mq_get_tag(&data); in blk_mq_get_driver_tag()
984 if (rq->tag >= 0) { in blk_mq_get_driver_tag()
986 rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_mq_get_driver_tag()
989 data.hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
993 return rq->tag != -1; in blk_mq_get_driver_tag()
1018 struct request *rq) in blk_mq_mark_tag_wait() argument
1036 return blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1061 ret = blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1117 struct request *rq, *nxt; in blk_mq_dispatch_rq_list() local
1134 rq = list_first_entry(list, struct request, queuelist); in blk_mq_dispatch_rq_list()
1136 hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); in blk_mq_dispatch_rq_list()
1140 if (!blk_mq_get_driver_tag(rq)) { in blk_mq_dispatch_rq_list()
1148 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_dispatch_rq_list()
1160 list_del_init(&rq->queuelist); in blk_mq_dispatch_rq_list()
1162 bd.rq = rq; in blk_mq_dispatch_rq_list()
1186 list_add(&rq->queuelist, list); in blk_mq_dispatch_rq_list()
1187 __blk_mq_requeue_request(rq); in blk_mq_dispatch_rq_list()
1193 blk_mq_end_request(rq, BLK_STS_IOERR); in blk_mq_dispatch_rq_list()
1534 struct request *rq, in __blk_mq_insert_req_list() argument
1537 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_req_list()
1541 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1544 list_add(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_req_list()
1546 list_add_tail(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_req_list()
1549 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1552 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request()
1556 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1564 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) in blk_mq_request_bypass_insert() argument
1566 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_request_bypass_insert()
1567 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); in blk_mq_request_bypass_insert()
1570 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1581 struct request *rq; in blk_mq_insert_requests() local
1587 list_for_each_entry(rq, list, queuelist) { in blk_mq_insert_requests()
1588 BUG_ON(rq->mq_ctx != ctx); in blk_mq_insert_requests()
1589 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
1612 struct request *rq; in blk_mq_flush_plug_list() local
1626 rq = list_entry_rq(list.next); in blk_mq_flush_plug_list()
1627 list_del_init(&rq->queuelist); in blk_mq_flush_plug_list()
1628 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1629 if (rq->mq_ctx != this_ctx) { in blk_mq_flush_plug_list()
1637 this_ctx = rq->mq_ctx; in blk_mq_flush_plug_list()
1638 this_q = rq->q; in blk_mq_flush_plug_list()
1643 list_add_tail(&rq->queuelist, &ctx_list); in blk_mq_flush_plug_list()
1657 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) in blk_mq_bio_to_request() argument
1659 blk_init_request_from_bio(rq, bio); in blk_mq_bio_to_request()
1661 blk_rq_set_rl(rq, blk_get_rl(rq->q, bio)); in blk_mq_bio_to_request()
1663 blk_account_io_start(rq, true); in blk_mq_bio_to_request()
1666 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) in request_to_qc_t() argument
1668 if (rq->tag != -1) in request_to_qc_t()
1669 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false); in request_to_qc_t()
1671 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); in request_to_qc_t()
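
request_to_qc_t() packs the hardware queue number, the tag, and an "internal tag" flag into the poll cookie that blk_mq_poll() later decodes. A runnable model of that encoding is below; the shift and flag-bit values are assumptions chosen for illustration, not guaranteed to match the kernel's constants:

/* Standalone model of the poll-cookie encoding: queue number in the high
 * bits, tag in the low bits, plus a flag marking scheduler (internal) tags. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_QC_T_SHIFT    16
#define MODEL_QC_T_INTERNAL (1U << 31)

static uint32_t model_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
                                  int internal)
{
    return (queue_num << MODEL_QC_T_SHIFT) | tag |
           (internal ? MODEL_QC_T_INTERNAL : 0);
}

static unsigned int model_qc_t_to_tag(uint32_t cookie)
{
    return cookie & ((1U << MODEL_QC_T_SHIFT) - 1);
}

static unsigned int model_qc_t_to_queue_num(uint32_t cookie)
{
    return (cookie & ~MODEL_QC_T_INTERNAL) >> MODEL_QC_T_SHIFT;
}

int main(void)
{
    uint32_t c = model_tag_to_qc_t(42, 3, 1);
    printf("cookie=0x%08x tag=%u hwq=%u internal=%d\n",
           c, model_qc_t_to_tag(c), model_qc_t_to_queue_num(c),
           !!(c & MODEL_QC_T_INTERNAL));
    return 0;
}
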
1675 struct request *rq, in __blk_mq_issue_directly() argument
1678 struct request_queue *q = rq->q; in __blk_mq_issue_directly()
1680 .rq = rq, in __blk_mq_issue_directly()
1686 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
1702 __blk_mq_requeue_request(rq); in __blk_mq_issue_directly()
1714 struct request *rq, in __blk_mq_try_issue_directly() argument
1718 struct request_queue *q = rq->q; in __blk_mq_try_issue_directly()
1740 if (!blk_mq_get_driver_tag(rq)) { in __blk_mq_try_issue_directly()
1745 return __blk_mq_issue_directly(hctx, rq, cookie); in __blk_mq_try_issue_directly()
1750 blk_mq_sched_insert_request(rq, false, run_queue, false); in __blk_mq_try_issue_directly()
1755 struct request *rq, blk_qc_t *cookie) in blk_mq_try_issue_directly() argument
1764 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); in blk_mq_try_issue_directly()
1766 blk_mq_sched_insert_request(rq, false, true, false); in blk_mq_try_issue_directly()
1768 blk_mq_end_request(rq, ret); in blk_mq_try_issue_directly()
1773 blk_status_t blk_mq_request_issue_directly(struct request *rq) in blk_mq_request_issue_directly() argument
1778 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_request_issue_directly()
1779 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); in blk_mq_request_issue_directly()
1782 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true); in blk_mq_request_issue_directly()
1793 struct request *rq = list_first_entry(list, struct request, in blk_mq_try_issue_list_directly() local
1796 list_del_init(&rq->queuelist); in blk_mq_try_issue_list_directly()
1797 ret = blk_mq_request_issue_directly(rq); in blk_mq_try_issue_list_directly()
1801 list_add(&rq->queuelist, list); in blk_mq_try_issue_list_directly()
1804 blk_mq_end_request(rq, ret); in blk_mq_try_issue_list_directly()
1814 struct request *rq; in blk_mq_make_request() local
1838 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data); in blk_mq_make_request()
1839 if (unlikely(!rq)) { in blk_mq_make_request()
1846 rq_qos_track(q, rq, bio); in blk_mq_make_request()
1848 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_make_request()
1853 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1856 blk_insert_flush(rq); in blk_mq_make_request()
1862 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1884 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_mq_make_request()
1886 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1899 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_mq_make_request()
1912 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1913 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_make_request()
1916 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1917 blk_mq_sched_insert_request(rq, false, true, true); in blk_mq_make_request()
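
The blk_mq_make_request() references above end with the four ways a freshly built request can leave the submission path: flush insert, plug list, direct issue, or scheduler insert. A coarse decision model, inferred from the listed lines and deliberately simplified (the plug branches have extra conditions around single-queue and no-merge setups), is sketched below; the helper is illustrative only:

/* Rough model of where a new request goes in the make_request path. */
enum submit_path { SUBMIT_FLUSH, SUBMIT_PLUG, SUBMIT_DIRECT, SUBMIT_SCHED };

static enum submit_path model_submit_path(int is_flush_or_fua, int has_plug,
                                          int multi_hw_queue, int is_sync)
{
    if (is_flush_or_fua)
        return SUBMIT_FLUSH;    /* bypass the scheduler, blk_insert_flush() */
    if (has_plug)
        return SUBMIT_PLUG;     /* batch on the plug list for later flush   */
    if (multi_hw_queue && is_sync)
        return SUBMIT_DIRECT;   /* try to hand straight to the driver       */
    return SUBMIT_SCHED;        /* blk_mq_sched_insert_request()            */
}
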
1932 struct request *rq = tags->static_rqs[i]; in blk_mq_free_rqs() local
1934 if (!rq) in blk_mq_free_rqs()
1936 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
2005 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, in blk_mq_init_request() argument
2011 ret = set->ops->init_request(set, rq, hctx_idx, node); in blk_mq_init_request()
2016 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_init_request()
2078 struct request *rq = p; in blk_mq_alloc_rqs() local
2080 tags->static_rqs[i] = rq; in blk_mq_alloc_rqs()
2081 if (blk_mq_init_request(set, rq, hctx_idx, node)) { in blk_mq_alloc_rqs()
3049 struct request *rq) in blk_mq_poll_nsecs() argument
3070 bucket = blk_mq_poll_stats_bkt(rq); in blk_mq_poll_nsecs()
3082 struct request *rq) in blk_mq_poll_hybrid_sleep() argument
3089 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) in blk_mq_poll_hybrid_sleep()
3104 nsecs = blk_mq_poll_nsecs(q, hctx, rq); in blk_mq_poll_hybrid_sleep()
3109 rq->rq_flags |= RQF_MQ_POLL_SLEPT; in blk_mq_poll_hybrid_sleep()
3123 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) in blk_mq_poll_hybrid_sleep()
3138 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq) in __blk_mq_poll() argument
3150 if (blk_mq_poll_hybrid_sleep(q, hctx, rq)) in __blk_mq_poll()
3161 ret = q->mq_ops->poll(hctx, rq->tag); in __blk_mq_poll()
3185 struct request *rq; in blk_mq_poll() local
3192 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll()
3194 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll()
3201 if (!rq) in blk_mq_poll()
3205 return __blk_mq_poll(hctx, rq); in blk_mq_poll()
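
The final block of references covers polled completion: blk_mq_poll() decodes the cookie back into a request (driver tags vs. scheduler tags), blk_mq_poll_hybrid_sleep() may first sleep for a fraction of the expected completion time using the per-bucket stats from blk_mq_poll_stats_bkt(), and __blk_mq_poll() then spins on the driver's ->poll() until the request reaches MQ_RQ_COMPLETE. In the automatic hybrid mode the sleep length is roughly half the observed mean latency for the request's bucket; the helper below is only a sketch of that heuristic, not the kernel code:

/* Sketch of the hybrid-poll sleep decision: with a latency estimate for
 * this bucket, sleep roughly half of it before spinning, so the CPU is not
 * burned for the whole device service time.  Illustrative only. */
static unsigned long long model_hybrid_sleep_ns(unsigned long long mean_ns,
                                                int already_slept)
{
    if (already_slept)          /* stands in for RQF_MQ_POLL_SLEPT        */
        return 0;
    if (mean_ns == 0)           /* no stats yet: skip the sleep, just spin */
        return 0;
    return mean_ns / 2;         /* default heuristic: half the mean        */
}
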