Searched refs:flush_rq (Results 1 – 6 of 6) sorted by relevance

/Linux-v5.15/block/
blk-flush.c
214 static void flush_end_io(struct request *flush_rq, blk_status_t error) in flush_end_io() argument
216 struct request_queue *q = flush_rq->q; in flush_end_io()
220 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io()
225 if (!refcount_dec_and_test(&flush_rq->ref)) { in flush_end_io()
231 blk_account_io_flush(flush_rq); in flush_end_io()
237 WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE); in flush_end_io()
242 flush_rq->tag = BLK_MQ_NO_TAG; in flush_end_io()
244 blk_mq_put_driver_tag(flush_rq); in flush_end_io()
245 flush_rq->internal_tag = BLK_MQ_NO_TAG; in flush_end_io()
289 struct request *flush_rq = fq->flush_rq; in blk_kick_flush() local
[all …]
blk-mq.c
2652 unsigned int queue_depth, struct request *flush_rq) in blk_mq_clear_flush_rq_mapping() argument
2661 WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0); in blk_mq_clear_flush_rq_mapping()
2664 cmpxchg(&tags->rqs[i], flush_rq, NULL); in blk_mq_clear_flush_rq_mapping()
2681 struct request *flush_rq = hctx->fq->flush_rq; in blk_mq_exit_hctx() local
2687 set->queue_depth, flush_rq); in blk_mq_exit_hctx()
2689 set->ops->exit_request(set, flush_rq, hctx_idx); in blk_mq_exit_hctx()
2746 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
blk.h
27 struct request *flush_rq; member
/Linux-v5.15/drivers/infiniband/hw/irdma/
ctrl.c
2241 bool flush_sq = false, flush_rq = false; in irdma_sc_qp_flush_wqes() local
2243 if (info->rq && !qp->flush_rq) in irdma_sc_qp_flush_wqes()
2244 flush_rq = true; in irdma_sc_qp_flush_wqes()
2248 qp->flush_rq |= flush_rq; in irdma_sc_qp_flush_wqes()
2250 if (!flush_sq && !flush_rq) { in irdma_sc_qp_flush_wqes()
2263 if (flush_rq) in irdma_sc_qp_flush_wqes()
2286 FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) | in irdma_sc_qp_flush_wqes()
type.h
522 bool flush_rq:1; member
hw.c
2706 iwqp->sc_qp.flush_rq = false; in irdma_flush_wqes()