Lines matching refs:mq (cross-references to the identifier mq in drivers/mmc/core/queue.c). Each entry gives the source line number in that file, the code on that line, and the enclosing function; the trailing "argument"/"local" tag says whether mq is a function parameter or a local variable at that reference.
26 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq) in mmc_cqe_dcmd_busy() argument
29 return mq->in_flight[MMC_ISSUE_DCMD]; in mmc_cqe_dcmd_busy()
32 void mmc_cqe_check_busy(struct mmc_queue *mq) in mmc_cqe_check_busy() argument
34 if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq)) in mmc_cqe_check_busy()
35 mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY; in mmc_cqe_check_busy()
37 mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL; in mmc_cqe_check_busy()
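Taken together, the fragments above implement the CQE busy-flag bookkeeping: at most one direct command (DCMD) may be in flight, and the check routine clears stale busy bits before the next dispatch. A sketch of the full functions, reconstructed from these lines (the comments are mine; exact wording varies by kernel version):

```c
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* At most one DCMD (direct command) may be in flight at a time. */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	/* Drop the DCMD-busy flag once no DCMD remains in flight. */
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	/* Always re-test the queue-full condition on the next dispatch. */
	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}
```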
61 enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) in mmc_issue_type() argument
63 struct mmc_host *host = mq->card->host; in mmc_issue_type()
65 if (mq->use_cqe && !host->hsq_enabled) in mmc_issue_type()
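Only the CQE branch of mmc_issue_type() appears in the fragment; the rest of the body below (the mmc_cqe_issue_type() call and the read/write classification) is an assumption based on the upstream code of this era:

```c
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	/* With a real command queue engine, let the CQE path classify. */
	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	/* Otherwise reads and writes are issued asynchronously ... */
	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	/* ... and everything else (flush, discard, ...) synchronously. */
	return MMC_ISSUE_SYNC;
}
```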
74 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq) in __mmc_cqe_recovery_notifier() argument
76 if (!mq->recovery_needed) { in __mmc_cqe_recovery_notifier()
77 mq->recovery_needed = true; in __mmc_cqe_recovery_notifier()
78 schedule_work(&mq->recovery_work); in __mmc_cqe_recovery_notifier()
88 struct mmc_queue *mq = q->queuedata; in mmc_cqe_recovery_notifier() local
91 spin_lock_irqsave(&mq->lock, flags); in mmc_cqe_recovery_notifier()
92 __mmc_cqe_recovery_notifier(mq); in mmc_cqe_recovery_notifier()
93 spin_unlock_irqrestore(&mq->lock, flags); in mmc_cqe_recovery_notifier()
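The notifier pair separates locking from the state change: the inner helper flips recovery_needed and schedules the worker exactly once, while the outer function takes mq->lock around it. The container_of() plumbing from the mmc_request back to the queue is not visible in the fragments and is assumed here:

```c
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	/* Callers hold mq->lock; schedule recovery_work at most once. */
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
```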
100 struct mmc_queue *mq = req->q->queuedata; in mmc_cqe_timed_out() local
101 struct mmc_host *host = mq->card->host; in mmc_cqe_timed_out()
102 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); in mmc_cqe_timed_out()
125 struct mmc_queue *mq = q->queuedata; in mmc_mq_timed_out() local
126 struct mmc_card *card = mq->card; in mmc_mq_timed_out()
131 spin_lock_irqsave(&mq->lock, flags); in mmc_mq_timed_out()
132 ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled; in mmc_mq_timed_out()
133 spin_unlock_irqrestore(&mq->lock, flags); in mmc_mq_timed_out()
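mmc_mq_timed_out() is the blk-mq timeout hook; it only lets a timeout through to the CQE handler (mmc_cqe_timed_out(), whose opening lines are fragments 100-102 above) when no recovery is already pending and a real command queue engine is in use. For hsq and non-CQE hosts, timeouts are handled by the mmc core, so the block-layer timer is simply reset. The final return line is an assumption based on the upstream body:

```c
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	/* Ignore the timeout while recovering, or when CQE isn't in charge. */
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
```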
140 struct mmc_queue *mq = container_of(work, struct mmc_queue, in mmc_mq_recovery_handler() local
142 struct request_queue *q = mq->queue; in mmc_mq_recovery_handler()
143 struct mmc_host *host = mq->card->host; in mmc_mq_recovery_handler()
145 mmc_get_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
147 mq->in_recovery = true; in mmc_mq_recovery_handler()
149 if (mq->use_cqe && !host->hsq_enabled) in mmc_mq_recovery_handler()
150 mmc_blk_cqe_recovery(mq); in mmc_mq_recovery_handler()
152 mmc_blk_mq_recovery(mq); in mmc_mq_recovery_handler()
154 mq->in_recovery = false; in mmc_mq_recovery_handler()
156 spin_lock_irq(&mq->lock); in mmc_mq_recovery_handler()
157 mq->recovery_needed = false; in mmc_mq_recovery_handler()
158 spin_unlock_irq(&mq->lock); in mmc_mq_recovery_handler()
163 mmc_put_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
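The recovery worker claims the card, runs the CQE or plain blk-mq recovery path, clears the pending flag under the lock, and restarts the hardware queues. The hsq cqe_recovery_finish() call and the closing blk_mq_run_hw_queues() do not appear in the fragments and are assumptions from the upstream body:

```c
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	/* Kick the queue so requests parked during recovery get re-issued. */
	blk_mq_run_hw_queues(q, true);
}
```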
210 static int __mmc_init_request(struct mmc_queue *mq, struct request *req, in __mmc_init_request() argument
214 struct mmc_card *card = mq->card; in __mmc_init_request()
241 struct mmc_queue *mq = set->driver_data; in mmc_mq_exit_request() local
243 mmc_exit_request(mq->queue, req); in mmc_mq_exit_request()
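These two hooks manage the per-request scatterlist: .init_request pre-allocates it for the request's whole lifetime, .exit_request frees it. Only the mq references appear above, so mmc_alloc_sg(), req_to_mmc_queue_req() and mmc_get_max_segments() are assumed from the surrounding file:

```c
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	/* Pre-allocate the scatterlist once per request tag. */
	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set,
				struct request *req, unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);	/* frees mq_rq->sg */
}
```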
251 struct mmc_queue *mq = q->queuedata; in mmc_mq_queue_rq() local
252 struct mmc_card *card = mq->card; in mmc_mq_queue_rq()
259 if (mmc_card_removed(mq->card)) { in mmc_mq_queue_rq()
264 issue_type = mmc_issue_type(mq, req); in mmc_mq_queue_rq()
266 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
268 if (mq->recovery_needed || mq->busy) { in mmc_mq_queue_rq()
269 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
275 if (mmc_cqe_dcmd_busy(mq)) { in mmc_mq_queue_rq()
276 mq->cqe_busy |= MMC_CQE_DCMD_BUSY; in mmc_mq_queue_rq()
277 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
286 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) { in mmc_mq_queue_rq()
287 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
304 mq->busy = true; in mmc_mq_queue_rq()
306 mq->in_flight[issue_type] += 1; in mmc_mq_queue_rq()
307 get_card = (mmc_tot_in_flight(mq) == 1); in mmc_mq_queue_rq()
308 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); in mmc_mq_queue_rq()
310 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
318 mmc_get_card(card, &mq->ctx); in mmc_mq_queue_rq()
320 if (mq->use_cqe) { in mmc_mq_queue_rq()
327 issued = mmc_blk_mq_issue_rq(mq, req); in mmc_mq_queue_rq()
344 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
345 mq->in_flight[issue_type] -= 1; in mmc_mq_queue_rq()
346 if (mmc_tot_in_flight(mq) == 0) in mmc_mq_queue_rq()
348 mq->busy = false; in mmc_mq_queue_rq()
349 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
351 mmc_put_card(card, &mq->ctx); in mmc_mq_queue_rq()
353 WRITE_ONCE(mq->busy, false); in mmc_mq_queue_rq()
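mmc_mq_queue_rq() is the .queue_rq dispatch hook that ties most of the fragments above together: refuse work while recovery or another dispatch is in flight, apply the per-issue-type throttles, account the request, claim the card on the first in-flight request, and roll everything back if issuing fails. A trimmed sketch (it omits the RQF_DONTPREP retry bookkeeping and the long default timeout for sync requests, which are not in the fragments):

```c
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	/* Back off while recovery runs or another dispatch is in progress. */
	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/* hsq hosts cap in-flight async requests to bound latency. */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		break;
	}

	/* Parallel dispatch is not supported; serialize via mq->busy. */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);  /* first in-flight request */
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe)
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card;

		/* The request never started: undo the accounting. */
		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		put_card = (mmc_tot_in_flight(mq) == 0);
		mq->busy = false;
		spin_unlock_irq(&mq->lock);

		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}
```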
367 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_setup_queue() argument
372 blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); in mmc_setup_queue()
373 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); in mmc_setup_queue()
375 mmc_queue_setup_discard(mq->queue, card); in mmc_setup_queue()
378 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); in mmc_setup_queue()
379 blk_queue_max_hw_sectors(mq->queue, in mmc_setup_queue()
382 WARN(!blk_queue_can_use_dma_map_merging(mq->queue, in mmc_setup_queue()
385 blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); in mmc_setup_queue()
390 blk_queue_logical_block_size(mq->queue, block_size); in mmc_setup_queue()
397 blk_queue_max_segment_size(mq->queue, in mmc_setup_queue()
400 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); in mmc_setup_queue()
402 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); in mmc_setup_queue()
403 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); in mmc_setup_queue()
405 mutex_init(&mq->complete_lock); in mmc_setup_queue()
407 init_waitqueue_head(&mq->wait); in mmc_setup_queue()
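mmc_setup_queue() translates host and card capabilities into block-layer limits and initializes the work items and waitqueue used by the dispatch and recovery paths. The guard conditions below (mmc_can_erase(), the dma_mask test, the can_dma_map_merge checks) are assumptions, since the fragments show only the mq lines:

```c
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}
	blk_queue_logical_block_size(mq->queue, block_size);

	/* Keep the segment size a multiple of the logical block size. */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}
```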
425 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_init_queue() argument
430 mq->card = card; in mmc_init_queue()
431 mq->use_cqe = host->cqe_enabled; in mmc_init_queue()
433 spin_lock_init(&mq->lock); in mmc_init_queue()
435 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); in mmc_init_queue()
436 mq->tag_set.ops = &mmc_mq_ops; in mmc_init_queue()
441 if (mq->use_cqe && !host->hsq_enabled) in mmc_init_queue()
442 mq->tag_set.queue_depth = in mmc_init_queue()
445 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; in mmc_init_queue()
446 mq->tag_set.numa_node = NUMA_NO_NODE; in mmc_init_queue()
447 mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in mmc_init_queue()
448 mq->tag_set.nr_hw_queues = 1; in mmc_init_queue()
449 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); in mmc_init_queue()
450 mq->tag_set.driver_data = mq; in mmc_init_queue()
464 ret = blk_mq_alloc_tag_set(&mq->tag_set); in mmc_init_queue()
468 mq->queue = blk_mq_init_queue(&mq->tag_set); in mmc_init_queue()
469 if (IS_ERR(mq->queue)) { in mmc_init_queue()
470 ret = PTR_ERR(mq->queue); in mmc_init_queue()
475 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); in mmc_init_queue()
477 mq->queue->queuedata = mq; in mmc_init_queue()
478 blk_queue_rq_timeout(mq->queue, 60 * HZ); in mmc_init_queue()
480 mmc_setup_queue(mq, card); in mmc_init_queue()
484 blk_mq_free_tag_set(&mq->tag_set); in mmc_init_queue()
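mmc_init_queue() builds the tag set and request queue around the hooks above. For CQE the blk-mq queue depth must match the hardware queue depth, because the request tag indexes the hardware slots; otherwise a fixed MMC_QUEUE_DEPTH is used. A sketch with the error unwinding implied by fragment 484:

```c
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/* CQE: the request tag indexes the hardware queue, so match depths. */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
```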
488 void mmc_queue_suspend(struct mmc_queue *mq) in mmc_queue_suspend() argument
490 blk_mq_quiesce_queue(mq->queue); in mmc_queue_suspend()
496 mmc_claim_host(mq->card->host); in mmc_queue_suspend()
497 mmc_release_host(mq->card->host); in mmc_queue_suspend()
500 void mmc_queue_resume(struct mmc_queue *mq) in mmc_queue_resume() argument
502 blk_mq_unquiesce_queue(mq->queue); in mmc_queue_resume()
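Suspend quiesces the blk-mq queue and then claims and releases the host; because the host stays claimed while any request is outstanding, the claim only returns once the queue has drained. Resume just unquiesces:

```c
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while requests are outstanding, so
	 * claiming and releasing it here waits for them all to finish.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}
```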
505 void mmc_cleanup_queue(struct mmc_queue *mq) in mmc_cleanup_queue() argument
507 struct request_queue *q = mq->queue; in mmc_cleanup_queue()
517 blk_mq_free_tag_set(&mq->tag_set); in mmc_cleanup_queue()
524 flush_work(&mq->complete_work); in mmc_cleanup_queue()
526 mq->card = NULL; in mmc_cleanup_queue()
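Teardown has to undo a possible suspend-time quiesce before the queue can drain, and a queued complete_work may still be pending after the last request finishes, hence the flush. The unquiesce guard and the blk_cleanup_queue() call between the fragments are assumptions:

```c
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/* Undo a possible suspend-time quiesce before tearing down. */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can complete before the next one is dispatched, leaving
	 * a queued complete_work with nothing to do; flush it before the
	 * queue goes away.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}
```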
532 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) in mmc_queue_map_sg() argument
536 return blk_rq_map_sg(mq->queue, req, mqrq->sg); in mmc_queue_map_sg()
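Finally, the mapping helper fills the scatterlist allocated in __mmc_init_request() from the request's bios:

```c
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	/* Map the request's segments into the pre-allocated sg list. */
	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
```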