Lines Matching +full:input +full:- +full:depth
/* SPDX-License-Identifier: GPL-2.0 */

#include "blk-stat.h"
#include "blk-mq-tag.h"

/* struct blk_mq_ctx - State for a software queue facing the submitting CPUs */

                unsigned int hctx_idx, unsigned int depth);
/* CPU -> queue mappings */

/* blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue */
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);

/* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue */
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
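Both lookups are cheap: blk_mq_map_queue_type() goes through the tag set's per-type CPU map, while blk_mq_map_queue() uses the software context's cached hctx pointer for the operation type. A minimal usage sketch, assuming a queue whose tag set provides default and poll maps; the example_* helpers are hypothetical, not from the tree:

/* hypothetical helpers, for illustration only */
static struct blk_mq_hw_ctx *example_hctx_for_polled_read(struct request_queue *q)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

        /* REQ_POLLED selects the HCTX_TYPE_POLL map, if one was set up */
        return blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx);
}

static struct blk_mq_hw_ctx *example_hctx_for_this_cpu(struct request_queue *q)
{
        /* explicit (type, cpu) lookup through the tag set's map */
        return blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
                                     raw_smp_processor_id());
}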
/* in __blk_mq_get_ctx() */
        return per_cpu_ptr(q->queue_ctx, cpu);

 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
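In most kernel versions the per-CPU lookup above is wrapped by a helper that simply uses whichever CPU the submitter happens to be running on; a minimal sketch of such a wrapper (reconstructed from memory, so treat the exact shape as an assumption):

static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        /* any ctx of the queue is acceptable, so preemption need not be disabled */
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}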
/* in struct blk_mq_alloc_data */
        /* input parameter */
        /* input & output parameter */
/* in blk_mq_tags_from_data() */
        if (!(data->rq_flags & RQF_ELV))
                return data->hctx->tags;
        return data->hctx->sched_tags;

/* in blk_mq_hctx_stopped() */
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);

/* in blk_mq_hw_queue_mapped() */
        return hctx->nr_ctx && hctx->tags;
/* in blk_mq_put_dispatch_budget() */
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);

/* in blk_mq_get_dispatch_budget() */
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);

/* in blk_mq_set_rq_budget_token() */
        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);

/* in blk_mq_get_rq_budget_token() */
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
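These wrappers only forward to optional blk_mq_ops callbacks, so dispatch budgeting is entirely driver-defined: get_budget returns a non-negative token (or a negative value when the device has no room), and put_budget hands it back. A hypothetical driver-side sketch using a plain atomic counter; the mydrv_* names and the per-queue structure are invented for illustration, and real users such as SCSI account their budget differently:

struct mydrv_queue {
        atomic_t budget;        /* free device slots */
};

static int mydrv_get_budget(struct request_queue *q)
{
        struct mydrv_queue *mq = q->queuedata;

        /* refuse when saturated; the dispatch code will back off and retry */
        if (atomic_dec_if_positive(&mq->budget) < 0)
                return -1;
        return 0;               /* non-negative "token"; unused by this driver */
}

static void mydrv_put_budget(struct request_queue *q, int budget_token)
{
        struct mydrv_queue *mq = q->queuedata;

        atomic_inc(&mq->budget);
}

static const struct blk_mq_ops mydrv_mq_ops = {
        /* .queue_rq and friends omitted */
        .get_budget     = mydrv_get_budget,
        .put_budget     = mydrv_put_budget,
};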
/* in __blk_mq_inc_active_requests() */
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);

/* in __blk_mq_sub_active_requests() */
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);

/* in __blk_mq_active_requests() */
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
/* in __blk_mq_put_driver_tag() */
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }

/* in blk_mq_put_driver_tag() */
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
/* in blk_mq_get_driver_tag() */
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (rq->tag != BLK_MQ_NO_TAG &&
            !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                hctx->tags->rqs[rq->tag] = rq;
                return true;
        }

/* in blk_mq_clear_mq_map() */
                qmap->mq_map[cpu] = 0;
/*
 * blk_mq_plug() - Get caller context plug
 * Return current->plug if the bio can be plugged and NULL otherwise
 */

/* in blk_mq_plug() */
            bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
                return NULL;

        return current->plug;
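The plug returned here is whatever the submitting task set up with blk_start_plug(); writes to zoned devices are the one case that is never plugged. A small, self-contained caller sketch (the example_submit_batch helper is hypothetical):

/* hypothetical caller, for illustration: batch several bios under one plug */
static void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* current->plug now points here, unless already plugged */
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]);    /* blk_mq_plug() sees the plug and batches */
        blk_finish_plug(&plug);         /* flush the batched requests */
}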
/* in blk_mq_free_requests() */
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
 * and attempt to provide a fair share of the tag depth for each of them.

/* in hctx_may_queue() */
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
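To make the fair-share bound concrete: with a shared tag depth of 64 and 3 active queues, each queue may have at most max(ceil(64/3), 4) = 22 requests in flight before hctx_may_queue() refuses further tags; with 32 active queues the floor of 4 kicks in. A hypothetical restatement of just that arithmetic (the helper name is made up):

/* hypothetical helper, for illustration only */
static unsigned int fair_share_depth(unsigned int tag_depth, unsigned int users)
{
        /* divide rounding up so every active queue gets a share, floored at 4 */
        return max((tag_depth + users - 1) / users, 4U);
}
/* e.g. fair_share_depth(64, 3) == 22, fair_share_depth(64, 32) == 4 */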
/* in __blk_mq_run_dispatch_ops() */
                srcu_idx = srcu_read_lock((q)->srcu);           \
                (dispatch_ops);                                 \
                srcu_read_unlock((q)->srcu, srcu_idx);          \