Lines matching refs: khd
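These references all come from the Kyber I/O scheduler (block/kyber-iosched.c in the Linux tree) and dereference fields of the per-hardware-queue state, struct kyber_hctx_data. The following is a condensed reconstruction of that structure, inferred from the fields touched in the lines below; exact layout and per-version extras may differ.

    /* Per-hardware-queue scheduler state, as implied by the references below. */
    struct kyber_hctx_data {
        spinlock_t lock;                                 /* serializes dispatch */
        struct list_head rqs[KYBER_NUM_DOMAINS];         /* flushed, ready-to-dispatch requests */
        unsigned int cur_domain;                         /* domain currently being batched */
        unsigned int batching;                           /* requests dispatched in current batch */
        struct kyber_ctx_queue *kcqs;                    /* one per software queue (blk_mq_ctx) */
        struct sbitmap kcq_map[KYBER_NUM_DOMAINS];       /* which kcqs hold pending requests */
        struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];  /* wait entries for domain tokens */
        struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
    };
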
468 struct kyber_hctx_data *khd; in kyber_init_hctx() local
471 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
472 if (!khd) in kyber_init_hctx()
475 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
478 if (!khd->kcqs) in kyber_init_hctx()
482 kyber_ctx_queue_init(&khd->kcqs[i]); in kyber_init_hctx()
485 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
489 sbitmap_free(&khd->kcq_map[i]); in kyber_init_hctx()
494 spin_lock_init(&khd->lock); in kyber_init_hctx()
497 INIT_LIST_HEAD(&khd->rqs[i]); in kyber_init_hctx()
498 khd->domain_wait[i].sbq = NULL; in kyber_init_hctx()
499 init_waitqueue_func_entry(&khd->domain_wait[i].wait, in kyber_init_hctx()
501 khd->domain_wait[i].wait.private = hctx; in kyber_init_hctx()
502 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry); in kyber_init_hctx()
503 atomic_set(&khd->wait_index[i], 0); in kyber_init_hctx()
506 khd->cur_domain = 0; in kyber_init_hctx()
507 khd->batching = 0; in kyber_init_hctx()
509 hctx->sched_data = khd; in kyber_init_hctx()
515 kfree(khd->kcqs); in kyber_init_hctx()
517 kfree(khd); in kyber_init_hctx()
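Taken together, the kyber_init_hctx() lines above are the allocation and error-unwinding path: allocate khd on the hctx's NUMA node, allocate and initialize one kyber_ctx_queue per software queue, set up one busy-kcq bitmap per scheduling domain, initialize the dispatch lists and token wait entries, and finally publish khd through hctx->sched_data. A condensed sketch of how the fragments fit together; the wake callback (kyber_domain_wake in mainline) and the trailing sbitmap_init_node() arguments are version-dependent details not shown in the listing:

    static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
        struct kyber_hctx_data *khd;
        int i;

        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
            return -ENOMEM;

        /* One kyber_ctx_queue per software queue mapped to this hctx. */
        khd->kcqs = kmalloc_array_node(hctx->nr_ctx, sizeof(struct kyber_ctx_queue),
                                       GFP_KERNEL, hctx->numa_node);
        if (!khd->kcqs)
            goto err_khd;
        for (i = 0; i < hctx->nr_ctx; i++)
            kyber_ctx_queue_init(&khd->kcqs[i]);

        /* One "busy kcq" bitmap per domain; unwind already-built bitmaps on failure.
         * (Newer kernels pass extra round-robin/alloc-hint arguments here.) */
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
            if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
                                  ilog2(8), GFP_KERNEL, hctx->numa_node)) {
                while (--i >= 0)
                    sbitmap_free(&khd->kcq_map[i]);
                goto err_kcqs;
            }
        }

        spin_lock_init(&khd->lock);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
            INIT_LIST_HEAD(&khd->rqs[i]);
            khd->domain_wait[i].sbq = NULL;
            init_waitqueue_func_entry(&khd->domain_wait[i].wait, kyber_domain_wake);
            khd->domain_wait[i].wait.private = hctx;
            INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
            atomic_set(&khd->wait_index[i], 0);
        }

        khd->cur_domain = 0;
        khd->batching = 0;
        hctx->sched_data = khd;
        return 0;

    err_kcqs:
        kfree(khd->kcqs);
    err_khd:
        kfree(khd);
        return -ENOMEM;
    }
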
523 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_exit_hctx() local
527 sbitmap_free(&khd->kcq_map[i]); in kyber_exit_hctx()
528 kfree(khd->kcqs); in kyber_exit_hctx()
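The matching teardown in kyber_exit_hctx() simply releases what init allocated, roughly:

    static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
            sbitmap_free(&khd->kcq_map[i]);
        kfree(khd->kcqs);
        kfree(khd);
    }
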
574 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_bio_merge() local
575 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; in kyber_bio_merge()
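In kyber_bio_merge() the only use of khd is to locate the kyber_ctx_queue for the submitting CPU's software queue, so the bio can be merged against requests already parked there. A rough sketch; kyber_sched_domain() and the per-domain kcq->rq_list[] come from elsewhere in the file, and the list-merge helper is blk_bio_list_merge() in current mainline (older kernels spell it blk_mq_bio_list_merge()):

    static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
                                unsigned int nr_segs)
    {
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
        bool merged;

        spin_lock(&kcq->lock);
        merged = blk_bio_list_merge(q, &kcq->rq_list[sched_domain], bio, nr_segs);
        spin_unlock(&kcq->lock);
        return merged;
    }
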
595 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_insert_requests() local
600 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]]; in kyber_insert_requests()
609 sbitmap_set_bit(&khd->kcq_map[sched_domain], in kyber_insert_requests()
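kyber_insert_requests() files each incoming request onto the kyber_ctx_queue of its software queue and marks that kcq as busy in the domain's kcq_map, so dispatch can later find pending work without scanning every ctx. Roughly (the at_head parameter is a simplification; newer kernels pass insert flags instead, and the insert tracepoint is omitted):

    static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                                      struct list_head *rq_list, bool at_head)
    {
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, rq_list, queuelist) {
            unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
            struct kyber_ctx_queue *kcq =
                &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
            struct list_head *head = &kcq->rq_list[sched_domain];

            spin_lock(&kcq->lock);
            if (at_head)
                list_move(&rq->queuelist, head);
            else
                list_move_tail(&rq->queuelist, head);
            /* Remember that this kcq now holds work for this domain. */
            sbitmap_set_bit(&khd->kcq_map[sched_domain],
                            rq->mq_ctx->index_hw[hctx->type]);
            spin_unlock(&kcq->lock);
        }
    }
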
663 struct kyber_hctx_data *khd; member
671 struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr]; in flush_busy_kcq()
682 static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd, in kyber_flush_busy_kcqs() argument
687 .khd = khd, in kyber_flush_busy_kcqs()
692 sbitmap_for_each_set(&khd->kcq_map[sched_domain], in kyber_flush_busy_kcqs()
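flush_busy_kcq() is the per-bit callback and kyber_flush_busy_kcqs() the driver: together they walk the busy-kcq bitmap for one domain, splice each non-empty per-ctx list onto the caller's list (normally khd->rqs[domain]), and clear the bit. Approximately:

    struct flush_kcq_data {
        struct kyber_hctx_data *khd;
        unsigned int sched_domain;
        struct list_head *list;
    };

    static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
    {
        struct flush_kcq_data *flush_data = data;
        struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

        spin_lock(&kcq->lock);
        list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
                              flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&kcq->lock);
        return true;    /* keep iterating over set bits */
    }

    static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
                                      unsigned int sched_domain,
                                      struct list_head *list)
    {
        struct flush_kcq_data data = {
            .khd = khd,
            .sched_domain = sched_domain,
            .list = list,
        };

        sbitmap_for_each_set(&khd->kcq_map[sched_domain], flush_busy_kcq, &data);
    }
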
708 struct kyber_hctx_data *khd, in kyber_get_domain_token() argument
711 unsigned int sched_domain = khd->cur_domain; in kyber_get_domain_token()
713 struct sbq_wait *wait = &khd->domain_wait[sched_domain]; in kyber_get_domain_token()
726 &khd->wait_index[sched_domain]); in kyber_get_domain_token()
727 khd->domain_ws[sched_domain] = ws; in kyber_get_domain_token()
745 ws = khd->domain_ws[sched_domain]; in kyber_get_domain_token()
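kyber_get_domain_token() tries to grab a queue-depth token for the current domain. If none is free it registers khd->domain_wait[domain] on one of the sbitmap_queue's wait heads (picked via khd->wait_index) so the hctx gets re-run when a token is released, then retries once to close the race; if the retry succeeds, the wait entry is removed again. A sketch, with kqd->domain_tokens assumed from the rest of the file:

    static int kyber_get_domain_token(struct kyber_queue_data *kqd,
                                      struct kyber_hctx_data *khd,
                                      struct blk_mq_hw_ctx *hctx)
    {
        unsigned int sched_domain = khd->cur_domain;
        struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
        struct sbq_wait *wait = &khd->domain_wait[sched_domain];
        struct sbq_wait_state *ws;
        int nr;

        nr = __sbitmap_queue_get(domain_tokens);

        /* Out of tokens: queue a wakeup, then retry in case one was just freed. */
        if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
            ws = sbq_wait_ptr(domain_tokens, &khd->wait_index[sched_domain]);
            khd->domain_ws[sched_domain] = ws;
            sbitmap_add_wait_queue(domain_tokens, ws, wait);
            nr = __sbitmap_queue_get(domain_tokens);
        }

        /* Got a token after queuing: drop the wait entry again. */
        if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
            ws = khd->domain_ws[sched_domain];
            spin_lock_irq(&ws->wait.lock);
            sbitmap_del_wait_queue(domain_tokens, ws, wait);
            spin_unlock_irq(&ws->wait.lock);
        }

        return nr;
    }
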
756 struct kyber_hctx_data *khd, in kyber_dispatch_cur_domain() argument
763 rqs = &khd->rqs[khd->cur_domain]; in kyber_dispatch_cur_domain()
775 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
777 khd->batching++; in kyber_dispatch_cur_domain()
783 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
785 } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) { in kyber_dispatch_cur_domain()
786 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
788 kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs); in kyber_dispatch_cur_domain()
790 khd->batching++; in kyber_dispatch_cur_domain()
796 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
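kyber_dispatch_cur_domain() serves khd->cur_domain: if requests are already on khd->rqs[] it only needs a token; otherwise, if the kcq_map shows pending work, it takes the token first and only then flushes the kcqs (unflushed requests stay merge-able). If no token is available, the throttling tracepoint fires and NULL is returned. Condensed sketch; rq_set_domain_token() and trace_kyber_throttled() are mainline helpers not shown in the listing, and the tracepoint's first argument differs between versions:

    static struct request *
    kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                              struct kyber_hctx_data *khd,
                              struct blk_mq_hw_ctx *hctx)
    {
        struct list_head *rqs = &khd->rqs[khd->cur_domain];
        struct request *rq;
        int nr;

        rq = list_first_entry_or_null(rqs, struct request, queuelist);
        if (rq) {
            /* Already flushed: just need a domain token. */
            nr = kyber_get_domain_token(kqd, khd, hctx);
            if (nr >= 0) {
                khd->batching++;
                rq_set_domain_token(rq, nr);
                list_del_init(&rq->queuelist);
                return rq;
            }
            trace_kyber_throttled(kqd->dev, kyber_domain_names[khd->cur_domain]);
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
            /* Pending work in the kcqs: take a token, then flush. */
            nr = kyber_get_domain_token(kqd, khd, hctx);
            if (nr >= 0) {
                kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
                rq = list_first_entry(rqs, struct request, queuelist);
                khd->batching++;
                rq_set_domain_token(rq, nr);
                list_del_init(&rq->queuelist);
                return rq;
            }
            trace_kyber_throttled(kqd->dev, kyber_domain_names[khd->cur_domain]);
        }

        /* Nothing pending for this domain, or out of tokens. */
        return NULL;
    }
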
807 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_dispatch_request() local
811 spin_lock(&khd->lock); in kyber_dispatch_request()
817 if (khd->batching < kyber_batch_size[khd->cur_domain]) { in kyber_dispatch_request()
818 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
832 khd->batching = 0; in kyber_dispatch_request()
834 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1) in kyber_dispatch_request()
835 khd->cur_domain = 0; in kyber_dispatch_request()
837 khd->cur_domain++; in kyber_dispatch_request()
839 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
846 spin_unlock(&khd->lock); in kyber_dispatch_request()
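kyber_dispatch_request() is where batching and cur_domain come together: under khd->lock it keeps serving the current domain while the batch limit kyber_batch_size[cur_domain] has not been reached, then resets the batch counter and rotates cur_domain round-robin, giving every domain one chance before giving up. Roughly:

    static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
    {
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq;
        int i;

        spin_lock(&khd->lock);

        /* Keep batching from the current domain while we are entitled to. */
        if (khd->batching < kyber_batch_size[khd->cur_domain]) {
            rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
            if (rq)
                goto out;
        }

        /*
         * Batch exhausted, domain empty, or domain out of tokens:
         * start a new batch on the next domain, round-robin.
         */
        khd->batching = 0;
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
            if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
                khd->cur_domain = 0;
            else
                khd->cur_domain++;

            rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
            if (rq)
                goto out;
        }

        rq = NULL;
    out:
        spin_unlock(&khd->lock);
        return rq;
    }
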
852 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_has_work() local
856 if (!list_empty_careful(&khd->rqs[i]) || in kyber_has_work()
857 sbitmap_any_bit_set(&khd->kcq_map[i])) in kyber_has_work()
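kyber_has_work() reports pending work if any domain has either already-flushed requests on khd->rqs[] or a set bit in its kcq_map, checked locklessly:

    static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
    {
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
            if (!list_empty_careful(&khd->rqs[i]) ||
                sbitmap_any_bit_set(&khd->kcq_map[i]))
                return true;
        }
        return false;
    }
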
912 __acquires(&khd->lock) \
915 struct kyber_hctx_data *khd = hctx->sched_data; \
917 spin_lock(&khd->lock); \
918 return seq_list_start(&khd->rqs[domain], *pos); \
925 struct kyber_hctx_data *khd = hctx->sched_data; \
927 return seq_list_next(v, &khd->rqs[domain], pos); \
931 __releases(&khd->lock) \
934 struct kyber_hctx_data *khd = hctx->sched_data; \
936 spin_unlock(&khd->lock); \
949 struct kyber_hctx_data *khd = hctx->sched_data; \
950 wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
973 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_cur_domain_show() local
975 seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]); in kyber_cur_domain_show()
982 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_batching_show() local
984 seq_printf(m, "%u\n", khd->batching); in kyber_batching_show()
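The remaining references are debugfs plumbing: a per-domain macro generates seq_file start/next/stop helpers that walk khd->rqs[domain] under khd->lock, plus a "waiting" attribute reporting whether the domain's token wait entry is currently queued, while kyber_cur_domain_show() and kyber_batching_show() dump the two scalar fields directly. A sketch of the macro-generated helpers expanded for a single domain (KYBER_READ here is illustrative; the real file stamps these out for every domain via a macro):

    static void *kyber_read_rqs_start(struct seq_file *m, loff_t *pos)
        __acquires(&khd->lock)
    {
        struct blk_mq_hw_ctx *hctx = m->private;
        struct kyber_hctx_data *khd = hctx->sched_data;

        spin_lock(&khd->lock);
        return seq_list_start(&khd->rqs[KYBER_READ], *pos);
    }

    static void *kyber_read_rqs_next(struct seq_file *m, void *v, loff_t *pos)
    {
        struct blk_mq_hw_ctx *hctx = m->private;
        struct kyber_hctx_data *khd = hctx->sched_data;

        return seq_list_next(v, &khd->rqs[KYBER_READ], pos);
    }

    static void kyber_read_rqs_stop(struct seq_file *m, void *v)
        __releases(&khd->lock)
    {
        struct blk_mq_hw_ctx *hctx = m->private;
        struct kyber_hctx_data *khd = hctx->sched_data;

        spin_unlock(&khd->lock);
    }

    static int kyber_read_waiting_show(void *data, struct seq_file *m)
    {
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;
        wait_queue_entry_t *wait = &khd->domain_wait[KYBER_READ].wait;

        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));
        return 0;
    }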