Lines matching refs:kernel_queues (all hits are in the habanalabs driver's hw_queue.c)

50 q = &hdev->kernel_queues[0]; in hl_hw_queue_update_ci()
231 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_send_cb_no_cmpl()
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job()
336 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in int_queue_schedule_job()
372 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in hw_queue_schedule_job()
413 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_signal_cs()
472 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_wait_cs()
652 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
783 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
808 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_inc_ci_kernel()
909 sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in sync_stream_queue_init()
916 if (hdev->kernel_queues[q_idx].collective_mode == in sync_stream_queue_init()
929 } else if (hdev->kernel_queues[q_idx].collective_mode == in sync_stream_queue_init()
938 if (!hdev->kernel_queues[q_idx].supports_sync_stream) in sync_stream_queue_init()
964 &hdev->kernel_queues[q_idx].sync_stream_prop; in sync_stream_queue_reset()
1074 hdev->kernel_queues = kcalloc(asic->max_queues, in hl_hw_queues_create()
1075 sizeof(*hdev->kernel_queues), GFP_KERNEL); in hl_hw_queues_create()
1077 if (!hdev->kernel_queues) { in hl_hw_queues_create()
1083 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; in hl_hw_queues_create()
1101 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) in hl_hw_queues_create()
1104 kfree(hdev->kernel_queues); in hl_hw_queues_create()
1115 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) in hl_hw_queues_destroy()
1118 kfree(hdev->kernel_queues); in hl_hw_queues_destroy()
1127 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) { in hl_hw_queue_reset()
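
Taken together, these hits trace the full lifetime of the kernel_queues array: hl_hw_queues_create() allocates it once as max_queues zeroed entries (1074-1075) and unwinds partially initialized queues on failure (1101-1104), every dispatch path indexes it directly by hardware queue ID (50-808), sync_stream_queue_init()/sync_stream_queue_reset() reach into each entry's sync_stream_prop (909-964), and hl_hw_queues_destroy() tears the array down (1115-1118). Below is a minimal userspace sketch of that allocate/index/unwind pattern, not the driver's actual code: the structures are reduced to the fields visible above, calloc()/free() stand in for kcalloc()/kfree(), and queue_init()/queue_fini() are hypothetical placeholders.

#include <stdlib.h>

/* Stand-in structures; only what the listing shows is kept. */
struct hl_hw_queue {
	unsigned int hw_queue_id;
	/* ... completion index, sync_stream_prop, etc. ... */
};

struct hl_device {
	struct hl_hw_queue *kernel_queues; /* max_queues contiguous entries */
	unsigned int max_queues;
};

/* Hypothetical per-queue init/teardown, standing in for the roles of
 * queue_init()/queue_fini() in hw_queue.c. */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
		      unsigned int id)
{
	q->hw_queue_id = id;
	return 0;
}

static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	(void)hdev;
	(void)q;
}

/* Pattern of hl_hw_queues_create(): allocate the whole array, init
 * entry by entry, and on failure tear down only the q_ready_cnt
 * queues that were actually initialized. */
static int hw_queues_create(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	unsigned int i, q_ready_cnt;
	int rc = 0;

	hdev->kernel_queues = calloc(hdev->max_queues,
				     sizeof(*hdev->kernel_queues));
	if (!hdev->kernel_queues)
		return -1; /* the driver returns -ENOMEM here */

	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < hdev->max_queues; i++, q++) {
		rc = queue_init(hdev, q, i);
		if (rc)
			goto release_queues;
		q_ready_cnt++;
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues; i < q_ready_cnt; i++, q++)
		queue_fini(hdev, q);
	free(hdev->kernel_queues);
	return rc;
}

/* Every other hit in the listing is this constant-time lookup:
 * the hardware queue ID doubles as the array index. */
static struct hl_hw_queue *queue_by_id(struct hl_device *hdev,
				       unsigned int hw_queue_id)
{
	return &hdev->kernel_queues[hw_queue_id];
}

The same two-loop shape appears again at teardown: hl_hw_queues_destroy() (1115-1118) runs the per-queue teardown over all max_queues entries and then frees the array, so a hardware queue ID stays a valid index for the device's entire lifetime.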