Lines Matching full:cs

22  * enum hl_cs_wait_status - cs wait status
23 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
24 * @CS_WAIT_STATUS_COMPLETED: cs completed
25 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
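From the kerneldoc fragments above, the wait-status enum presumably carries just these three values; a minimal sketch of its declaration (assuming no additional states) would be:

	enum hl_cs_wait_status {
		CS_WAIT_STATUS_BUSY,		/* cs was not completed yet */
		CS_WAIT_STATUS_COMPLETED,	/* cs completed */
		CS_WAIT_STATUS_GONE		/* cs completed but fence is already gone */
	};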
46 * CS outcome store supports the following operations: in hl_push_cs_outcome()
47 * push outcome - store a recent CS outcome in the store in hl_push_cs_outcome()
48 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store in hl_push_cs_outcome()
51 * a single CS outcome. in hl_push_cs_outcome()
71 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq); in hl_push_cs_outcome()
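As an illustration of the push/pop semantics described above, here is a deliberately simplified, hypothetical store (array-based; all names in this sketch are invented and the driver's actual implementation may differ). The pop operation retrieves the outcome of a SPECIFIC sequence and releases its slot:

	struct cs_outcome_sketch {
		bool used;
		u64 seq;
		ktime_t ts;
		int error;
	};

	/* pop outcome: look up a specific CS sequence and free its slot */
	static bool pop_cs_outcome_sketch(struct cs_outcome_sketch *store, int size,
					  u64 seq, ktime_t *ts, int *error)
	{
		int i;

		for (i = 0; i < size; i++) {
			if (!store[i].used || store[i].seq != seq)
				continue;

			*ts = store[i].ts;
			*error = store[i].error;
			store[i].used = false;
			return true;
		}

		return false;
	}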
231 void cs_get(struct hl_cs *cs) in cs_get() argument
233 kref_get(&cs->refcount); in cs_get()
236 static int cs_get_unless_zero(struct hl_cs *cs) in cs_get_unless_zero() argument
238 return kref_get_unless_zero(&cs->refcount); in cs_get_unless_zero()
241 static void cs_put(struct hl_cs *cs) in cs_put() argument
243 kref_put(&cs->refcount, cs_do_release); in cs_put()
258 bool cs_needs_completion(struct hl_cs *cs) in cs_needs_completion() argument
260 /* In case this is a staged CS, only the last CS in sequence should in cs_needs_completion()
261 * get a completion, any non staged CS will always get a completion in cs_needs_completion()
263 if (cs->staged_cs && !cs->staged_last) in cs_needs_completion()
269 bool cs_needs_timeout(struct hl_cs *cs) in cs_needs_timeout() argument
271 /* In case this is a staged CS, only the first CS in sequence should in cs_needs_timeout()
272 * get a timeout, any non-staged CS will always get a timeout in cs_needs_timeout()
274 if (cs->staged_cs && !cs->staged_first) in cs_needs_timeout()
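Reassembled from the fragments above (the early-return and final return values are inferred, everything else is taken from the listing), the two staged-submission helpers amount to:

	bool cs_needs_completion(struct hl_cs *cs)
	{
		/* In case this is a staged CS, only the last CS in sequence should
		 * get a completion, any non-staged CS will always get a completion
		 */
		if (cs->staged_cs && !cs->staged_last)
			return false;

		return true;
	}

	bool cs_needs_timeout(struct hl_cs *cs)
	{
		/* In case this is a staged CS, only the first CS in sequence should
		 * get a timeout, any non-staged CS will always get a timeout
		 */
		if (cs->staged_cs && !cs->staged_first)
			return false;

		return true;
	}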
309 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
310 parser.cs_sequence = job->cs->sequence; in cs_parser()
321 parser.completion = cs_needs_completion(job->cs); in cs_parser()
336 * won't be accessed again for this CS in cs_parser()
350 struct hl_cs *cs = job->cs; in hl_complete_job() local
381 spin_lock(&cs->job_lock); in hl_complete_job()
383 spin_unlock(&cs->job_lock); in hl_complete_job()
387 /* We decrement reference only for a CS that gets completion in hl_complete_job()
388 * because the reference was incremented only for this kind of CS in hl_complete_job()
391 * In staged submission, only the last CS marked as 'staged_last' in hl_complete_job()
393 * As for all the rest CS's in the staged submission which do not get in hl_complete_job()
394 * completion, their CS reference will be decremented by the in hl_complete_job()
395 * 'staged_last' CS during the CS release flow. in hl_complete_job()
396 * All relevant PQ CI counters will be incremented during the CS release in hl_complete_job()
399 if (cs_needs_completion(cs) && in hl_complete_job()
401 cs_put(cs); in hl_complete_job()
407 * hl_staged_cs_find_first - locate the first CS in this staged submission
414 * Find and return a CS pointer with the given sequence
418 struct hl_cs *cs; in hl_staged_cs_find_first() local
420 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node) in hl_staged_cs_find_first()
421 if (cs->staged_cs && cs->staged_first && in hl_staged_cs_find_first()
422 cs->sequence == cs_seq) in hl_staged_cs_find_first()
423 return cs; in hl_staged_cs_find_first()
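Putting the hl_staged_cs_find_first() fragments back together (the signature is inferred from the call site in cs_handle_tdr() below, and the trailing return NULL is assumed):

	static struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
	{
		struct hl_cs *cs;

		list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
			if (cs->staged_cs && cs->staged_first &&
					cs->sequence == cs_seq)
				return cs;

		return NULL;
	}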
429 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
432 * @cs: staged submission member
435 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs) in is_staged_cs_last_exists() argument
439 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs, in is_staged_cs_last_exists()
449 * staged_cs_get - get CS reference if this CS is a part of a staged CS
452 * @cs: current CS
455 * Increment CS reference for every CS in this staged submission except for
456 * the CS which get completion.
458 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_get() argument
460 /* Only the last CS in this staged submission will get a completion. in staged_cs_get()
461 * We must increment the reference for all other CS's in this in staged_cs_get()
465 if (!cs->staged_last) in staged_cs_get()
466 cs_get(cs); in staged_cs_get()
470 * staged_cs_put - put a CS in case it is part of staged submission
473 * @cs: CS to put
475 * This function decrements a CS reference (for a non completion CS)
477 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_put() argument
479 /* We release all CS's in a staged submission except the last in staged_cs_put()
480 * CS, whose reference we never incremented. in staged_cs_put()
482 if (!cs_needs_completion(cs)) in staged_cs_put()
483 cs_put(cs); in staged_cs_put()
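For completeness, the two staged-reference helpers reassembled from the fragments above (only the surrounding braces are inferred):

	static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
	{
		/* Only the last CS in this staged submission will get a completion.
		 * We must increment the reference for all other CS's in this
		 * staged submission.
		 */
		if (!cs->staged_last)
			cs_get(cs);
	}

	static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
	{
		/* We release all CS's in a staged submission except the last
		 * CS, whose reference we never incremented.
		 */
		if (!cs_needs_completion(cs))
			cs_put(cs);
	}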
486 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) in cs_handle_tdr() argument
490 if (!cs_needs_timeout(cs)) in cs_handle_tdr()
496 * Hence, we choose the CS that reaches this function first which is in cs_handle_tdr()
497 * the CS marked as 'staged_last'. in cs_handle_tdr()
498 * In case a single staged cs was submitted which has both first and last in cs_handle_tdr()
500 * removed the cs node from the list before getting here, in cs_handle_tdr()
501 * in such cases just continue with the cs to cancel its TDR work. in cs_handle_tdr()
503 if (cs->staged_cs && cs->staged_last) { in cs_handle_tdr()
504 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); in cs_handle_tdr()
506 cs = first_cs; in cs_handle_tdr()
511 /* Don't cancel TDR in case this CS was timedout because we might be in cs_handle_tdr()
514 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT) in cs_handle_tdr()
517 if (cs->tdr_active) in cs_handle_tdr()
518 cancel_delayed_work_sync(&cs->work_tdr); in cs_handle_tdr()
522 /* queue TDR for next CS */ in cs_handle_tdr()
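The "queue TDR for next CS" step is not shown in full by this listing. A hedged sketch of what it presumably does, based on fields that do appear in this file (cs_mirror_list, tdr_active, work_tdr, timeout_jiffies; the cs_mirror_lock spinlock is an assumption), is to pick the next CS on the mirror list that needs a timeout and arm its delayed TDR work:

	struct hl_cs *next;
	bool found = false;

	spin_lock(&hdev->cs_mirror_lock);		/* lock name assumed */

	/* queue TDR for next CS */
	list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(next)) {
			found = true;
			break;
		}

	if (found && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);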
538 * force_complete_multi_cs - complete all contexts that wait on multi-CS
559 * multi-CS. in force_complete_multi_cs()
564 "multi-CS completion context %d still waiting when calling force completion\n", in force_complete_multi_cs()
572 * complete_multi_cs - complete all waiting entities on multi-CS
575 * @cs: CS structure
577 * with the completed CS.
579 * - a completed CS worked on stream master QID 4, multi CS completion
582 * - a completed CS worked on stream master QID 4, multi CS completion
586 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs) in complete_multi_cs() argument
588 struct hl_fence *fence = cs->fence; in complete_multi_cs()
591 /* in case of multi CS check for completion only for the first CS */ in complete_multi_cs()
592 if (cs->staged_cs && !cs->staged_first) in complete_multi_cs()
607 * 2. the completed CS has at least one overlapping stream in complete_multi_cs()
613 /* extract the timestamp only of first completed CS */ in complete_multi_cs()
623 * least one CS will be set as completed when polling in complete_multi_cs()
631 /* In case CS completed without mcs completion initialized */ in complete_multi_cs()
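A rough sketch of the matching logic the comments above describe (structure and field names are assumptions based on this file; locking and error handling simplified): a completed CS wakes every armed multi-CS waiter whose stream-master QID map overlaps the map stored in the CS fence, and only the first completion stamps the timestamp.

	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
				 mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp = ktime_to_ns(fence->timestamp);

			complete_all(&mcs_compl->completion);
		}

		spin_unlock(&mcs_compl->lock);
	}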
636 struct hl_cs *cs, in cs_release_sob_reset_handler() argument
639 /* Skip this handler if the cs wasn't submitted, to avoid putting in cs_release_sob_reset_handler()
643 if (!hl_cs_cmpl->hw_sob || !cs->submitted) in cs_release_sob_reset_handler()
649 * we get refcount upon reservation of signals or signal/wait cs for the in cs_release_sob_reset_handler()
650 * hw_sob object, and need to put it when the first staged cs in cs_release_sob_reset_handler()
651 * (which contains the encaps signals) or cs signal/wait is completed. in cs_release_sob_reset_handler()
658 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n", in cs_release_sob_reset_handler()
676 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); in cs_do_release() local
677 struct hl_device *hdev = cs->ctx->hdev; in cs_do_release()
680 container_of(cs->fence, struct hl_cs_compl, base_fence); in cs_do_release()
682 cs->completed = true; in cs_do_release()
686 * finished, because each one of them took refcnt to CS, we still in cs_do_release()
688 * will have leaked memory and what's worse, the CS object (and in cs_do_release()
692 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_do_release()
695 if (!cs->submitted) { in cs_do_release()
697 * In case the wait for signal CS was submitted, the fence put in cs_do_release()
701 if (cs->type == CS_TYPE_WAIT || in cs_do_release()
702 cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
703 hl_fence_put(cs->signal_fence); in cs_do_release()
709 hl_hw_queue_update_ci(cs); in cs_do_release()
711 /* remove CS from CS mirror list */ in cs_do_release()
713 list_del_init(&cs->mirror_node); in cs_do_release()
716 cs_handle_tdr(hdev, cs); in cs_do_release()
718 if (cs->staged_cs) { in cs_do_release()
719 /* the completion CS decrements reference for the entire in cs_do_release()
722 if (cs->staged_last) { in cs_do_release()
726 &cs->staged_cs_node, staged_cs_node) in cs_do_release()
730 /* A staged CS will be a member in the list only after it in cs_do_release()
734 if (cs->submitted) { in cs_do_release()
736 list_del(&cs->staged_cs_node); in cs_do_release()
740 /* decrement refcount to handle when first staged cs in cs_do_release()
748 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
749 && cs->encaps_signals) in cs_do_release()
750 kref_put(&cs->encaps_sig_hdl->refcount, in cs_do_release()
757 hl_debugfs_remove_cs(cs); in cs_do_release()
759 hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL; in cs_do_release()
765 if (cs->timedout) in cs_do_release()
766 cs->fence->error = -ETIMEDOUT; in cs_do_release()
767 else if (cs->aborted) in cs_do_release()
768 cs->fence->error = -EIO; in cs_do_release()
769 else if (!cs->submitted) in cs_do_release()
770 cs->fence->error = -EBUSY; in cs_do_release()
772 if (unlikely(cs->skip_reset_on_timeout)) { in cs_do_release()
775 cs->sequence, in cs_do_release()
776 div_u64(jiffies - cs->submission_time_jiffies, HZ)); in cs_do_release()
779 if (cs->timestamp) { in cs_do_release()
780 cs->fence->timestamp = ktime_get(); in cs_do_release()
781 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence, in cs_do_release()
782 cs->fence->timestamp, cs->fence->error); in cs_do_release()
785 hl_ctx_put(cs->ctx); in cs_do_release()
787 complete_all(&cs->fence->completion); in cs_do_release()
788 complete_multi_cs(hdev, cs); in cs_do_release()
790 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); in cs_do_release()
792 hl_fence_put(cs->fence); in cs_do_release()
794 kfree(cs->jobs_in_queue_cnt); in cs_do_release()
795 kfree(cs); in cs_do_release()
803 struct hl_cs *cs = container_of(work, struct hl_cs, in cs_timedout() local
805 bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false; in cs_timedout()
807 rc = cs_get_unless_zero(cs); in cs_timedout()
811 if ((!cs->submitted) || (cs->completed)) { in cs_timedout()
812 cs_put(cs); in cs_timedout()
816 hdev = cs->ctx->hdev; in cs_timedout()
824 /* Mark the CS is timed out so we won't try to cancel its TDR */ in cs_timedout()
825 cs->timedout = true; in cs_timedout()
828 /* Save only the first CS timeout parameters */ in cs_timedout()
832 hdev->captured_err_info.cs_timeout.seq = cs->sequence; in cs_timedout()
840 switch (cs->type) { in cs_timedout()
844 cs->sequence); in cs_timedout()
850 cs->sequence); in cs_timedout()
856 cs->sequence); in cs_timedout()
862 cs->sequence); in cs_timedout()
870 cs_put(cs); in cs_timedout()
883 struct hl_cs *cs; in allocate_cs() local
888 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); in allocate_cs()
889 if (!cs) in allocate_cs()
890 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in allocate_cs()
892 if (!cs) { in allocate_cs()
901 cs->ctx = ctx; in allocate_cs()
902 cs->submitted = false; in allocate_cs()
903 cs->completed = false; in allocate_cs()
904 cs->type = cs_type; in allocate_cs()
905 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); in allocate_cs()
906 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); in allocate_cs()
907 cs->timeout_jiffies = timeout; in allocate_cs()
908 cs->skip_reset_on_timeout = in allocate_cs()
911 cs->submission_time_jiffies = jiffies; in allocate_cs()
912 INIT_LIST_HEAD(&cs->job_list); in allocate_cs()
913 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); in allocate_cs()
914 kref_init(&cs->refcount); in allocate_cs()
915 spin_lock_init(&cs->job_lock); in allocate_cs()
928 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
929 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); in allocate_cs()
930 if (!cs->jobs_in_queue_cnt) in allocate_cs()
931 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
932 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL); in allocate_cs()
934 if (!cs->jobs_in_queue_cnt) { in allocate_cs()
942 cs_cmpl->type = cs->type; in allocate_cs()
944 cs->fence = &cs_cmpl->base_fence; in allocate_cs()
957 * This causes a deadlock because this CS will never be in allocate_cs()
958 * completed as it depends on future CS's for completion. in allocate_cs()
962 "Staged CS %llu deadlock due to lack of resources", in allocate_cs()
966 "Rejecting CS because of too many in-flights CS\n"); in allocate_cs()
976 cs->sequence = cs_cmpl->cs_seq; in allocate_cs()
989 *cs_new = cs; in allocate_cs()
995 kfree(cs->jobs_in_queue_cnt); in allocate_cs()
999 kfree(cs); in allocate_cs()
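allocate_cs() above first tries GFP_ATOMIC and falls back to GFP_KERNEL, presumably to keep the submission path from sleeping when memory is readily available. The general shape of that pattern (illustrative only, not the driver's exact code) is:

	obj = kzalloc(sizeof(*obj), GFP_ATOMIC);	/* non-sleeping, may fail under pressure */
	if (!obj)
		obj = kzalloc(sizeof(*obj), GFP_KERNEL);	/* sleeping retry */
	if (!obj)
		return -ENOMEM;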
1004 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) in cs_rollback() argument
1008 staged_cs_put(hdev, cs); in cs_rollback()
1010 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_rollback()
1017 struct hl_cs *cs, *tmp; in hl_cs_rollback_all() local
1022 /* flush all completions before iterating over the CS mirror list in in hl_cs_rollback_all()
1031 /* Make sure we don't have leftovers in the CS mirror list */ in hl_cs_rollback_all()
1032 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { in hl_cs_rollback_all()
1033 cs_get(cs); in hl_cs_rollback_all()
1034 cs->aborted = true; in hl_cs_rollback_all()
1035 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", in hl_cs_rollback_all()
1036 cs->ctx->asid, cs->sequence); in hl_cs_rollback_all()
1037 cs_rollback(hdev, cs); in hl_cs_rollback_all()
1038 cs_put(cs); in hl_cs_rollback_all()
1096 struct hl_cs *cs = job->cs; in job_wq_completion() local
1097 struct hl_device *hdev = cs->ctx->hdev; in job_wq_completion()
1105 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work); in cs_completion() local
1106 struct hl_device *hdev = cs->ctx->hdev; in cs_completion()
1109 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_completion()
1276 "CS type flags are mutually exclusive, context %d\n", in hl_cs_sanity_checks()
1288 dev_err(hdev->dev, "Sync stream CS is not supported\n"); in hl_cs_sanity_checks()
1294 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid); in hl_cs_sanity_checks()
1299 "Sync stream CS mandates one chunk only, context %d\n", in hl_cs_sanity_checks()
1338 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); in hl_cs_copy_chunk_array()
1346 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, in cs_staged_submission() argument
1353 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); in cs_staged_submission()
1354 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); in cs_staged_submission()
1356 if (cs->staged_first) { in cs_staged_submission()
1357 /* Staged CS sequence is the first CS sequence */ in cs_staged_submission()
1358 INIT_LIST_HEAD(&cs->staged_cs_node); in cs_staged_submission()
1359 cs->staged_sequence = cs->sequence; in cs_staged_submission()
1361 if (cs->encaps_signals) in cs_staged_submission()
1362 cs->encaps_sig_hdl_id = encaps_signal_handle; in cs_staged_submission()
1367 cs->staged_sequence = sequence; in cs_staged_submission()
1370 /* Increment CS reference if needed */ in cs_staged_submission()
1371 staged_cs_get(hdev, cs); in cs_staged_submission()
1373 cs->staged_cs = true; in cs_staged_submission()
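Based on cs_staged_submission() above and the note near the ioctl entry point that a staged CS must carry the user-supplied sequence, a hypothetical three-part staged submission would be flagged roughly as follows (the FIRST/LAST flag names are taken from the listing; HL_CS_FLAGS_STAGED_SUBMISSION as the umbrella flag is an assumption):

	/*
	 *   CS A: HL_CS_FLAGS_STAGED_SUBMISSION | HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
	 *         -> cs->staged_sequence is set to A's own sequence
	 *   CS B: HL_CS_FLAGS_STAGED_SUBMISSION, user passes A's sequence
	 *         -> cs->staged_sequence = A's sequence, reference taken via staged_cs_get()
	 *   CS C: HL_CS_FLAGS_STAGED_SUBMISSION | HL_CS_FLAGS_STAGED_SUBMISSION_LAST,
	 *         user passes A's sequence
	 *         -> only C gets a completion and it releases the whole chain
	 */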
1400 struct hl_cs *cs; in cs_ioctl_default() local
1422 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags, in cs_ioctl_default()
1427 *cs_seq = cs->sequence; in cs_ioctl_default()
1429 hl_debugfs_add_cs(cs); in cs_ioctl_default()
1431 rc = cs_staged_submission(hdev, cs, user_sequence, flags, in cs_ioctl_default()
1437 * rather than the internal CS sequence in cs_ioctl_default()
1439 if (cs->staged_cs) in cs_ioctl_default()
1440 *cs_seq = cs->staged_sequence; in cs_ioctl_default()
1442 /* Validate ALL the CS chunks before submitting the CS */ in cs_ioctl_default()
1475 * queues of this CS in cs_ioctl_default()
1500 job->cs = cs; in cs_ioctl_default()
1505 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_default()
1506 cs->jobs_cnt++; in cs_ioctl_default()
1508 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_default()
1511 * Increment CS reference. When CS reference is 0, CS is in cs_ioctl_default()
1516 if (cs_needs_completion(cs) && in cs_ioctl_default()
1519 cs_get(cs); in cs_ioctl_default()
1528 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", in cs_ioctl_default()
1529 cs->ctx->asid, cs->sequence, job->id, rc); in cs_ioctl_default()
1534 /* We allow a CS with any queue type combination as long as it does in cs_ioctl_default()
1537 if (int_queues_only && cs_needs_completion(cs)) { in cs_ioctl_default()
1541 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n", in cs_ioctl_default()
1542 cs->ctx->asid, cs->sequence); in cs_ioctl_default()
1548 INIT_WORK(&cs->finish_work, cs_completion); in cs_ioctl_default()
1551 * store the (external/HW queues) streams used by the CS in the in cs_ioctl_default()
1552 * fence object for multi-CS completion in cs_ioctl_default()
1555 cs->fence->stream_master_qid_map = stream_master_qid_map; in cs_ioctl_default()
1557 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_default()
1561 "Failed to submit CS %d.%llu to H/W queues, error %d\n", in cs_ioctl_default()
1562 cs->ctx->asid, cs->sequence, rc); in cs_ioctl_default()
1566 *signal_initial_sob_count = cs->initial_sob_count; in cs_ioctl_default()
1575 cs_rollback(hdev, cs); in cs_ioctl_default()
1579 /* We finished with the CS in this function, so put the ref */ in cs_ioctl_default()
1580 cs_put(cs); in cs_ioctl_default()
1609 "Failed to switch to context %d, rejecting CS! %d\n", in hl_cs_ctx_switch()
1634 "Need to run restore phase but restore CS is empty\n"); in hl_cs_ctx_switch()
1645 "Failed to submit restore CS for context %d (%d)\n", in hl_cs_ctx_switch()
1664 "Restore CS for context %d failed to complete %d\n", in hl_cs_ctx_switch()
1699 * @hw_sob: the H/W SOB used in this signal CS.
1756 * for the reservation or the next signal cs. in hl_cs_signal_sob_wraparound_handler()
1757 * we do it here, and for both encaps and regular signal cs in hl_cs_signal_sob_wraparound_handler()
1761 * in addition, if we have combination of cs signal and in hl_cs_signal_sob_wraparound_handler()
1763 * no more reservations and only signal cs keep coming, in hl_cs_signal_sob_wraparound_handler()
1805 "Wait for signal CS supports only one signal CS seq\n"); in cs_ioctl_extract_signal_seq()
1844 struct hl_ctx *ctx, struct hl_cs *cs, in cs_ioctl_signal_wait_create_jobs() argument
1862 if (cs->type == CS_TYPE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1877 job->cs = cs; in cs_ioctl_signal_wait_create_jobs()
1883 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1884 && cs->encaps_signals) in cs_ioctl_signal_wait_create_jobs()
1897 cs_get(cs); in cs_ioctl_signal_wait_create_jobs()
1899 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_signal_wait_create_jobs()
1900 cs->jobs_cnt++; in cs_ioctl_signal_wait_create_jobs()
1902 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_signal_wait_create_jobs()
2115 struct hl_cs *cs; in cs_ioctl_signal_wait() local
2187 /* check if cs sequence has encapsulated in cs_ioctl_signal_wait()
2198 * needed when multiple wait cs are used with offset in cs_ioctl_signal_wait()
2213 /* treat as signal CS already finished */ in cs_ioctl_signal_wait()
2236 "Failed to get signal CS with seq 0x%llx\n", in cs_ioctl_signal_wait()
2243 /* signal CS already finished */ in cs_ioctl_signal_wait()
2260 "CS seq 0x%llx is not of a signal/encaps-signal CS\n", in cs_ioctl_signal_wait()
2268 /* signal CS already finished */ in cs_ioctl_signal_wait()
2275 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); in cs_ioctl_signal_wait()
2284 * Save the signal CS fence for later initialization right before in cs_ioctl_signal_wait()
2285 * hanging the wait CS on the queue. in cs_ioctl_signal_wait()
2286 * for encaps signals case, we save the cs sequence and handle pointer in cs_ioctl_signal_wait()
2290 cs->signal_fence = sig_fence; in cs_ioctl_signal_wait()
2295 if (cs->encaps_signals) in cs_ioctl_signal_wait()
2296 cs->encaps_sig_hdl = encaps_sig_hdl; in cs_ioctl_signal_wait()
2299 hl_debugfs_add_cs(cs); in cs_ioctl_signal_wait()
2301 *cs_seq = cs->sequence; in cs_ioctl_signal_wait()
2304 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, in cs_ioctl_signal_wait()
2308 cs, q_idx, collective_engine_id, in cs_ioctl_signal_wait()
2320 INIT_WORK(&cs->finish_work, cs_completion); in cs_ioctl_signal_wait()
2322 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_signal_wait()
2324 /* In case wait cs failed here, it means the signal cs in cs_ioctl_signal_wait()
2332 "Failed to submit CS %d.%llu to H/W queues, error %d\n", in cs_ioctl_signal_wait()
2333 ctx->asid, cs->sequence, rc); in cs_ioctl_signal_wait()
2337 *signal_sob_addr_offset = cs->sob_addr_offset; in cs_ioctl_signal_wait()
2338 *signal_initial_sob_count = cs->initial_sob_count; in cs_ioctl_signal_wait()
2346 cs_rollback(hdev, cs); in cs_ioctl_signal_wait()
2350 /* We finished with the CS in this function, so put the ref */ in cs_ioctl_signal_wait()
2351 cs_put(cs); in cs_ioctl_signal_wait()
2422 /* In case this is a staged CS, user should supply the CS sequence */ in hl_cs_ioctl()
2502 "Can't wait on CS %llu because current CS is at seq %llu\n", in hl_wait_for_fence()
2510 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", in hl_wait_for_fence()
2551 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2553 * @mcs_data: multi-CS internal data
2554 * @mcs_compl: multi-CS completion structure
2558 * The function iterates on all CS sequence in the list and set bit in
2559 * completion_bitmap for each completed CS.
2562 * completion to the multi-CS context.
2583 * 1. CS will complete the multi-CS prior to clearing the completion, in which in hl_cs_poll_fences()
2584 * case the fence iteration is guaranteed to catch the CS completion. in hl_cs_poll_fences()
2601 * In order to prevent a case where we wait until timeout even though a CS associated in hl_cs_poll_fences()
2602 * with the multi-CS actually completed, we do things in the below order: in hl_cs_poll_fences()
2603 * 1. for each fence set its QID map in the multi-CS completion QID map. This way in hl_cs_poll_fences()
2604 * any CS can, potentially, complete the multi CS for the specific QID (note in hl_cs_poll_fences()
2607 * 2. only after allowing multi-CS completion for the specific QID we check whether in hl_cs_poll_fences()
2608 * the specific CS already completed (and thus the wait for completion part will in hl_cs_poll_fences()
2609 * be skipped). If the CS is not completed, it is guaranteed that the completing CS will in hl_cs_poll_fences()
2622 "wait_for_fence error :%d for CS seq %llu\n", in hl_cs_poll_fences()
2629 /* CS did not finish, QID to wait on already stored */ in hl_cs_poll_fences()
2634 * returns to user indicating CS completed before it finished in hl_cs_poll_fences()
2642 * in case multi CS is completed but MCS handling not done in hl_cs_poll_fences()
2643 * we "complete" the multi CS to prevent it from waiting in hl_cs_poll_fences()
2644 * until time-out and the "multi-CS handling done" will have in hl_cs_poll_fences()
2667 * already gone. In this case, CS set as completed but in hl_cs_poll_fences()
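The two-step ordering described above can be condensed into a small, illustrative sketch (not the exact driver code): the fence's QID map is published to the multi-CS completion before the completion check, so a CS that completes concurrently either shows up as completed here or completes the multi-CS itself through the just-published map.

	/* 1. allow this CS to complete the multi-CS for its QIDs */
	mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;

	/* 2. only then check whether it already completed */
	if (status == CS_WAIT_STATUS_COMPLETED)
		mcs_data->completion_bitmap |= BIT(i);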
2750 * to multi-CS CSs will be set incrementally at a later stage in hl_wait_multi_cs_completion_init()
2760 dev_err(hdev->dev, "no available multi-CS completion structure\n"); in hl_wait_multi_cs_completion_init()
2785 * hl_wait_multi_cs_completion - wait for first CS to complete
2787 * @mcs_data: multi-CS internal data
2809 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2827 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2830 * @data: pointer to multi-CS wait ioctl in/out args
2848 dev_err(hdev->dev, "Wait for multi CS is not supported\n"); in hl_multi_cs_wait_ioctl()
2866 /* copy CS sequence array from user */ in hl_multi_cs_wait_ioctl()
2870 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n"); in hl_multi_cs_wait_ioctl()
2882 /* initialize the multi-CS internal data */ in hl_multi_cs_wait_ioctl()
2890 /* wait (with timeout) for the first CS to be completed */ in hl_multi_cs_wait_ioctl()
2898 /* poll all CS fences, extract timestamp */ in hl_multi_cs_wait_ioctl()
2902 * skip wait for CS completion when one of the below is true: in hl_multi_cs_wait_ioctl()
2904 * - one or more CS in the list completed in hl_multi_cs_wait_ioctl()
2916 * poll fences once again to update the CS map. in hl_multi_cs_wait_ioctl()
2927 * it got a completion) it either got completed by CS in the multi CS list in hl_multi_cs_wait_ioctl()
2929 * got completed by CS submitted to one of the shared stream master but in hl_multi_cs_wait_ioctl()
2930 * not in the multi CS list (in which case we should wait again but modify in hl_multi_cs_wait_ioctl()
2931 * the timeout and set timestamp as zero to let a CS related to the current in hl_multi_cs_wait_ioctl()
2932 * multi-CS set a new, relevant, timestamp) in hl_multi_cs_wait_ioctl()
2953 "user process got signal while waiting for Multi-CS\n"); in hl_multi_cs_wait_ioctl()
2970 /* update if some CS was gone */ in hl_multi_cs_wait_ioctl()
2993 "user process got signal while waiting for CS handle %llu\n", in hl_cs_wait_ioctl()
3003 "CS %llu has timed-out while user process is waiting for it\n", in hl_cs_wait_ioctl()
3008 "CS %llu has been aborted while user process is waiting for it\n", in hl_cs_wait_ioctl()