Lines Matching full:cs

18  * enum hl_cs_wait_status - cs wait status
19 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
20 * @CS_WAIT_STATUS_COMPLETED: cs completed
21 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
149 void cs_get(struct hl_cs *cs) in cs_get() argument
151 kref_get(&cs->refcount); in cs_get()
154 static int cs_get_unless_zero(struct hl_cs *cs) in cs_get_unless_zero() argument
156 return kref_get_unless_zero(&cs->refcount); in cs_get_unless_zero()
159 static void cs_put(struct hl_cs *cs) in cs_put() argument
161 kref_put(&cs->refcount, cs_do_release); in cs_put()
176 bool cs_needs_completion(struct hl_cs *cs) in cs_needs_completion() argument
178 /* In case this is a staged CS, only the last CS in sequence should in cs_needs_completion()
179 * get a completion, any non staged CS will always get a completion in cs_needs_completion()
181 if (cs->staged_cs && !cs->staged_last) in cs_needs_completion()
187 bool cs_needs_timeout(struct hl_cs *cs) in cs_needs_timeout() argument
189 /* In case this is a staged CS, only the first CS in sequence should in cs_needs_timeout()
190 * get a timeout, any non staged CS will always get a timeout in cs_needs_timeout()
192 if (cs->staged_cs && !cs->staged_first) in cs_needs_timeout()
227 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
228 parser.cs_sequence = job->cs->sequence; in cs_parser()
239 parser.completion = cs_needs_completion(job->cs); in cs_parser()
254 * won't be accessed again for this CS in cs_parser()
268 struct hl_cs *cs = job->cs; in complete_job() local
299 spin_lock(&cs->job_lock); in complete_job()
301 spin_unlock(&cs->job_lock); in complete_job()
305 /* We decrement reference only for a CS that gets completion in complete_job()
306 * because the reference was incremented only for this kind of CS in complete_job()
309 * In staged submission, only the last CS marked as 'staged_last' in complete_job()
311 * As for all the rest CS's in the staged submission which do not get in complete_job()
312 * completion, their CS reference will be decremented by the in complete_job()
313 * 'staged_last' CS during the CS release flow. in complete_job()
314 * All relevant PQ CI counters will be incremented during the CS release in complete_job()
317 if (cs_needs_completion(cs) && in complete_job()
320 cs_put(cs); in complete_job()
326 * hl_staged_cs_find_first - locate the first CS in this staged submission
333 * Find and return a CS pointer with the given sequence
337 struct hl_cs *cs; in hl_staged_cs_find_first() local
339 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node) in hl_staged_cs_find_first()
340 if (cs->staged_cs && cs->staged_first && in hl_staged_cs_find_first()
341 cs->sequence == cs_seq) in hl_staged_cs_find_first()
342 return cs; in hl_staged_cs_find_first()
348 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
351 * @cs: staged submission member
354 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs) in is_staged_cs_last_exists() argument
358 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs, in is_staged_cs_last_exists()
368 * staged_cs_get - get CS reference if this CS is a part of a staged CS
371 * @cs: current CS
374 * Increment CS reference for every CS in this staged submission except for
375 * the CS which get completion.
377 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_get() argument
379 /* Only the last CS in this staged submission will get a completion. in staged_cs_get()
380 * We must increment the reference for all other CS's in this in staged_cs_get()
384 if (!cs->staged_last) in staged_cs_get()
385 cs_get(cs); in staged_cs_get()
389 * staged_cs_put - put a CS in case it is part of staged submission
392 * @cs: CS to put
394 * This function decrements a CS reference (for a non-completion CS) in staged_cs_put()
396 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_put() argument
398 /* We release all CS's in a staged submission except the last in staged_cs_put()
399 * CS which we have never incremented its reference. in staged_cs_put()
401 if (!cs_needs_completion(cs)) in staged_cs_put()
402 cs_put(cs); in staged_cs_put()
405 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) in cs_handle_tdr() argument
410 if (!cs_needs_timeout(cs)) in cs_handle_tdr()
416 * Hence, we choose the CS that reaches this function first which is in cs_handle_tdr()
417 * the CS marked as 'staged_last'. in cs_handle_tdr()
418 * In case single staged cs was submitted which has both first and last in cs_handle_tdr()
420 * removed the cs node from the list before getting here, in cs_handle_tdr()
421 * in such cases just continue with the cs to cancel it's TDR work. in cs_handle_tdr()
423 if (cs->staged_cs && cs->staged_last) { in cs_handle_tdr()
424 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); in cs_handle_tdr()
426 cs = first_cs; in cs_handle_tdr()
431 /* Don't cancel TDR in case this CS was timedout because we might be in cs_handle_tdr()
434 if (cs && (cs->timedout || in cs_handle_tdr()
438 if (cs && cs->tdr_active) in cs_handle_tdr()
439 cancel_delayed_work_sync(&cs->work_tdr); in cs_handle_tdr()
443 /* queue TDR for next CS */ in cs_handle_tdr()
459 * force_complete_multi_cs - complete all contexts that wait on multi-CS
480 * multi-CS. in force_complete_multi_cs()
485 "multi-CS completion context %d still waiting when calling force completion\n", in force_complete_multi_cs()
493 * complete_multi_cs - complete all waiting entities on multi-CS
496 * @cs: CS structure
498 * with the completed CS.
500 * - a completed CS worked on stream master QID 4, multi CS completion
503 * - a completed CS worked on stream master QID 4, multi CS completion
507 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs) in complete_multi_cs() argument
509 struct hl_fence *fence = cs->fence; in complete_multi_cs()
512 /* in case of multi CS check for completion only for the first CS */ in complete_multi_cs()
513 if (cs->staged_cs && !cs->staged_first) in complete_multi_cs()
528 * 2. the completed CS has at least one overlapping stream in complete_multi_cs()
534 /* extract the timestamp only of first completed CS */ in complete_multi_cs()
546 struct hl_cs *cs, in cs_release_sob_reset_handler() argument
549 /* Skip this handler if the cs wasn't submitted, to avoid putting in cs_release_sob_reset_handler()
553 if (!hl_cs_cmpl->hw_sob || !cs->submitted) in cs_release_sob_reset_handler()
559 * we get refcount upon reservation of signals or signal/wait cs for the in cs_release_sob_reset_handler()
560 * hw_sob object, and need to put it when the first staged cs in cs_release_sob_reset_handler()
561 * (which contains the encaps signals) or cs signal/wait is completed. in cs_release_sob_reset_handler()
568 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n", in cs_release_sob_reset_handler()
586 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); in cs_do_release() local
587 struct hl_device *hdev = cs->ctx->hdev; in cs_do_release()
590 container_of(cs->fence, struct hl_cs_compl, base_fence); in cs_do_release()
592 cs->completed = true; in cs_do_release()
596 * finished, because each one of them took refcnt to CS, we still in cs_do_release()
598 * will have leaked memory and what's worse, the CS object (and in cs_do_release()
602 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_do_release()
605 if (!cs->submitted) { in cs_do_release()
607 * In case the wait for signal CS was submitted, the fence put in cs_do_release()
611 if (cs->type == CS_TYPE_WAIT || in cs_do_release()
612 cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
613 hl_fence_put(cs->signal_fence); in cs_do_release()
619 hl_hw_queue_update_ci(cs); in cs_do_release()
621 /* remove CS from CS mirror list */ in cs_do_release()
623 list_del_init(&cs->mirror_node); in cs_do_release()
626 cs_handle_tdr(hdev, cs); in cs_do_release()
628 if (cs->staged_cs) { in cs_do_release()
629 /* the completion CS decrements reference for the entire in cs_do_release()
632 if (cs->staged_last) { in cs_do_release()
636 &cs->staged_cs_node, staged_cs_node) in cs_do_release()
640 /* A staged CS will be a member in the list only after it in cs_do_release()
644 if (cs->submitted) { in cs_do_release()
646 list_del(&cs->staged_cs_node); in cs_do_release()
650 /* decrement refcount to handle when first staged cs in cs_do_release()
658 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
659 && cs->encaps_signals) in cs_do_release()
660 kref_put(&cs->encaps_sig_hdl->refcount, in cs_do_release()
667 hl_debugfs_remove_cs(cs); in cs_do_release()
669 hl_ctx_put(cs->ctx); in cs_do_release()
675 if (cs->timedout) in cs_do_release()
676 cs->fence->error = -ETIMEDOUT; in cs_do_release()
677 else if (cs->aborted) in cs_do_release()
678 cs->fence->error = -EIO; in cs_do_release()
679 else if (!cs->submitted) in cs_do_release()
680 cs->fence->error = -EBUSY; in cs_do_release()
682 if (unlikely(cs->skip_reset_on_timeout)) { in cs_do_release()
685 cs->sequence, in cs_do_release()
686 div_u64(jiffies - cs->submission_time_jiffies, HZ)); in cs_do_release()
689 if (cs->timestamp) in cs_do_release()
690 cs->fence->timestamp = ktime_get(); in cs_do_release()
691 complete_all(&cs->fence->completion); in cs_do_release()
692 complete_multi_cs(hdev, cs); in cs_do_release()
694 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); in cs_do_release()
696 hl_fence_put(cs->fence); in cs_do_release()
698 kfree(cs->jobs_in_queue_cnt); in cs_do_release()
699 kfree(cs); in cs_do_release()
706 struct hl_cs *cs = container_of(work, struct hl_cs, in cs_timedout() local
708 bool skip_reset_on_timeout = cs->skip_reset_on_timeout; in cs_timedout()
710 rc = cs_get_unless_zero(cs); in cs_timedout()
714 if ((!cs->submitted) || (cs->completed)) { in cs_timedout()
715 cs_put(cs); in cs_timedout()
719 /* Mark the CS is timed out so we won't try to cancel its TDR */ in cs_timedout()
721 cs->timedout = true; in cs_timedout()
723 hdev = cs->ctx->hdev; in cs_timedout()
725 switch (cs->type) { in cs_timedout()
729 cs->sequence); in cs_timedout()
735 cs->sequence); in cs_timedout()
741 cs->sequence); in cs_timedout()
747 cs->sequence); in cs_timedout()
755 cs_put(cs); in cs_timedout()
772 struct hl_cs *cs; in allocate_cs() local
777 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); in allocate_cs()
778 if (!cs) in allocate_cs()
779 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in allocate_cs()
781 if (!cs) { in allocate_cs()
790 cs->ctx = ctx; in allocate_cs()
791 cs->submitted = false; in allocate_cs()
792 cs->completed = false; in allocate_cs()
793 cs->type = cs_type; in allocate_cs()
794 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); in allocate_cs()
795 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); in allocate_cs()
796 cs->timeout_jiffies = timeout; in allocate_cs()
797 cs->skip_reset_on_timeout = in allocate_cs()
800 cs->submission_time_jiffies = jiffies; in allocate_cs()
801 INIT_LIST_HEAD(&cs->job_list); in allocate_cs()
802 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); in allocate_cs()
803 kref_init(&cs->refcount); in allocate_cs()
804 spin_lock_init(&cs->job_lock); in allocate_cs()
817 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
818 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); in allocate_cs()
819 if (!cs->jobs_in_queue_cnt) in allocate_cs()
820 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
821 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL); in allocate_cs()
823 if (!cs->jobs_in_queue_cnt) { in allocate_cs()
831 cs_cmpl->type = cs->type; in allocate_cs()
833 cs->fence = &cs_cmpl->base_fence; in allocate_cs()
846 * This causes a deadlock because this CS will never be in allocate_cs()
847 * completed as it depends on future CS's for completion. in allocate_cs()
851 "Staged CS %llu deadlock due to lack of resources", in allocate_cs()
855 "Rejecting CS because of too many in-flights CS\n"); in allocate_cs()
865 cs->sequence = cs_cmpl->cs_seq; in allocate_cs()
878 *cs_new = cs; in allocate_cs()
884 kfree(cs->jobs_in_queue_cnt); in allocate_cs()
888 kfree(cs); in allocate_cs()
893 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) in cs_rollback() argument
897 staged_cs_put(hdev, cs); in cs_rollback()
899 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_rollback()
906 struct hl_cs *cs, *tmp; in hl_cs_rollback_all() local
910 /* flush all completions before iterating over the CS mirror list in in hl_cs_rollback_all()
916 /* Make sure we don't have leftovers in the CS mirror list */ in hl_cs_rollback_all()
917 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { in hl_cs_rollback_all()
918 cs_get(cs); in hl_cs_rollback_all()
919 cs->aborted = true; in hl_cs_rollback_all()
920 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", in hl_cs_rollback_all()
921 cs->ctx->asid, cs->sequence); in hl_cs_rollback_all()
922 cs_rollback(hdev, cs); in hl_cs_rollback_all()
923 cs_put(cs); in hl_cs_rollback_all()
972 struct hl_cs *cs = job->cs; in job_wq_completion() local
973 struct hl_device *hdev = cs->ctx->hdev; in job_wq_completion()
1124 "Device is %s. Can't submit new CS\n", in hl_cs_sanity_checks()
1139 "CS type flags are mutually exclusive, context %d\n", in hl_cs_sanity_checks()
1149 dev_err(hdev->dev, "Sync stream CS is not supported\n"); in hl_cs_sanity_checks()
1156 "Got execute CS with 0 chunks, context %d\n", in hl_cs_sanity_checks()
1162 "Sync stream CS mandates one chunk only, context %d\n", in hl_cs_sanity_checks()
1201 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); in hl_cs_copy_chunk_array()
1209 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, in cs_staged_submission() argument
1216 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); in cs_staged_submission()
1217 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); in cs_staged_submission()
1219 if (cs->staged_first) { in cs_staged_submission()
1220 /* Staged CS sequence is the first CS sequence */ in cs_staged_submission()
1221 INIT_LIST_HEAD(&cs->staged_cs_node); in cs_staged_submission()
1222 cs->staged_sequence = cs->sequence; in cs_staged_submission()
1224 if (cs->encaps_signals) in cs_staged_submission()
1225 cs->encaps_sig_hdl_id = encaps_signal_handle; in cs_staged_submission()
1230 cs->staged_sequence = sequence; in cs_staged_submission()
1233 /* Increment CS reference if needed */ in cs_staged_submission()
1234 staged_cs_get(hdev, cs); in cs_staged_submission()
1236 cs->staged_cs = true; in cs_staged_submission()
1262 struct hl_cs *cs; in cs_ioctl_default() local
1284 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags, in cs_ioctl_default()
1289 *cs_seq = cs->sequence; in cs_ioctl_default()
1291 hl_debugfs_add_cs(cs); in cs_ioctl_default()
1293 rc = cs_staged_submission(hdev, cs, user_sequence, flags, in cs_ioctl_default()
1299 * rather than the internal CS sequence in cs_ioctl_default()
1301 if (cs->staged_cs) in cs_ioctl_default()
1302 *cs_seq = cs->staged_sequence; in cs_ioctl_default()
1304 /* Validate ALL the CS chunks before submitting the CS */ in cs_ioctl_default()
1337 * queues of this CS in cs_ioctl_default()
1359 job->cs = cs; in cs_ioctl_default()
1364 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_default()
1366 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_default()
1369 * Increment CS reference. When CS reference is 0, CS is in cs_ioctl_default()
1374 if (cs_needs_completion(cs) && in cs_ioctl_default()
1377 cs_get(cs); in cs_ioctl_default()
1386 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", in cs_ioctl_default()
1387 cs->ctx->asid, cs->sequence, job->id, rc); in cs_ioctl_default()
1392 /* We allow a CS with any queue type combination as long as it does in cs_ioctl_default()
1395 if (int_queues_only && cs_needs_completion(cs)) { in cs_ioctl_default()
1399 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n", in cs_ioctl_default()
1400 cs->ctx->asid, cs->sequence); in cs_ioctl_default()
1406 * store the (external/HW queues) streams used by the CS in the in cs_ioctl_default()
1407 * fence object for multi-CS completion in cs_ioctl_default()
1410 cs->fence->stream_master_qid_map = stream_master_qid_map; in cs_ioctl_default()
1412 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_default()
1416 "Failed to submit CS %d.%llu to H/W queues, error %d\n", in cs_ioctl_default()
1417 cs->ctx->asid, cs->sequence, rc); in cs_ioctl_default()
1428 cs_rollback(hdev, cs); in cs_ioctl_default()
1432 /* We finished with the CS in this function, so put the ref */ in cs_ioctl_default()
1433 cs_put(cs); in cs_ioctl_default()
1460 "Failed to switch to context %d, rejecting CS! %d\n", in hl_cs_ctx_switch()
1485 "Need to run restore phase but restore CS is empty\n"); in hl_cs_ctx_switch()
1496 "Failed to submit restore CS for context %d (%d)\n", in hl_cs_ctx_switch()
1515 "Restore CS for context %d failed to complete %d\n", in hl_cs_ctx_switch()
1549 * @hw_sob: the H/W SOB used in this signal CS.
1606 * for the reservation or the next signal cs. in hl_cs_signal_sob_wraparound_handler()
1607 * we do it here, and for both encaps and regular signal cs in hl_cs_signal_sob_wraparound_handler()
1611 * in addition, if we have combination of cs signal and in hl_cs_signal_sob_wraparound_handler()
1613 * no more reservations and only signal cs keep coming, in hl_cs_signal_sob_wraparound_handler()
1655 "Wait for signal CS supports only one signal CS seq\n"); in cs_ioctl_extract_signal_seq()
1694 struct hl_ctx *ctx, struct hl_cs *cs, in cs_ioctl_signal_wait_create_jobs() argument
1712 if (cs->type == CS_TYPE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1727 job->cs = cs; in cs_ioctl_signal_wait_create_jobs()
1733 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1734 && cs->encaps_signals) in cs_ioctl_signal_wait_create_jobs()
1747 cs_get(cs); in cs_ioctl_signal_wait_create_jobs()
1749 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_signal_wait_create_jobs()
1751 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_signal_wait_create_jobs()
1956 struct hl_cs *cs; in cs_ioctl_signal_wait() local
2028 /* check if cs sequence has encapsulated in cs_ioctl_signal_wait()
2041 * multiple wait cs are used with offset in cs_ioctl_signal_wait()
2051 /* treat as signal CS already finished */ in cs_ioctl_signal_wait()
2074 "Failed to get signal CS with seq 0x%llx\n", in cs_ioctl_signal_wait()
2081 /* signal CS already finished */ in cs_ioctl_signal_wait()
2098 "CS seq 0x%llx is not of a signal/encaps-signal CS\n", in cs_ioctl_signal_wait()
2106 /* signal CS already finished */ in cs_ioctl_signal_wait()
2113 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); in cs_ioctl_signal_wait()
2122 * Save the signal CS fence for later initialization right before in cs_ioctl_signal_wait()
2123 * hanging the wait CS on the queue. in cs_ioctl_signal_wait()
2124 * for encaps signals case, we save the cs sequence and handle pointer in cs_ioctl_signal_wait()
2128 cs->signal_fence = sig_fence; in cs_ioctl_signal_wait()
2133 if (cs->encaps_signals) in cs_ioctl_signal_wait()
2134 cs->encaps_sig_hdl = encaps_sig_hdl; in cs_ioctl_signal_wait()
2137 hl_debugfs_add_cs(cs); in cs_ioctl_signal_wait()
2139 *cs_seq = cs->sequence; in cs_ioctl_signal_wait()
2142 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, in cs_ioctl_signal_wait()
2146 cs, q_idx, collective_engine_id, in cs_ioctl_signal_wait()
2157 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_signal_wait()
2159 /* In case wait cs failed here, it means the signal cs in cs_ioctl_signal_wait()
2167 "Failed to submit CS %d.%llu to H/W queues, error %d\n", in cs_ioctl_signal_wait()
2168 ctx->asid, cs->sequence, rc); in cs_ioctl_signal_wait()
2178 cs_rollback(hdev, cs); in cs_ioctl_signal_wait()
2182 /* We finished with the CS in this function, so put the ref */ in cs_ioctl_signal_wait()
2183 cs_put(cs); in cs_ioctl_signal_wait()
2218 /* In case this is a staged CS, user should supply the CS sequence */ in hl_cs_ioctl()
2280 "Can't wait on CS %llu because current CS is at seq %llu\n", in hl_wait_for_fence()
2287 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", in hl_wait_for_fence()
2323 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2325 * @mcs_data: multi-CS internal data
2329 * The function iterates on all CS sequence in the list and set bit in
2330 * completion_bitmap for each completed CS.
2369 "wait_for_fence error :%d for CS seq %llu\n", in hl_cs_poll_fences()
2469 "no available multi-CS completion structure\n"); in hl_wait_multi_cs_completion_init()
2494 * hl_wait_multi_cs_completion - wait for first CS to complete
2496 * @mcs_data: multi-CS internal data
2527 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2545 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2548 * @data: pointer to multi-CS wait ioctl in/out args
2565 dev_err(hdev->dev, "Wait for multi CS is not supported\n"); in hl_multi_cs_wait_ioctl()
2583 /* copy CS sequence array from user */ in hl_multi_cs_wait_ioctl()
2587 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n"); in hl_multi_cs_wait_ioctl()
2599 /* initialize the multi-CS internal data */ in hl_multi_cs_wait_ioctl()
2607 /* poll all CS fences, extract timestamp */ in hl_multi_cs_wait_ioctl()
2611 * skip wait for CS completion when one of the below is true: in hl_multi_cs_wait_ioctl()
2613 * - one or more CS in the list completed in hl_multi_cs_wait_ioctl()
2619 /* wait (with timeout) for the first CS to be completed */ in hl_multi_cs_wait_ioctl()
2627 * poll fences once again to update the CS map. in hl_multi_cs_wait_ioctl()
2635 * it got a completion) we expect to see at least one CS in hl_multi_cs_wait_ioctl()
2640 "Multi-CS got completion on wait but no CS completed\n"); in hl_multi_cs_wait_ioctl()
2657 "user process got signal while waiting for Multi-CS\n"); in hl_multi_cs_wait_ioctl()
2674 /* update if some CS was gone */ in hl_multi_cs_wait_ioctl()
2698 "user process got signal while waiting for CS handle %llu\n", in hl_cs_wait_ioctl()
2708 "CS %llu has timed-out while user process is waiting for it\n", in hl_cs_wait_ioctl()
2713 "CS %llu has been aborted while user process is waiting for it\n", in hl_cs_wait_ioctl()