Lines Matching full:cs

53 static void cs_get(struct hl_cs *cs)  in cs_get()  argument
55 kref_get(&cs->refcount); in cs_get()
58 static int cs_get_unless_zero(struct hl_cs *cs) in cs_get_unless_zero() argument
60 return kref_get_unless_zero(&cs->refcount); in cs_get_unless_zero()
63 static void cs_put(struct hl_cs *cs) in cs_put() argument
65 kref_put(&cs->refcount, cs_do_release); in cs_put()
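
The first three hits, cs_get(), cs_get_unless_zero() and cs_put(), are thin wrappers around a struct kref embedded in the command-submission (CS) object, with cs_do_release() as the release callback; names like hl_cs, hl_device and hl_cs_ioctl suggest this listing comes from the habanalabs driver's command-submission code. Below is a minimal sketch of that refcounting pattern; struct demo_cs is a hypothetical stand-in for struct hl_cs (field names follow the matches in this listing, but the layout is invented for illustration) and is reused by the later sketches below:

    #include <linux/dma-fence.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct demo_ctx;                        /* submitting context, details omitted */

    /* Hypothetical stand-in for struct hl_cs, reused by the sketches below. */
    struct demo_cs {
            struct demo_ctx         *ctx;
            struct kref             refcount;
            struct dma_fence        *fence;
            struct delayed_work     work_tdr;       /* timeout watchdog */
            struct list_head        job_list;       /* jobs built from user chunks */
            struct list_head        mirror_node;    /* link in the device's mirror list */
            spinlock_t              job_lock;
            u64                     sequence;
            bool                    submitted;
            bool                    completed;
            bool                    timedout;
            bool                    aborted;
            bool                    tdr_active;
    };

    static void demo_cs_do_release(struct kref *ref);      /* defined further down */

    static void demo_cs_get(struct demo_cs *cs)
    {
            kref_get(&cs->refcount);
    }

    /* Returns 0 if the last reference is already gone; never revives a dying CS. */
    static int demo_cs_get_unless_zero(struct demo_cs *cs)
    {
            return kref_get_unless_zero(&cs->refcount);
    }

    static void demo_cs_put(struct demo_cs *cs)
    {
            kref_put(&cs->refcount, demo_cs_do_release);
    }

cs_get_unless_zero() is what asynchronous paths such as the timeout handler rely on: once the count has reached zero the CS is being torn down and must not be picked up again.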
85 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
86 parser.cs_sequence = job->cs->sequence; in cs_parser()
111 * won't be accessed again for this CS in cs_parser()
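
The cs_parser() matches show each job's parser descriptor being stamped with the submitting context's ASID and the owning CS sequence number before the ASIC-specific parsing runs, after which the user command buffer is not accessed again for this CS. A tiny illustration of that seeding step; struct demo_cs_parser is invented for the example and far smaller than the real hl_cs_parser:

    /* Hypothetical, trimmed-down parser descriptor. */
    struct demo_cs_parser {
            u32     ctx_id;         /* ASID of the submitting context */
            u64     cs_sequence;    /* sequence number of the owning CS */
    };

    static void demo_seed_parser(struct demo_cs_parser *parser,
                                 const struct demo_cs *cs, u32 asid)
    {
            parser->ctx_id = asid;
            parser->cs_sequence = cs->sequence;
            /* ASIC-specific parsing of the user CB would run next. */
    }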
125 struct hl_cs *cs = job->cs; in free_job() local
147 spin_lock(&cs->job_lock); in free_job()
149 spin_unlock(&cs->job_lock); in free_job()
154 cs_put(cs); in free_job()
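
free_job() detaches a finished job from its CS under cs->job_lock and then drops the job's reference on the CS, which is what eventually lets cs_do_release() run. A sketch of that unlink-then-put ordering; struct demo_job is invented, and in this simplified version every job pins the CS (the comment fragments in cs_do_release() below suggest the real driver only takes the reference for some job types):

    /* Hypothetical stand-in for struct hl_cs_job. */
    struct demo_job {
            struct list_head        cs_node;        /* link in cs->job_list */
            struct work_struct      finish_work;    /* completion work, name assumed */
            struct demo_cs          *cs;
    };

    static void demo_free_job(struct demo_job *job)
    {
            struct demo_cs *cs = job->cs;

            /* Unlink under the per-CS job lock so concurrent list walkers stay safe. */
            spin_lock(&cs->job_lock);
            list_del(&job->cs_node);
            spin_unlock(&cs->job_lock);

            kfree(job);

            /* Dropping the job's pin may be the final put that frees the CS. */
            demo_cs_put(cs);
    }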
161 struct hl_cs *cs = container_of(ref, struct hl_cs, in cs_do_release() local
163 struct hl_device *hdev = cs->ctx->hdev; in cs_do_release()
166 cs->completed = true; in cs_do_release()
170 * finished, because each one of them took refcnt to CS, we still in cs_do_release()
172 * will have leaked memory and what's worse, the CS object (and in cs_do_release()
176 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_do_release()
180 if (cs->submitted) { in cs_do_release()
193 dev_crit(hdev->dev, "CS active cnt %d is negative\n", in cs_do_release()
199 hl_int_hw_queue_update_ci(cs); in cs_do_release()
202 /* remove CS from hw_queues mirror list */ in cs_do_release()
203 list_del_init(&cs->mirror_node); in cs_do_release()
207 * Don't cancel TDR in case this CS was timedout because we in cs_do_release()
210 if ((!cs->timedout) && in cs_do_release()
214 if (cs->tdr_active) in cs_do_release()
215 cancel_delayed_work_sync(&cs->work_tdr); in cs_do_release()
219 /* queue TDR for next CS */ in cs_do_release()
238 hl_debugfs_remove_cs(cs); in cs_do_release()
240 hl_ctx_put(cs->ctx); in cs_do_release()
242 if (cs->timedout) in cs_do_release()
243 dma_fence_set_error(cs->fence, -ETIMEDOUT); in cs_do_release()
244 else if (cs->aborted) in cs_do_release()
245 dma_fence_set_error(cs->fence, -EIO); in cs_do_release()
247 dma_fence_signal(cs->fence); in cs_do_release()
248 dma_fence_put(cs->fence); in cs_do_release()
250 kfree(cs); in cs_do_release()
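
cs_do_release() is the kref release callback: it recovers the CS with container_of(), marks it completed, walks cs->job_list to free any leftover jobs (per the comment fragments above), tidies up the hardware-queue bookkeeping and TDR watchdog if the CS was actually submitted, flags the fence with -ETIMEDOUT or -EIO when appropriate, signals and drops the fence, and finally frees the object. A condensed sketch of that shape, leaving out the queue-credit accounting, debugfs removal and context put that the matches above also show:

    static void demo_cs_do_release(struct kref *ref)
    {
            struct demo_cs *cs = container_of(ref, struct demo_cs, refcount);

            cs->completed = true;

            /* The real function frees any jobs still linked on cs->job_list here. */

            if (cs->submitted) {
                    /*
                     * The real driver also updates the queues' completion
                     * indices, holds the device's mirror-list lock around the
                     * removal below, and re-arms the TDR for the next CS.
                     */
                    list_del_init(&cs->mirror_node);

                    /*
                     * Don't cancel the TDR when this CS itself timed out; we
                     * may be running from that very work item.
                     */
                    if (!cs->timedout && cs->tdr_active)
                            cancel_delayed_work_sync(&cs->work_tdr);
            }

            /* Record how the CS ended before waking any waiters on the fence. */
            if (cs->fence) {
                    if (cs->timedout)
                            dma_fence_set_error(cs->fence, -ETIMEDOUT);
                    else if (cs->aborted)
                            dma_fence_set_error(cs->fence, -EIO);

                    dma_fence_signal(cs->fence);
                    dma_fence_put(cs->fence);
            }

            kfree(cs);
    }

The NULL check on cs->fence is only there because the allocation sketch further down omits the fence plumbing; in the real driver a submitted CS always carries a fence.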
257 struct hl_cs *cs = container_of(work, struct hl_cs, in cs_timedout() local
259 rc = cs_get_unless_zero(cs); in cs_timedout()
263 if ((!cs->submitted) || (cs->completed)) { in cs_timedout()
264 cs_put(cs); in cs_timedout()
268 /* Mark the CS is timed out so we won't try to cancel its TDR */ in cs_timedout()
269 cs->timedout = true; in cs_timedout()
271 hdev = cs->ctx->hdev; in cs_timedout()
272 ctx_asid = cs->ctx->asid; in cs_timedout()
276 ctx_asid, cs->sequence); in cs_timedout()
278 cs_put(cs); in cs_timedout()
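
cs_timedout() is the handler behind the TDR delayed work: it must first take a reference with cs_get_unless_zero() so a CS whose last reference is already gone is not revived, then bail out quietly if the CS never reached the hardware or has already completed, and otherwise mark the CS timed out so the release path reports -ETIMEDOUT. A sketch of that guard pattern, reusing the demo_* helpers from the sketches above:

    static void demo_cs_timedout(struct work_struct *work)
    {
            struct demo_cs *cs = container_of(to_delayed_work(work),
                                              struct demo_cs, work_tdr);

            /* The CS may be in the middle of being released; never revive it. */
            if (!demo_cs_get_unless_zero(cs))
                    return;

            /* Nothing to do if the CS never ran or has already finished. */
            if (!cs->submitted || cs->completed) {
                    demo_cs_put(cs);
                    return;
            }

            /* Remember the timeout; demo_cs_do_release() turns it into -ETIMEDOUT. */
            cs->timedout = true;

            demo_cs_put(cs);
    }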
289 struct hl_cs *cs; in allocate_cs() local
292 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); in allocate_cs()
293 if (!cs) in allocate_cs()
296 cs->ctx = ctx; in allocate_cs()
297 cs->submitted = false; in allocate_cs()
298 cs->completed = false; in allocate_cs()
299 INIT_LIST_HEAD(&cs->job_list); in allocate_cs()
300 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); in allocate_cs()
301 kref_init(&cs->refcount); in allocate_cs()
302 spin_lock_init(&cs->job_lock); in allocate_cs()
312 cs->fence = &fence->base_fence; in allocate_cs()
321 "Rejecting CS because of too many in-flights CS\n"); in allocate_cs()
329 cs->sequence = fence->cs_seq; in allocate_cs()
341 *cs_new = cs; in allocate_cs()
348 kfree(cs); in allocate_cs()
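
allocate_cs() builds a fresh CS object: a zeroed atomic allocation, then the job list, TDR delayed work, refcount and job lock are initialised, a fence is attached and its sequence number becomes the CS handle, and the submission is rejected when too many CSs are already in flight. A stripped-down sketch of that initialisation order; the fence and in-flight accounting are reduced to a comment because the listing only shows fragments of them:

    static int demo_allocate_cs(struct demo_ctx *ctx, struct demo_cs **cs_new)
    {
            struct demo_cs *cs;

            /* GFP_ATOMIC mirrors the listing: this path must not sleep. */
            cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
            if (!cs)
                    return -ENOMEM;

            cs->ctx = ctx;
            cs->submitted = false;
            cs->completed = false;
            INIT_LIST_HEAD(&cs->job_list);
            INIT_DELAYED_WORK(&cs->work_tdr, demo_cs_timedout);
            kref_init(&cs->refcount);       /* the submit path owns this reference */
            spin_lock_init(&cs->job_lock);

            /*
             * The real allocate_cs() also creates the fence object pointed to
             * by cs->fence, rejects the request when too many CSs are already
             * in flight, and copies the fence's sequence into cs->sequence.
             */

            *cs_new = cs;
            return 0;
    }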
352 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) in cs_rollback() argument
356 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_rollback()
362 struct hl_cs *cs, *tmp; in hl_cs_rollback_all() local
368 list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, in hl_cs_rollback_all()
370 cs_get(cs); in hl_cs_rollback_all()
371 cs->aborted = true; in hl_cs_rollback_all()
372 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", in hl_cs_rollback_all()
373 cs->ctx->asid, cs->sequence); in hl_cs_rollback_all()
374 cs_rollback(hdev, cs); in hl_cs_rollback_all()
375 cs_put(cs); in hl_cs_rollback_all()
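
cs_rollback() throws away every job still attached to a CS, and hl_cs_rollback_all() walks the device's hardware-queues mirror list, marking each in-flight CS aborted (so its fence carries -EIO) and rolling it back. A sketch of the pair; struct demo_device is invented and holds only what the example needs:

    #include <linux/device.h>

    /* Hypothetical, minimal device object. */
    struct demo_device {
            struct device           *dev;
            struct list_head        hw_queues_mirror_list;  /* CSs handed to H/W */
    };

    static void demo_cs_rollback(struct demo_cs *cs)
    {
            struct demo_job *job, *tmp;

            /* The _safe variant is needed because demo_free_job() unlinks entries. */
            list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                    demo_free_job(job);
    }

    static void demo_cs_rollback_all(struct demo_device *hdev)
    {
            struct demo_cs *cs, *tmp;

            list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
                                     mirror_node) {
                    demo_cs_get(cs);
                    cs->aborted = true;     /* release path reports -EIO */
                    dev_warn_ratelimited(hdev->dev, "Killing CS %llu\n",
                                         cs->sequence);
                    demo_cs_rollback(cs);
                    demo_cs_put(cs);
            }
    }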
383 struct hl_cs *cs = job->cs; in job_wq_completion() local
384 struct hl_device *hdev = cs->ctx->hdev; in job_wq_completion()
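
The two job_wq_completion() matches show how a finished job finds its way back: the work handler recovers the job from its embedded work item and from there reaches the CS and the device through job->cs->ctx. A tiny sketch of that container_of pattern (the finish_work field name was an assumption made in the demo_job sketch above):

    static void demo_job_wq_completion(struct work_struct *work)
    {
            struct demo_job *job = container_of(work, struct demo_job,
                                                finish_work);

            /* Unlink the job from its CS and drop the job's CS reference. */
            demo_free_job(job);
    }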
471 struct hl_cs *cs; in _hl_cs_ioctl() local
496 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); in _hl_cs_ioctl()
504 rc = allocate_cs(hdev, hpriv->ctx, &cs); in _hl_cs_ioctl()
510 *cs_seq = cs->sequence; in _hl_cs_ioctl()
512 hl_debugfs_add_cs(cs); in _hl_cs_ioctl()
514 /* Validate ALL the CS chunks before submitting the CS */ in _hl_cs_ioctl()
540 job->cs = cs; in _hl_cs_ioctl()
549 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in _hl_cs_ioctl()
551 list_add_tail(&job->cs_node, &cs->job_list); in _hl_cs_ioctl()
554 * Increment CS reference. When CS reference is 0, CS is in _hl_cs_ioctl()
560 cs_get(cs); in _hl_cs_ioctl()
567 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", in _hl_cs_ioctl()
568 cs->ctx->asid, cs->sequence, job->id, rc); in _hl_cs_ioctl()
575 "Reject CS %d.%llu because no external queues jobs\n", in _hl_cs_ioctl()
576 cs->ctx->asid, cs->sequence); in _hl_cs_ioctl()
581 rc = hl_hw_queue_schedule_cs(cs); in _hl_cs_ioctl()
584 "Failed to submit CS %d.%llu to H/W queues, error %d\n", in _hl_cs_ioctl()
585 cs->ctx->asid, cs->sequence, rc); in _hl_cs_ioctl()
598 cs_rollback(hdev, cs); in _hl_cs_ioctl()
602 /* We finished with the CS in this function, so put the ref */ in _hl_cs_ioctl()
603 cs_put(cs); in _hl_cs_ioctl()
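
_hl_cs_ioctl() is the core of the submit path visible in the matches above: copy the user's chunk array in, allocate a CS, turn each chunk into a job linked onto cs->job_list (each job taking its own CS reference), hand the CS to the hardware queues, and roll everything back if any step fails; the submit path's own reference is dropped at the end either way, so the CS survives only as long as its jobs do. A heavily condensed sketch of that flow; struct demo_cs_chunk, demo_build_job() and demo_schedule_cs() are invented placeholders for the chunk validation and queue scheduling the listing only hints at:

    #include <linux/uaccess.h>

    /* Hypothetical user-visible chunk descriptor, for illustration only. */
    struct demo_cs_chunk {
            u64     cb_handle;
            u32     cb_size;
            u32     queue_index;
    };

    /* Invented helpers standing in for the real job-building and queue code. */
    static struct demo_job *demo_build_job(struct demo_device *hdev,
                                           const struct demo_cs_chunk *chunk);
    static int demo_schedule_cs(struct demo_device *hdev, struct demo_cs *cs);

    static int demo_cs_submit(struct demo_device *hdev, struct demo_ctx *ctx,
                              void __user *chunks_ptr, u32 num_chunks,
                              u64 *cs_seq)
    {
            struct demo_cs_chunk *chunks;
            struct demo_cs *cs;
            u32 i;
            int rc;

            chunks = kcalloc(num_chunks, sizeof(*chunks), GFP_KERNEL);
            if (!chunks)
                    return -ENOMEM;

            if (copy_from_user(chunks, chunks_ptr,
                               num_chunks * sizeof(*chunks))) {
                    dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
                    rc = -EFAULT;
                    goto free_chunks;
            }

            rc = demo_allocate_cs(ctx, &cs);
            if (rc)
                    goto free_chunks;

            /* The sequence number doubles as the handle user space waits on. */
            *cs_seq = cs->sequence;

            for (i = 0 ; i < num_chunks ; i++) {
                    struct demo_job *job = demo_build_job(hdev, &chunks[i]);

                    if (!job) {
                            rc = -EINVAL;
                            goto rollback;
                    }

                    job->cs = cs;
                    list_add_tail(&job->cs_node, &cs->job_list);

                    /* Each job pins the CS until the job itself is freed. */
                    demo_cs_get(cs);
            }

            rc = demo_schedule_cs(hdev, cs);
            if (rc) {
                    dev_err(hdev->dev,
                            "Failed to submit CS %llu to H/W queues, error %d\n",
                            cs->sequence, rc);
                    goto rollback;
            }

            goto put_cs;

    rollback:
            demo_cs_rollback(cs);
    put_cs:
            /* Drop the submit path's reference; outstanding jobs keep the CS alive. */
            demo_cs_put(cs);
    free_chunks:
            kfree(chunks);
            return rc;
    }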
623 "Device is %s. Can't submit new CS\n", in hl_cs_ioctl()
643 "Failed to switch to context %d, rejecting CS! %d\n", in hl_cs_ioctl()
665 "Need to run restore phase but restore CS is empty\n"); in hl_cs_ioctl()
676 "Failed to submit restore CS for context %d (%d)\n", in hl_cs_ioctl()
688 "Restore CS for context %d failed to complete %ld\n", in hl_cs_ioctl()
715 "Got execute CS with 0 chunks, context %d\n", in hl_cs_ioctl()
780 dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n", in hl_cs_wait_ioctl()
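
The single hl_cs_wait_ioctl() match shows the other half of the interface: user space waits on a CS handle (its sequence number), which the driver resolves back to the fence attached when the CS was allocated and then blocks on. A sketch of the waiting step; the lookup-by-sequence helper is invented, while dma_fence_wait_timeout() is the stock kernel call:

    /* Invented: map a CS sequence number back to the fence attached to it. */
    static struct dma_fence *demo_get_cs_fence(struct demo_ctx *ctx, u64 seq);

    static long demo_wait_for_cs(struct demo_device *hdev, struct demo_ctx *ctx,
                                 u64 seq, long timeout_jiffies)
    {
            struct dma_fence *fence;
            long rc;

            fence = demo_get_cs_fence(ctx, seq);
            if (!fence)
                    return -EINVAL;

            /* >0: signalled with time to spare, 0: timed out, <0: error. */
            rc = dma_fence_wait_timeout(fence, true, timeout_jiffies);
            dma_fence_put(fence);

            if (rc < 0)
                    dev_err(hdev->dev,
                            "Error %ld on waiting for CS handle %llu\n",
                            rc, seq);

            return rc;
    }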