Lines matching refs: s_job

548  struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,  in drm_sched_job_finish()  local
550  struct drm_gpu_scheduler *sched = s_job->sched;  in drm_sched_job_finish()
559  cancel_delayed_work_sync(&s_job->work_tdr);  in drm_sched_job_finish()
564  !list_is_last(&s_job->node, &sched->ring_mirror_list)) {  in drm_sched_job_finish()
565  struct drm_sched_job *next = list_next_entry(s_job, node);  in drm_sched_job_finish()
571  list_del(&s_job->node);  in drm_sched_job_finish()
574  dma_fence_put(&s_job->s_fence->finished);  in drm_sched_job_finish()
575  sched->ops->free_job(s_job);  in drm_sched_job_finish()
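
Read together, these drm_sched_job_finish() matches form the cleanup path of a completed job: cancel its own timeout (TDR) work, re-arm the timeout for the next job on ring_mirror_list, unlink the job, drop its finished fence and hand it to the driver's free_job hook. The condensed sketch below, assuming the declarations from <drm/gpu_scheduler.h>, shows how the matched lines fit together; the finish_work member and the job_list_lock spinlock do not appear in the matches and are assumptions.

static void drm_sched_job_finish(struct work_struct *work)
{
	/* finish_work is an assumed member name; the match only shows container_of(). */
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/* Stop this job's own timeout handler before tearing the job down. */
	cancel_delayed_work_sync(&s_job->work_tdr);

	spin_lock(&sched->job_list_lock);		/* assumed lock name */
	/* Re-arm the TDR timer for the next pending job, if there is one. */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
		struct drm_sched_job *next = list_next_entry(s_job, node);

		schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	/* Unlink the finished job from the mirror list. */
	list_del(&s_job->node);
	spin_unlock(&sched->job_list_lock);

	/* Drop the reference on the scheduler's finished fence and free the job. */
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}
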
586  static void drm_sched_job_begin(struct drm_sched_job *s_job)  in drm_sched_job_begin()  argument
588  struct drm_gpu_scheduler *sched = s_job->sched;  in drm_sched_job_begin()
590  dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,  in drm_sched_job_begin()
594  list_add_tail(&s_job->node, &sched->ring_mirror_list);  in drm_sched_job_begin()
597  struct drm_sched_job, node) == s_job)  in drm_sched_job_begin()
598  schedule_delayed_work(&s_job->work_tdr, sched->timeout);  in drm_sched_job_begin()
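
The drm_sched_job_begin() matches are the mirror image of the cleanup above: hook a callback onto the job's finished fence, append the job to ring_mirror_list, and start the TDR timer only if this job is now the head of that list. A minimal sketch, where the callback name drm_sched_job_finish_cb and the job_list_lock / finish_work names are assumptions:

/* Assumed callback: queue the finish work once the scheduler fence signals. */
static void drm_sched_job_finish_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	schedule_work(&job->finish_work);	/* assumed member name */
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);		/* assumed lock name */
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	/* Only the head of the mirror list owns the running TDR timer. */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}
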
619  struct drm_sched_job *s_job;  in drm_sched_hw_job_reset()  local
624  list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {  in drm_sched_hw_job_reset()
625  if (s_job->s_fence->parent &&  in drm_sched_hw_job_reset()
626  dma_fence_remove_callback(s_job->s_fence->parent,  in drm_sched_hw_job_reset()
627  &s_job->s_fence->cb)) {  in drm_sched_hw_job_reset()
628  dma_fence_put(s_job->s_fence->parent);  in drm_sched_hw_job_reset()
629  s_job->s_fence->parent = NULL;  in drm_sched_hw_job_reset()
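
In drm_sched_hw_job_reset() the matches walk ring_mirror_list in reverse and detach every job from its hardware fence: if the scheduler's completion callback can still be removed from s_fence->parent, the parent reference is dropped and cleared so the job can be resubmitted after the reset. A rough sketch; the bad-job parameter, the job_list_lock name and the hw_rq_count counter are not visible in the matches and are assumptions:

void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;

	spin_lock(&sched->job_list_lock);		/* assumed lock name */
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		/* Detach the job from its hardware fence if the completion
		 * callback has not fired yet, so recovery can resubmit it. */
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);	/* assumed counter */
		}
	}
	spin_unlock(&sched->job_list_lock);

	/* Handling of the guilty job (bad) is not visible in the matches
	 * and is omitted here. */
}
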
669  struct drm_sched_job *s_job, *tmp;  in drm_sched_job_recovery()  local
674  s_job = list_first_entry_or_null(&sched->ring_mirror_list,  in drm_sched_job_recovery()
676  if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)  in drm_sched_job_recovery()
677  schedule_delayed_work(&s_job->work_tdr, sched->timeout);  in drm_sched_job_recovery()
679  list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {  in drm_sched_job_recovery()
680  struct drm_sched_fence *s_fence = s_job->s_fence;  in drm_sched_job_recovery()
684  if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {  in drm_sched_job_recovery()
686  guilty_context = s_job->s_fence->scheduled.context;  in drm_sched_job_recovery()
689  if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)  in drm_sched_job_recovery()
693  fence = sched->ops->run_job(s_job);  in drm_sched_job_recovery()
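
Finally, drm_sched_job_recovery() restarts the TDR timer for the oldest job still on ring_mirror_list and then resubmits every mirrored job through ops->run_job(). The karma / hang_limit comparison marks one fence context as guilty so that the remaining jobs of that context can be cancelled instead of re-run. A condensed sketch around the matched lines; the function signature, the dma_fence_set_error() call and the re-registration of the completion callback on the new hardware fence are assumptions or omitted:

void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	uint64_t guilty_context = 0;

	spin_lock(&sched->job_list_lock);		/* assumed lock name */
	/* Re-arm the timeout for the oldest job that is still pending. */
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		/* A job whose karma exceeded the hang limit marks its whole
		 * fence context as guilty. */
		if (!found_guilty &&
		    atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_fence->scheduled.context;
		}

		/* Cancel the remaining jobs of the guilty context instead of
		 * re-running them (assumed error handling). */
		if (found_guilty &&
		    s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		/* Resubmit the job to the hardware. */
		fence = sched->ops->run_job(s_job);
		if (fence) {
			/* The real function also re-attaches the completion
			 * callback here before dropping its local reference. */
			s_fence->parent = dma_fence_get(fence);
			dma_fence_put(fence);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
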