Lines Matching refs:sched
62 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
72 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
78 rq->sched = sched; in drm_sched_rq_init()
245 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_cb()
262 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
266 sched = entity->rq->sched; in drm_sched_entity_flush()
274 sched->job_scheduled, in drm_sched_entity_flush()
278 wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity)); in drm_sched_entity_flush()
302 struct drm_gpu_scheduler *sched; in drm_sched_entity_fini() local
304 sched = entity->rq->sched; in drm_sched_entity_fini()
317 kthread_park(sched->thread); in drm_sched_entity_fini()
318 kthread_unpark(sched->thread); in drm_sched_entity_fini()
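The flush/fini pair above is the context-teardown half of the entity API: drm_sched_entity_flush() waits (killably, or with a timeout for exiting tasks) until the entity has drained, and drm_sched_entity_fini() parks and unparks the scheduler thread so the entity can be unlinked safely. A minimal teardown sketch, assuming the entity-only prototypes implied by the locals at 262/304 (exact signatures vary by kernel version); my_ctx is hypothetical:

#include <drm/gpu_scheduler.h>
#include <linux/jiffies.h>

struct my_ctx {				/* hypothetical driver context */
	struct drm_sched_entity entity;
};

static void my_ctx_destroy(struct my_ctx *ctx)
{
	/* Wait (killably) until queued jobs have been scheduled. */
	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
	/* Unlink the entity from its run queue; leftover jobs are killed. */
	drm_sched_entity_fini(&ctx->entity);
}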
374 drm_sched_wakeup(entity->rq->sched); in drm_sched_entity_wakeup()
421 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
429 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
438 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_entity_add_dependency_cb() local
454 if (s_fence && s_fence->sched == sched) { in drm_sched_entity_add_dependency_cb()
483 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_entity_pop_job() local
490 while ((entity->dependency = sched->ops->dependency(sched_job, entity))) in drm_sched_entity_pop_job()
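drm_sched_entity_pop_job() keeps asking the backend's ->dependency callback for the next unsignaled fence the job must wait on before it may run (line 490). A hedged sketch of such a callback for a hypothetical driver; my_job and its deps[] array are invented for illustration, and fence reference handling is simplified:

/* Hypothetical per-driver job wrapper reused by the sketches below. */
struct my_job {
	struct drm_sched_job base;
	struct dma_fence **deps;	/* prerequisite fences */
	unsigned int num_deps, dep_idx;
	struct my_device *mdev;
};

static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *s_entity)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	while (job->dep_idx < job->num_deps) {
		struct dma_fence *fence = job->deps[job->dep_idx++];

		if (!dma_fence_is_signaled(fence))
			return fence;	/* scheduler drops this reference
					 * once the fence signals */
		dma_fence_put(fence);	/* already done, release it now */
	}
	return NULL;	/* nothing left to wait for, job may run */
}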
520 struct drm_gpu_scheduler *sched = sched_job->sched; in drm_sched_entity_push_job() local
539 drm_sched_wakeup(sched); in drm_sched_entity_push_job()
550 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_finish() local
561 spin_lock(&sched->job_list_lock); in drm_sched_job_finish()
563 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_job_finish()
564 !list_is_last(&s_job->node, &sched->ring_mirror_list)) { in drm_sched_job_finish()
568 schedule_delayed_work(&next->work_tdr, sched->timeout); in drm_sched_job_finish()
572 spin_unlock(&sched->job_list_lock); in drm_sched_job_finish()
575 sched->ops->free_job(s_job); in drm_sched_job_finish()
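Once the finish fence has signalled, drm_sched_job_finish() re-arms the timeout for the next job in the mirror list and hands the completed job to the backend's ->free_job callback (line 575). A hedged sketch of that callback, reusing the hypothetical my_job wrapper from the dependency sketch above:

static void my_job_free(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Release whatever the driver still holds for this job. */
	kfree(job->deps);	/* fence references assumed already dropped */
	kfree(job);
}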
588 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
593 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
594 list_add_tail(&s_job->node, &sched->ring_mirror_list); in drm_sched_job_begin()
595 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_job_begin()
596 list_first_entry_or_null(&sched->ring_mirror_list, in drm_sched_job_begin()
598 schedule_delayed_work(&s_job->work_tdr, sched->timeout); in drm_sched_job_begin()
599 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
607 job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
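When the delayed work armed in drm_sched_job_begin() expires, drm_sched_job_timedout() simply forwards the hanging job to the backend's ->timedout_job callback (line 607), which is where drivers usually start a full GPU reset. A hedged sketch; my_gpu_reset() is hypothetical and follows the hw_job_reset/job_recovery pattern shown a few entries further down:

static void my_job_timedout(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	pr_err("my-gpu: job %llu timed out, resetting GPU\n",
	       (unsigned long long)sched_job->id);
	my_gpu_reset(job->mdev);	/* hypothetical reset entry point */
}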
617 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_hw_job_reset() argument
623 spin_lock(&sched->job_list_lock); in drm_sched_hw_job_reset()
624 list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { in drm_sched_hw_job_reset()
630 atomic_dec(&sched->hw_rq_count); in drm_sched_hw_job_reset()
633 spin_unlock(&sched->job_list_lock); in drm_sched_hw_job_reset()
642 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_hw_job_reset()
647 if (atomic_read(&bad->karma) > bad->sched->hang_limit) in drm_sched_hw_job_reset()
667 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched) in drm_sched_job_recovery() argument
673 spin_lock(&sched->job_list_lock); in drm_sched_job_recovery()
674 s_job = list_first_entry_or_null(&sched->ring_mirror_list, in drm_sched_job_recovery()
676 if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) in drm_sched_job_recovery()
677 schedule_delayed_work(&s_job->work_tdr, sched->timeout); in drm_sched_job_recovery()
679 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { in drm_sched_job_recovery()
684 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_job_recovery()
692 spin_unlock(&sched->job_list_lock); in drm_sched_job_recovery()
693 fence = sched->ops->run_job(s_job); in drm_sched_job_recovery()
694 atomic_inc(&sched->hw_rq_count); in drm_sched_job_recovery()
709 spin_lock(&sched->job_list_lock); in drm_sched_job_recovery()
711 spin_unlock(&sched->job_list_lock); in drm_sched_job_recovery()
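drm_sched_hw_job_reset() and drm_sched_job_recovery() are the two driver-visible halves of timeout handling: the first walks the mirror list backwards, detaches the hardware fences and drops hw_rq_count (bumping the guilty job's karma), the second re-arms the timeout and re-runs every job whose karma is still within hang_limit. A hedged sketch of the reset path that typically sits between them; my_device, its sched member and my_hw_reset() are hypothetical:

struct my_device {			/* hypothetical device wrapper */
	struct device *dev;
	struct drm_gpu_scheduler sched;
};

static int my_gpu_reset(struct my_device *mdev)
{
	struct drm_gpu_scheduler *sched = &mdev->sched;
	int ret;

	kthread_park(sched->thread);		/* stop feeding the ring */
	drm_sched_hw_job_reset(sched, NULL);	/* NULL: no single guilty job */
	ret = my_hw_reset(mdev);		/* actual hardware reset */
	drm_sched_job_recovery(sched);		/* re-submit pending jobs */
	kthread_unpark(sched->thread);
	return ret;
}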
731 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_job_init() local
733 job->sched = sched; in drm_sched_job_init()
735 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_init()
739 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_init()
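drm_sched_job_init() binds the job to the entity's current scheduler, derives its priority from the run-queue index (the pointer arithmetic at line 735) and assigns a monotonically increasing id; drm_sched_entity_push_job() then queues it and wakes the worker if the ring has room. A hedged submission sketch, assuming the (job, entity, owner) form of drm_sched_job_init() implied by the local at line 731:

static int my_submit(struct my_ctx *ctx, struct my_job *job)
{
	int ret;

	/* Bind the job to the entity's scheduler and create its fences. */
	ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
	if (ret)
		return ret;

	/* Queue it; the scheduler thread runs it once every fence
	 * reported by ->dependency has signalled. */
	drm_sched_entity_push_job(&job->base, &ctx->entity);
	return 0;
}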
756 static bool drm_sched_ready(struct drm_gpu_scheduler *sched) in drm_sched_ready() argument
758 return atomic_read(&sched->hw_rq_count) < in drm_sched_ready()
759 sched->hw_submission_limit; in drm_sched_ready()
768 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched) in drm_sched_wakeup() argument
770 if (drm_sched_ready(sched)) in drm_sched_wakeup()
771 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup()
782 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
787 if (!drm_sched_ready(sched)) in drm_sched_select_entity()
792 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); in drm_sched_select_entity()
812 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_process_job() local
815 atomic_dec(&sched->hw_rq_count); in drm_sched_process_job()
820 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_process_job()
830 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
850 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
861 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
862 (!drm_sched_blocked(sched) && in drm_sched_main()
863 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
875 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
878 fence = sched->ops->run_job(sched_job); in drm_sched_main()
895 wake_up(&sched->job_scheduled); in drm_sched_main()
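drm_sched_main() is the worker spawned by drm_sched_init(): it sleeps until an entity is runnable and the ring is not saturated, pops a job, bumps hw_rq_count and calls the backend's ->run_job (line 878); drm_sched_process_job() later drops the count and wakes the worker when the returned fence signals. A hedged ->run_job sketch; my_ring_submit() is hypothetical and is expected to return a referenced hardware fence:

static struct dma_fence *my_job_run(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Push the command stream to the hardware ring and return the
	 * fence that signals when it retires (or an ERR_PTR on failure). */
	return my_ring_submit(job);
}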
912 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
920 sched->ops = ops; in drm_sched_init()
921 sched->hw_submission_limit = hw_submission; in drm_sched_init()
922 sched->name = name; in drm_sched_init()
923 sched->timeout = timeout; in drm_sched_init()
924 sched->hang_limit = hang_limit; in drm_sched_init()
926 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
928 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
929 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
930 INIT_LIST_HEAD(&sched->ring_mirror_list); in drm_sched_init()
931 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
932 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
933 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
936 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
937 if (IS_ERR(sched->thread)) { in drm_sched_init()
939 return PTR_ERR(sched->thread); in drm_sched_init()
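drm_sched_init() stores the ops table and limits, initializes the per-priority run queues, wait queues, mirror list and counters, and finally spawns the kthread that runs drm_sched_main(); drm_sched_fini() stops that thread again. A hedged bring-up/teardown sketch, assuming this era's (sched, ops, hw_submission, hang_limit, timeout, name) argument order; the numeric values and ring name are made up:

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_job_dependency,
	.run_job	= my_job_run,
	.timedout_job	= my_job_timedout,
	.free_job	= my_job_free,
};

static int my_sched_start(struct my_device *mdev)
{
	return drm_sched_init(&mdev->sched, &my_sched_ops,
			      16,				/* hw_submission */
			      4,				/* hang_limit */
			      msecs_to_jiffies(10 * 1000),	/* timeout */
			      "my-ring");
}

static void my_sched_stop(struct my_device *mdev)
{
	drm_sched_fini(&mdev->sched);	/* kthread_stop() on the worker */
}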
953 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
955 if (sched->thread) in drm_sched_fini()
956 kthread_stop(sched->thread); in drm_sched_fini()