Lines Matching refs:sched (DRM GPU scheduler core, drivers/gpu/drm/scheduler/sched_main.c)
73 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
79 rq->sched = sched; in drm_sched_rq_init()
96 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
115 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
175 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
177 atomic_dec(&sched->hw_rq_count); in drm_sched_job_done()
178 atomic_dec(sched->score); in drm_sched_job_done()
185 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_job_done()
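
The three lines above are the completion path: once the hardware fence a driver returned from its run_job callback signals, drm_sched_job_done() drops the in-flight count and the load score and wakes the submission thread. A minimal, hedged sketch of the driver side, where struct my_ring and my_ring_oldest_hw_fence() are hypothetical:

    /* Signalling the hardware fence returned from .run_job(); the scheduler's
     * callback on that fence then runs drm_sched_job_done() (hw_rq_count--,
     * score--, wake_up_interruptible on wake_up_worker). */
    static irqreturn_t my_done_irq(int irq, void *data)
    {
            struct my_ring *ring = data;

            dma_fence_signal(my_ring_oldest_hw_fence(ring));
            return IRQ_HANDLED;
    }
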
211 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
219 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
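
drm_sched_dependency_optimized() reports whether a dependency fence comes from an entity on this same scheduler, in which case the scheduler already orders the two jobs. A hedged fragment of how a driver might use it; my_emit_pipe_sync() and ring are hypothetical:

    /* Only emit an explicit synchronization when the dependency is not
     * already ordered by this scheduler. */
    if (!drm_sched_dependency_optimized(fence, entity))
            my_emit_pipe_sync(ring, fence);
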
233 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
235 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
236 !list_empty(&sched->pending_list)) in drm_sched_start_timeout()
237 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
247 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
249 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); in drm_sched_fault()
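
drm_sched_fault() re-queues the timeout work with a zero delay, so a fault detected by an interrupt enters the recovery path immediately instead of waiting for sched->timeout to expire. A hedged fragment, with struct my_ring hypothetical:

    static irqreturn_t my_fault_irq(int irq, void *data)
    {
            struct my_ring *ring = data;

            /* Run the timeout/recovery handler now rather than waiting it out. */
            drm_sched_fault(&ring->sched);
            return IRQ_HANDLED;
    }
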
265 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) in drm_sched_suspend_timeout() argument
269 sched_timeout = sched->work_tdr.timer.expires; in drm_sched_suspend_timeout()
275 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) in drm_sched_suspend_timeout()
279 return sched->timeout; in drm_sched_suspend_timeout()
291 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, in drm_sched_resume_timeout() argument
294 spin_lock(&sched->job_list_lock); in drm_sched_resume_timeout()
296 if (list_empty(&sched->pending_list)) in drm_sched_resume_timeout()
297 cancel_delayed_work(&sched->work_tdr); in drm_sched_resume_timeout()
299 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); in drm_sched_resume_timeout()
301 spin_unlock(&sched->job_list_lock); in drm_sched_resume_timeout()
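
The suspend/resume pair parks the TDR timer and later re-arms it with the time that was left, which is useful around windows where the hardware is intentionally stalled. A hedged fragment; my_request_preemption() is hypothetical:

    unsigned long remaining;

    remaining = drm_sched_suspend_timeout(&ring->sched);
    my_request_preemption(ring);            /* hardware legitimately idle here */
    drm_sched_resume_timeout(&ring->sched, remaining);
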
307 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
309 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
310 list_add_tail(&s_job->list, &sched->pending_list); in drm_sched_job_begin()
311 drm_sched_start_timeout(sched); in drm_sched_job_begin()
312 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
317 struct drm_gpu_scheduler *sched; in drm_sched_job_timedout() local
321 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); in drm_sched_job_timedout()
324 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
325 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
335 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
337 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
343 if (sched->free_guilty) { in drm_sched_job_timedout()
344 job->sched->ops->free_job(job); in drm_sched_job_timedout()
345 sched->free_guilty = false; in drm_sched_job_timedout()
348 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
352 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
353 drm_sched_start_timeout(sched); in drm_sched_job_timedout()
354 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
391 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_stop() argument
395 kthread_park(sched->thread); in drm_sched_stop()
404 if (bad && bad->sched == sched) in drm_sched_stop()
409 list_add(&bad->list, &sched->pending_list); in drm_sched_stop()
417 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, in drm_sched_stop()
424 atomic_dec(&sched->hw_rq_count); in drm_sched_stop()
430 spin_lock(&sched->job_list_lock); in drm_sched_stop()
432 spin_unlock(&sched->job_list_lock); in drm_sched_stop()
448 sched->ops->free_job(s_job); in drm_sched_stop()
450 sched->free_guilty = true; in drm_sched_stop()
460 cancel_delayed_work(&sched->work_tdr); in drm_sched_stop()
472 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) in drm_sched_start() argument
482 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_start()
485 atomic_inc(&sched->hw_rq_count); in drm_sched_start()
496 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", in drm_sched_start()
503 spin_lock(&sched->job_list_lock); in drm_sched_start()
504 drm_sched_start_timeout(sched); in drm_sched_start()
505 spin_unlock(&sched->job_list_lock); in drm_sched_start()
508 kthread_unpark(sched->thread); in drm_sched_start()
518 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) in drm_sched_resubmit_jobs() argument
520 drm_sched_resubmit_jobs_ext(sched, INT_MAX); in drm_sched_resubmit_jobs()
531 void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max) in drm_sched_resubmit_jobs_ext() argument
539 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_resubmit_jobs_ext()
545 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_resubmit_jobs_ext()
553 fence = sched->ops->run_job(s_job); in drm_sched_resubmit_jobs_ext()
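
drm_sched_job_timedout(), drm_sched_stop(), drm_sched_start() and drm_sched_resubmit_jobs() above form the recovery machinery: the timeout worker calls the driver's timedout_job callback, which is expected to stop the scheduler, reset the hardware and resubmit the surviving jobs. A hedged sketch of such a callback following the common pattern; to_my_dev() and my_hw_reset() are hypothetical:

    static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
    {
            struct drm_gpu_scheduler *sched = bad->sched;

            drm_sched_stop(sched, bad);             /* park the thread, detach callbacks */
            drm_sched_increase_karma(bad);          /* count this hang against the job */
            my_hw_reset(to_my_dev(sched));          /* hypothetical engine reset */
            drm_sched_resubmit_jobs(sched);         /* re-run the pending jobs */
            drm_sched_start(sched, true);           /* full recovery: re-arm fences, unpark */

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }

The karma check against sched->hang_limit shown above is what singles out the guilty job during resubmission; drm_sched_resubmit_jobs_ext() with a smaller max is available when only part of the pending list should be re-queued.
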
626 struct drm_gpu_scheduler *sched; in drm_sched_job_arm() local
631 sched = entity->rq->sched; in drm_sched_job_arm()
633 job->sched = sched; in drm_sched_job_arm()
634 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_arm()
635 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_arm()
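
drm_sched_job_arm() is the second half of the split submission API: initialize first, arm only once the submission can no longer fail, then push. A hedged fragment of the usual order; job, ctx and owner are hypothetical driver state:

    ret = drm_sched_job_init(&job->base, &ctx->entity, owner);
    if (ret)
            return ret;

    /* ... pin buffers, record dependencies; bailing out is still cheap here ... */

    drm_sched_job_arm(&job->base);          /* picks entity->rq->sched, fence, job id */
    drm_sched_entity_push_job(&job->base);  /* hand the job to the scheduler */
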
770 static bool drm_sched_ready(struct drm_gpu_scheduler *sched) in drm_sched_ready() argument
772 return atomic_read(&sched->hw_rq_count) < in drm_sched_ready()
773 sched->hw_submission_limit; in drm_sched_ready()
782 void drm_sched_wakeup(struct drm_gpu_scheduler *sched) in drm_sched_wakeup() argument
784 if (drm_sched_ready(sched)) in drm_sched_wakeup()
785 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup()
796 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
801 if (!drm_sched_ready(sched)) in drm_sched_select_entity()
806 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); in drm_sched_select_entity()
823 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) in drm_sched_get_cleanup_job() argument
827 spin_lock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
829 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
837 cancel_delayed_work(&sched->work_tdr); in drm_sched_get_cleanup_job()
839 next = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
846 drm_sched_start_timeout(sched); in drm_sched_get_cleanup_job()
852 spin_unlock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
869 struct drm_gpu_scheduler *sched, *picked_sched = NULL; in drm_sched_pick_best() local
874 sched = sched_list[i]; in drm_sched_pick_best()
876 if (!sched->ready) { in drm_sched_pick_best()
878 sched->name); in drm_sched_pick_best()
882 num_score = atomic_read(sched->score); in drm_sched_pick_best()
885 picked_sched = sched; in drm_sched_pick_best()
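
drm_sched_pick_best() is the load-balancing helper: among the ready schedulers in a list it picks the one with the lowest *sched->score, the counter maintained by the add/remove-entity and job-done paths above. It is driven from the entity code; a hedged fragment of setting up such an entity, where the my_dev ring array is hypothetical:

    struct drm_gpu_scheduler *sched_list[] = {
            &my_dev->ring[0].sched,         /* rings of equal capability */
            &my_dev->ring[1].sched,
    };

    ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
                                sched_list, ARRAY_SIZE(sched_list), NULL);
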
900 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
919 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
931 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
932 (cleanup_job = drm_sched_get_cleanup_job(sched)) || in drm_sched_main()
933 (!drm_sched_blocked(sched) && in drm_sched_main()
934 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
938 sched->ops->free_job(cleanup_job); in drm_sched_main()
952 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
956 fence = sched->ops->run_job(sched_job); in drm_sched_main()
970 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", in drm_sched_main()
979 wake_up(&sched->job_scheduled); in drm_sched_main()
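
The wait condition above is the heart of drm_sched_main(): the kthread sleeps until there is a finished job to free, a runnable entity to pick from, or a stop request; a selected job is counted in hw_rq_count and handed to the driver's run_job callback, whose returned fence eventually ends in drm_sched_job_done(). A hedged sketch of that callback; struct my_job and my_ring_submit() are hypothetical:

    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
            struct my_job *job = container_of(sched_job, struct my_job, base);

            /* Emit to the ring and return the hardware fence the scheduler
             * will wait on; an ERR_PTR() reports a failed submission. */
            return my_ring_submit(job);
    }
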
1000 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
1007 sched->ops = ops; in drm_sched_init()
1008 sched->hw_submission_limit = hw_submission; in drm_sched_init()
1009 sched->name = name; in drm_sched_init()
1010 sched->timeout = timeout; in drm_sched_init()
1011 sched->timeout_wq = timeout_wq ? : system_wq; in drm_sched_init()
1012 sched->hang_limit = hang_limit; in drm_sched_init()
1013 sched->score = score ? score : &sched->_score; in drm_sched_init()
1014 sched->dev = dev; in drm_sched_init()
1016 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
1018 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
1019 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
1020 INIT_LIST_HEAD(&sched->pending_list); in drm_sched_init()
1021 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
1022 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
1023 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); in drm_sched_init()
1024 atomic_set(&sched->_score, 0); in drm_sched_init()
1025 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
1028 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
1029 if (IS_ERR(sched->thread)) { in drm_sched_init()
1030 ret = PTR_ERR(sched->thread); in drm_sched_init()
1031 sched->thread = NULL; in drm_sched_init()
1032 DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name); in drm_sched_init()
1036 sched->ready = true; in drm_sched_init()
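
drm_sched_init() wires all of the above together: the ops table, the in-flight submission limit, the hang limit, the timeout and its workqueue, an optional shared score counter, a name and the device used for error reporting, and finally the kthread running drm_sched_main(). A hedged sketch of a caller, assuming the parameter order of the version this listing comes from (check the header you build against); my_* names are hypothetical:

    static const struct drm_sched_backend_ops my_sched_ops = {
            .run_job        = my_run_job,
            .timedout_job   = my_timedout_job,
            .free_job       = my_free_job,
    };

    ret = drm_sched_init(&ring->sched, &my_sched_ops,
                         16,                        /* hw_submission: in-flight job limit */
                         3,                         /* hang_limit: karma before a job is guilty */
                         msecs_to_jiffies(500),     /* timeout, or MAX_SCHEDULE_TIMEOUT */
                         NULL,                      /* timeout_wq: NULL falls back to system_wq */
                         NULL,                      /* score: NULL uses the per-scheduler counter */
                         "my-ring", dev);
    if (ret)
            return ret;

Passing the same atomic_t as score to several schedulers makes drm_sched_pick_best() treat them as one load-balanced unit.
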
1048 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
1053 if (sched->thread) in drm_sched_fini()
1054 kthread_stop(sched->thread); in drm_sched_fini()
1057 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_fini()
1075 wake_up_all(&sched->job_scheduled); in drm_sched_fini()
1078 cancel_delayed_work_sync(&sched->work_tdr); in drm_sched_fini()
1080 sched->ready = false; in drm_sched_fini()
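
Teardown is the mirror image: entities feeding this scheduler should be flushed and destroyed before the scheduler itself, so no job is left without a thread to free it. A hedged fragment:

    drm_sched_entity_destroy(&ctx->entity);        /* for each entity using ring->sched */
    drm_sched_fini(&ring->sched);
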
1096 struct drm_gpu_scheduler *sched = bad->sched; in drm_sched_increase_karma_ext() local
1110 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_increase_karma_ext()