/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
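	/*
	 * Allocate two fence contexts: drm_sched_fence uses
	 * entity->fence_context for its scheduled fences and
	 * entity->fence_context + 1 for its finished fences (see the
	 * context check in drm_sched_entity_add_dependency_cb()).
	 */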
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

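/*
 * Minimal usage sketch (illustrative only, not part of this file; "ring" and
 * "ctx" are hypothetical driver objects): a driver typically initializes an
 * entity against the scheduler(s) of the rings it may submit to.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */
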
/**
 * drm_sched_entity_modify_sched - Modify the scheduler list of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace the
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

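/*
 * Illustrative sketch (hypothetical names): after a reset a driver might
 * restrict an entity to a single known-good scheduler.
 *
 *	struct drm_gpu_scheduler *good[] = { &good_ring->sched };
 *
 *	drm_sched_entity_modify_sched(&ctx->entity, good, ARRAY_SIZE(good));
 *
 * The new list is consulted the next time drm_sched_entity_select_rq()
 * picks a runqueue for the entity.
 */
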
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
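	/*
	 * cmpxchg() atomically clears last_user if it is this process and
	 * returns the previous value: the entity is stopped below only if
	 * the exiting process was the last (or only) submitter.
	 */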
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *f;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		/* Wait for all dependencies to avoid data corruption */
		while ((f = job->sched->ops->dependency(job, entity)))
			dma_fence_wait(f, false);

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even get the chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to be idle to make sure it
			 * isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
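
/*
 * Typical teardown (illustrative; "ctx" is a hypothetical driver object):
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 *
 * Callers wanting a shorter, bounded flush can open-code the two steps:
 *
 *	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(100));
 *	drm_sched_entity_fini(&ctx->entity);
 */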

/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency
 * and wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

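/*
 * Illustrative sketch (hypothetical "ctx" object): raising an entity's
 * priority. The new value takes effect when the entity next selects a
 * runqueue (see drm_sched_entity_select_rq()).
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */
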
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

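	/*
	 * Remember the job's finished fence: drm_sched_entity_select_rq()
	 * checks it before load balancing and drm_sched_entity_kill_jobs()
	 * uses it during entity teardown.
	 */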
	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

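	/*
	 * With a single scheduler there is nothing to load balance, so
	 * clearing the list lets future calls return early.
	 */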
	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called together with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
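
/*
 * Submission-path sketch (illustrative; "job", "ctx" and "owner" are
 * hypothetical driver-side names): the job is initialized and pushed under
 * the same lock so that fence sequence numbers match queue order, as the
 * note above requires.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (!r)
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */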