// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/kthread.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

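/*
 * Module parameters, both read-only after load (0444): job_hang_limit is
 * the number of times a timed out job may be retried before the scheduler
 * gives up on it, and hw_jobs_limit caps how many jobs may be queued on
 * the hardware at once. Both are handed to drm_sched_init() below.
 */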
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

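/*
 * Scheduler dependency callback: the core scheduler calls this repeatedly
 * before running a job. Each call returns one fence the job still has to
 * wait for, or NULL once every dependency (the optional explicit in-fence
 * and the fences gathered from the BOs' reservation objects) has signaled.
 */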
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

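	/*
	 * Walk the fences collected from the BO reservation objects at
	 * submit time: the exclusive (write) fence of each BO first, then
	 * all of its shared (read) fences.
	 */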
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

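/*
 * Scheduler run_job callback: hand the job to the hardware. If the
 * scheduler fence already carries an error (the job was marked bad during
 * a previous recovery), skip the submission and return no hardware fence.
 */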
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

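/*
 * Scheduler timeout callback, invoked when a job stays on the hardware
 * longer than the timeout passed to drm_sched_init(). Spurious timeouts
 * and front-end progress are filtered out first; only a truly stuck GPU
 * is dumped and reset, with the scheduler thread parked around recovery.
 */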
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (fence_completed(gpu, submit->out_fence->seqno))
		return;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		schedule_delayed_work(&sched_job->work_tdr,
				      sched_job->sched->timeout);
		return;
	}

	/* block scheduler */
	kthread_park(gpu->sched.thread);
	drm_sched_hw_job_reset(&gpu->sched, sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(gpu);
	etnaviv_gpu_recover_hang(gpu);

	/* restart scheduler after GPU is usable again */
	drm_sched_job_recovery(&gpu->sched);
	kthread_unpark(gpu->sched.thread);
}

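/*
 * Scheduler free_job callback: drop the submit reference taken in
 * etnaviv_sched_push_job() once the scheduler is done with the job.
 */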
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	etnaviv_submit_put(submit);
}

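/* The backend hooks wiring etnaviv into the common DRM GPU scheduler. */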
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

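/*
 * Queue a submit on the scheduler entity of its context. On success the
 * scheduler owns an extra reference on the submit, and submit->out_fence
 * plus submit->out_fence_id identify the completion fence to userspace.
 */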
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->cmdbuf.ctx);
	if (ret)
		goto out_unlock;

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

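/*
 * One scheduler instance is created per GPU core, with a fixed 500ms job
 * timeout that etnaviv_sched_timedout_job() above may extend while the
 * front-end is still making progress.
 */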
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}