Lines matching full:job
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
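Read together, these two comment fragments summarize the scheduling design: each client gets its own scheduler entity and the shared DRM GPU scheduler round-robins between them, and to keep latency low for interactive clients a new job is handed to the hardware only after the previous one completes, rather than filling the hardware FIFOs. A minimal sketch of the per-client setup this implies, using the stock drm_sched_entity_init() API; the v3d_file_priv and queue field names are illustrative assumptions, not necessarily the driver's exact layout:

    /* Hedged sketch: one scheduler entity per client per queue, so
     * the shared scheduler can round-robin across clients.
     */
    static int v3d_client_entities_init(struct v3d_file_priv *priv,
                                        struct v3d_dev *v3d)
    {
            int i, ret;

            for (i = 0; i < V3D_MAX_QUEUES; i++) {
                    struct drm_gpu_scheduler *sched = &v3d->queue[i].sched;

                    ret = drm_sched_entity_init(&priv->sched_entity[i],
                                                DRM_SCHED_PRIORITY_NORMAL,
                                                &sched, 1, NULL);
                    if (ret)
                            return ret;
            }

            return 0;
    }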
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_free() local
63 v3d_job_put(job); in v3d_job_free()
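The free callback simply drops the reference the scheduler held on the job. A sketch of the kref pattern this implies; the refcount field and per-type free callback follow common kernel practice and are assumptions here:

    /* Sketch: the last put invokes the job type's destructor.
     * Field names are assumed.
     */
    static void v3d_job_put(struct v3d_job *job)
    {
            kref_put(&job->refcount, job->free);
    }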
67 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) in v3d_switch_perfmon() argument
69 if (job->perfmon != v3d->active_perfmon) in v3d_switch_perfmon()
72 if (job->perfmon && v3d->active_perfmon != job->perfmon) in v3d_switch_perfmon()
73 v3d_perfmon_start(v3d, job->perfmon); in v3d_switch_perfmon()
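Only the start path is among the matched lines; the elided branch presumably stops the previously active monitor before a different one takes over. A plausible full body, with the stop call hedged as an assumption:

    static void
    v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
    {
            /* Assumed: stop the old monitor when the job uses a
             * different one (or none at all).
             */
            if (job->perfmon != v3d->active_perfmon)
                    v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

            if (job->perfmon && v3d->active_perfmon != job->perfmon)
                    v3d_perfmon_start(v3d, job->perfmon);
    }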
77 * Returns the fences that the job depends on, one by one.
86 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_dependency() local
92 if (!xa_empty(&job->deps)) in v3d_job_dependency()
93 return xa_erase(&job->deps, job->last_dep++); in v3d_job_dependency()
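The scheduler calls the dependency callback repeatedly; each call removes and returns the next fence from the job's xarray until it is empty, at which point the job is ready to run. A plausible full callback built around the two matched lines (the NULL fall-through is an assumption):

    static struct dma_fence *
    v3d_job_dependency(struct drm_sched_job *sched_job,
                       struct drm_sched_entity *s_entity)
    {
            struct v3d_job *job = to_v3d_job(sched_job);

            /* Hand back (and erase) the next stored fence, one per
             * call, until the xarray is drained.
             */
            if (!xa_empty(&job->deps))
                    return xa_erase(&job->deps, job->last_dep++);

            return NULL;
    }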
100 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
101 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
106 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_bin_job_run()
113 v3d->bin_job = job; in v3d_bin_job_run()
115 * reuse the overflow attached to a previous job. in v3d_bin_job_run()
126 if (job->base.irq_fence) in v3d_bin_job_run()
127 dma_fence_put(job->base.irq_fence); in v3d_bin_job_run()
128 job->base.irq_fence = dma_fence_get(fence); in v3d_bin_job_run()
131 job->start, job->end); in v3d_bin_job_run()
133 v3d_switch_perfmon(v3d, &job->base); in v3d_bin_job_run()
136 * Writing the end register is what starts the job. in v3d_bin_job_run()
138 if (job->qma) { in v3d_bin_job_run()
139 V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma); in v3d_bin_job_run()
140 V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms); in v3d_bin_job_run()
142 if (job->qts) { in v3d_bin_job_run()
145 job->qts); in v3d_bin_job_run()
147 V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); in v3d_bin_job_run()
148 V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end); in v3d_bin_job_run()
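The bin path bails out early if the scheduler already marked the job's fence with an error, swaps in the new IRQ fence (dropping the one from any previous run, per the comment about not reusing the overflow attached to a previous job), optionally restores overflow memory (QMA/QMS) and tile state (QTS), and finally arms CT0; as the comment says, writing the end register is what starts the job. A condensed sketch of the kick sequence, with the enable bit on the truncated QTS write hedged as an assumption:

    /* Condensed kick sequence; tracing and cache management omitted. */
    if (job->qma) {
            V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);  /* overflow mem */
            V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
    }
    if (job->qts)
            /* The ENABLE bit is an assumption; the matched QTS line
             * is truncated in this listing.
             */
            V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
                           V3D_CLE_CT0QTS_ENABLE | job->qts);
    V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
    V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);  /* this write kicks CT0 */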
155 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_run() local
156 struct v3d_dev *v3d = job->base.v3d; in v3d_render_job_run()
160 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_render_job_run()
163 v3d->render_job = job; in v3d_render_job_run()
177 if (job->base.irq_fence) in v3d_render_job_run()
178 dma_fence_put(job->base.irq_fence); in v3d_render_job_run()
179 job->base.irq_fence = dma_fence_get(fence); in v3d_render_job_run()
182 job->start, job->end); in v3d_render_job_run()
184 v3d_switch_perfmon(v3d, &job->base); in v3d_render_job_run()
189 * Writing the end register is what starts the job. in v3d_render_job_run()
191 V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); in v3d_render_job_run()
192 V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end); in v3d_render_job_run()
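The render path mirrors the bin path but drives CT1 and needs no overflow or tile-state setup. A plausible body assembled from the matched lines; the fence-creation helper and its queue argument are assumptions, and cache-flush and trace details are omitted:

    static struct dma_fence *
    v3d_render_job_run(struct drm_sched_job *sched_job)
    {
            struct v3d_render_job *job = to_render_job(sched_job);
            struct v3d_dev *v3d = job->base.v3d;
            struct dma_fence *fence;

            if (unlikely(job->base.base.s_fence->finished.error))
                    return NULL;

            v3d->render_job = job;

            fence = v3d_fence_create(v3d, V3D_RENDER);  /* assumed helper */
            if (IS_ERR(fence))
                    return NULL;

            if (job->base.irq_fence)
                    dma_fence_put(job->base.irq_fence);
            job->base.irq_fence = dma_fence_get(fence);

            v3d_switch_perfmon(v3d, &job->base);

            V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
            V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);  /* kick */

            return fence;
    }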
200 struct v3d_tfu_job *job = to_tfu_job(sched_job); in v3d_tfu_job_run() local
201 struct v3d_dev *v3d = job->base.v3d; in v3d_tfu_job_run()
209 v3d->tfu_job = job; in v3d_tfu_job_run()
210 if (job->base.irq_fence) in v3d_tfu_job_run()
211 dma_fence_put(job->base.irq_fence); in v3d_tfu_job_run()
212 job->base.irq_fence = dma_fence_get(fence); in v3d_tfu_job_run()
216 V3D_WRITE(V3D_TFU_IIA, job->args.iia); in v3d_tfu_job_run()
217 V3D_WRITE(V3D_TFU_IIS, job->args.iis); in v3d_tfu_job_run()
218 V3D_WRITE(V3D_TFU_ICA, job->args.ica); in v3d_tfu_job_run()
219 V3D_WRITE(V3D_TFU_IUA, job->args.iua); in v3d_tfu_job_run()
220 V3D_WRITE(V3D_TFU_IOA, job->args.ioa); in v3d_tfu_job_run()
221 V3D_WRITE(V3D_TFU_IOS, job->args.ios); in v3d_tfu_job_run()
222 V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); in v3d_tfu_job_run()
223 if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { in v3d_tfu_job_run()
224 V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); in v3d_tfu_job_run()
225 V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); in v3d_tfu_job_run()
226 V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); in v3d_tfu_job_run()
228 /* ICFG kicks off the job. */ in v3d_tfu_job_run()
229 V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); in v3d_tfu_job_run()
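Two details stand out in the TFU path. First, the coefficient registers 1-3 are only programmed when COEF0 carries the USECOEF flag, and the final ICFG write both configures the unit and kicks it, with the IOC bit presumably requesting an interrupt on completion. Second, the TFU registers use V3D_WRITE() rather than V3D_CORE_WRITE(): the TFU sits in the hub register space, not a per-core one. A sketch of how such an accessor pair is commonly defined; the exact macro bodies here are assumptions:

    /* Illustrative definitions; the driver's real macros may differ. */
    #define V3D_WRITE(offset, val) \
            writel(val, v3d->hub_regs + (offset))
    #define V3D_CORE_WRITE(core, offset, val) \
            writel(val, v3d->core_regs[core] + (offset))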
237 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_run() local
238 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_run()
243 v3d->csd_job = job; in v3d_csd_job_run()
251 if (job->base.irq_fence) in v3d_csd_job_run()
252 dma_fence_put(job->base.irq_fence); in v3d_csd_job_run()
253 job->base.irq_fence = dma_fence_get(fence); in v3d_csd_job_run()
257 v3d_switch_perfmon(v3d, &job->base); in v3d_csd_job_run()
260 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); in v3d_csd_job_run()
261 /* CFG0 write kicks off the job. */ in v3d_csd_job_run()
262 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); in v3d_csd_job_run()
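Ordering matters here: the loop programs the queued CFG registers, and CFG0 is written last because, per the comment, that write launches the dispatch; writing it last guarantees the job never starts with stale configuration. A sketch with the loop bounds made explicit (the bounds are an assumption; only the loop body appears among the matched lines):

    int i;

    /* Program CFG1..CFG6 first (assumed bounds), CFG0 last. */
    for (i = 1; i <= 6; i++)
            V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i,
                           job->args.cfg[i]);
    V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);  /* kick */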
270 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cache_clean_job_run() local
271 struct v3d_dev *v3d = job->v3d; in v3d_cache_clean_job_run()
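The cache-clean queue has no hardware engine behind it. A plausible full body given the matched locals, where the synchronous flush helper's name is inferred from the function's own name and is an assumption:

    static struct dma_fence *
    v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
    {
            struct v3d_job *job = to_v3d_job(sched_job);
            struct v3d_dev *v3d = job->v3d;

            v3d_clean_caches(v3d);  /* assumed synchronous flush */

            return NULL;  /* no fence: the work completed in-line */
    }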
317 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cl_job_timedout() local
318 struct v3d_dev *v3d = job->v3d; in v3d_cl_job_timedout()
334 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_timedout() local
337 &job->timedout_ctca, &job->timedout_ctra); in v3d_bin_job_timedout()
343 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_timedout() local
346 &job->timedout_ctca, &job->timedout_ctra); in v3d_render_job_timedout()
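The bin and render timeout handlers both delegate to a shared CL handler, passing per-job storage for the last-seen current and return addresses (CTCA/CTRA). The idea, mirrored by the CSD handler below, is to skip the reset if those addresses moved since the previous timeout, i.e. the GPU is still making progress. A hedged sketch of that check inside the shared handler; the queue parameter q, the register reads, and the pointer parameters are assumptions, as only the per-job storage is visible in the matched lines:

    u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));  /* assumed reads */
    u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

    /* Progress since the last timeout?  Record it and let the
     * timer get rearmed instead of resetting.
     */
    if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
            *timedout_ctca = ctca;
            *timedout_ctra = ctra;
            return DRM_GPU_SCHED_STAT_NOMINAL;
    }

    return v3d_gpu_reset_for_timeout(v3d, sched_job);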
352 struct v3d_job *job = to_v3d_job(sched_job); in v3d_generic_job_timedout() local
354 return v3d_gpu_reset_for_timeout(job->v3d, sched_job); in v3d_generic_job_timedout()
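The generic handler escalates straight to a full GPU reset. A condensed sketch of what such a reset-for-timeout path typically does with the DRM scheduler API (stop every queue's scheduler, penalize the bad job, reset the hardware, resubmit, restart); the v3d_reset() call and the lock are assumptions:

    static enum drm_gpu_sched_stat
    v3d_gpu_reset_for_timeout(struct v3d_dev *v3d,
                              struct drm_sched_job *sched_job)
    {
            enum v3d_queue q;

            mutex_lock(&v3d->reset_lock);  /* assumed serialization */

            /* Block all schedulers before touching the hardware. */
            for (q = 0; q < V3D_MAX_QUEUES; q++)
                    drm_sched_stop(&v3d->queue[q].sched, sched_job);

            if (sched_job)
                    drm_sched_increase_karma(sched_job);

            v3d_reset(v3d);  /* assumed: return the GPU to init state */

            for (q = 0; q < V3D_MAX_QUEUES; q++)
                    drm_sched_resubmit_jobs(&v3d->queue[q].sched);

            /* Unblock schedulers and restart their jobs. */
            for (q = 0; q < V3D_MAX_QUEUES; q++)
                    drm_sched_start(&v3d->queue[q].sched, true);

            mutex_unlock(&v3d->reset_lock);

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }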
360 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_timedout() local
361 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_timedout()
367 if (job->timedout_batches != batches) { in v3d_csd_job_timedout()
368 job->timedout_batches = batches; in v3d_csd_job_timedout()
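The CSD handler applies the same progress heuristic using the hardware's completed-batches counter: if the count changed since the last timeout, record the new value and report nominal so the timer is rearmed; only a genuinely stuck dispatch triggers the reset. A plausible full handler around the matched comparison, with the CFG4 read hedged as an assumption:

    static enum drm_gpu_sched_stat
    v3d_csd_job_timedout(struct drm_sched_job *sched_job)
    {
            struct v3d_csd_job *job = to_csd_job(sched_job);
            struct v3d_dev *v3d = job->base.v3d;
            u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);  /* assumed */

            /* Still chewing through batches?  Don't reset yet. */
            if (job->timedout_batches != batches) {
                    job->timedout_batches = batches;
                    return DRM_GPU_SCHED_STAT_NOMINAL;
            }

            return v3d_gpu_reset_for_timeout(v3d, sched_job);
    }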