Lines matching "gpu" (full word) in drivers/gpu/drm/etnaviv/etnaviv_sched.c

80 		dev_dbg(submit->gpu->dev, "skipping bad job\n");  in etnaviv_sched_run_job()
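For context, a minimal sketch of the surrounding run_job callback: only the dev_dbg() line is confirmed by the match above; the finished-fence error guard and the etnaviv_gpu_submit() call are assumptions based on how DRM scheduler backends typically skip jobs invalidated by an earlier recovery.

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	/* Skip submission if an earlier recovery marked this job as bad
	 * (assumed guard; not part of the matched lines). */
	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}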
89 struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job()
94 drm_sched_stop(&gpu->sched, sched_job); in etnaviv_sched_timedout_job()
97 * If the GPU managed to complete this job's fence, the timeout is  in etnaviv_sched_timedout_job()
104 * If the GPU is still making forward progress on the front-end (which in etnaviv_sched_timedout_job()
108 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); in etnaviv_sched_timedout_job()
109 change = dma_addr - gpu->hangcheck_dma_addr; in etnaviv_sched_timedout_job()
111 gpu->hangcheck_dma_addr = dma_addr; in etnaviv_sched_timedout_job()
118 /* get the GPU back into the init state */ in etnaviv_sched_timedout_job()
120 etnaviv_gpu_recover_hang(gpu); in etnaviv_sched_timedout_job()
122 drm_sched_resubmit_jobs(&gpu->sched); in etnaviv_sched_timedout_job()
124 drm_sched_start(&gpu->sched, true); in etnaviv_sched_timedout_job()
128 /* restart scheduler after GPU is usable again */ in etnaviv_sched_timedout_job()
129 drm_sched_start(&gpu->sched, true); in etnaviv_sched_timedout_job()
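Read together, the matches above trace the whole timeout handler: stop the scheduler, bail out on a spurious timeout, extend the timeout while the front-end is still making progress, and otherwise recover the hang, resubmit, and restart. A condensed sketch, assuming the signaled-fence check, the progress window, and the return convention (all of which vary by kernel version):

static enum drm_gpu_sched_stat
etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block further scheduling while the hang is inspected */
	drm_sched_stop(&gpu->sched, sched_job);

	/* If the GPU managed to complete this job's fence, the timeout is
	 * spurious; bail out (exact check assumed). */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/* If the front-end DMA address moved since the last check, the GPU
	 * is still making forward progress; give it more time (the window
	 * below is an assumed threshold). */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		goto out_no_timeout;
	}

	/* get the GPU back into the init state */
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}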
159 mutex_lock(&submit->gpu->fence_lock); in etnaviv_sched_push_job()
167 submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr, in etnaviv_sched_push_job()
182 mutex_unlock(&submit->gpu->fence_lock); in etnaviv_sched_push_job()
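The push_job matches show the fence bookkeeping done under gpu->fence_lock: the job's completion fence gets a cyclically allocated ID in gpu->fence_idr so userspace can look it up later. A sketch, assuming the job-init and error paths around the three matched lines (the drm_sched_* signatures differ between kernel versions):

static int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
				  struct etnaviv_gem_submit *submit)
{
	int ret;

	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity, submit->ctx);
	if (ret)
		goto out_unlock;

	/* publish the scheduler's finished fence under a cyclic ID */
	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		ret = -ENOMEM;	/* assumed error handling */
		goto out_unlock;
	}

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);
	return ret;
}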
187 int etnaviv_sched_init(struct etnaviv_gpu *gpu)  in etnaviv_sched_init()
191 ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, in etnaviv_sched_init()
194 dev_name(gpu->dev)); in etnaviv_sched_init()
201 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)  in etnaviv_sched_fini()
203 drm_sched_fini(&gpu->sched); in etnaviv_sched_fini()
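Init and teardown are thin wrappers around the DRM scheduler. A sketch, assuming the hardware job limit, hang limit, and 500 ms timeout arguments (the drm_sched_init() parameter list changes between kernel versions; only the ops table and dev_name(gpu->dev) are confirmed by the matches above):

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	/* one scheduler instance per GPU core, driven by etnaviv_sched_ops;
	 * limits and timeout below are assumed values */
	return drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			      etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			      msecs_to_jiffies(500), dev_name(gpu->dev));
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}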