Lines matching full:job in drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (identifier cross-reference: each hit shows the source line number, the code, and the enclosing function)
36 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
45 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
52 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
58 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
60 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
66 amdgpu_device_gpu_recover(ring->adev, job); in amdgpu_job_timedout()
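The hits above come from the job-timeout handler. Pieced together, with the omitted lines filled in by assumption (so treat this as a sketch rather than the verbatim kernel function), the control flow is: bail out if the device was unplugged, try a cheap soft recovery of the hanging job, and only then log the offender and escalate to a full GPU reset:

    static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
    {
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
        int idx;

        if (!drm_dev_enter(adev_to_drm(ring->adev), &idx)) {
            /* Effectively the job is aborted as the device is gone */
            return DRM_GPU_SCHED_STAT_ENODEV;
        }

        /* Cheap path first: try to kill only the waves of the hanging job. */
        if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
            DRM_ERROR("ring %s timeout, but soft recovered\n",
                      s_job->sched->name);
            goto exit;
        }

        /* Soft recovery failed: report the offending process, then reset. */
        memset(&ti, 0, sizeof(ti));
        amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name,
                  atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
        amdgpu_device_gpu_recover(ring->adev, job);

    exit:
        drm_dev_exit(idx);
        return DRM_GPU_SCHED_STAT_NOMINAL;
    }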
79 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc() argument
88 *job = kzalloc(size, GFP_KERNEL); in amdgpu_job_alloc()
89 if (!*job) in amdgpu_job_alloc()
96 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
97 (*job)->vm = vm; in amdgpu_job_alloc()
98 (*job)->ibs = (void *)&(*job)[1]; in amdgpu_job_alloc()
99 (*job)->num_ibs = num_ibs; in amdgpu_job_alloc()
101 amdgpu_sync_create(&(*job)->sync); in amdgpu_job_alloc()
102 amdgpu_sync_create(&(*job)->sched_sync); in amdgpu_job_alloc()
103 (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); in amdgpu_job_alloc()
104 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; in amdgpu_job_alloc()
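amdgpu_job_alloc() makes a single allocation that holds the job and its IB array back to back; `(*job)->ibs = (void *)&(*job)[1]` simply points just past the job structure. A reconstruction from the fragments (the size computation and the early-exit checks are filled in by assumption):

    int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                         struct amdgpu_job **job, struct amdgpu_vm *vm)
    {
        size_t size = sizeof(struct amdgpu_job) +
                      num_ibs * sizeof(struct amdgpu_ib);

        if (num_ibs == 0)
            return -EINVAL;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
            return -ENOMEM;

        /* Point at some ring's scheduler right away so the job always has
         * a path back to the device, even before real submission. */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];   /* IBs live right after the job */
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
    }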
111 struct amdgpu_job **job) in amdgpu_job_alloc_with_ib() argument
115 r = amdgpu_job_alloc(adev, 1, job, NULL); in amdgpu_job_alloc_with_ib()
119 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); in amdgpu_job_alloc_with_ib()
121 kfree(*job); in amdgpu_job_alloc_with_ib()
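The single-IB wrapper just chains the two allocations. Since nothing has been submitted yet when the IB allocation fails, a bare kfree() is enough cleanup. Reconstructed as a sketch:

    int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                                 enum amdgpu_ib_pool_type pool_type,
                                 struct amdgpu_job **job)
    {
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
            return r;

        r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
            kfree(*job);    /* IB allocation failed: drop the job again */

        return r;
    }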
126 void amdgpu_job_free_resources(struct amdgpu_job *job) in amdgpu_job_free_resources() argument
128 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
133 if (job->hw_fence.ops == NULL) in amdgpu_job_free_resources()
134 hw_fence = job->external_hw_fence; in amdgpu_job_free_resources()
136 hw_fence = &job->hw_fence; in amdgpu_job_free_resources()
139 f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence; in amdgpu_job_free_resources()
140 for (i = 0; i < job->num_ibs; ++i) in amdgpu_job_free_resources()
141 amdgpu_ib_free(ring->adev, &job->ibs[i], f); in amdgpu_job_free_resources()
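The only subtlety in amdgpu_job_free_resources() is picking the fence the IB memory must outlive: the scheduler's "finished" fence when the job went through an entity, otherwise the hardware fence, which for direct submissions is the external one (the embedded fence's ops stay NULL in that case). Reconstructed:

    void amdgpu_job_free_resources(struct amdgpu_job *job)
    {
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *hw_fence;
        struct dma_fence *f;
        unsigned i;

        /* Direct submissions never initialize the embedded fence. */
        if (job->hw_fence.ops == NULL)
            hw_fence = job->external_hw_fence;
        else
            hw_fence = &job->hw_fence;

        /* Prefer the scheduler fence; fall back to the hw fence. */
        f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
        for (i = 0; i < job->num_ibs; ++i)
            amdgpu_ib_free(ring->adev, &job->ibs[i], f);
    }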
146 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_free_cb() local
150 amdgpu_sync_free(&job->sync); in amdgpu_job_free_cb()
151 amdgpu_sync_free(&job->sched_sync); in amdgpu_job_free_cb()
154 if (job->hw_fence.ops != NULL) in amdgpu_job_free_cb()
155 dma_fence_put(&job->hw_fence); in amdgpu_job_free_cb()
157 kfree(job); in amdgpu_job_free_cb()
160 void amdgpu_job_free(struct amdgpu_job *job) in amdgpu_job_free() argument
162 amdgpu_job_free_resources(job); in amdgpu_job_free()
163 amdgpu_sync_free(&job->sync); in amdgpu_job_free()
164 amdgpu_sync_free(&job->sched_sync); in amdgpu_job_free()
167 if (job->hw_fence.ops != NULL) in amdgpu_job_free()
168 dma_fence_put(&job->hw_fence); in amdgpu_job_free()
170 kfree(job); in amdgpu_job_free()
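amdgpu_job_free_cb() (the scheduler's free_job callback) and amdgpu_job_free() (the manual path for jobs never pushed to an entity) share the same teardown steps. The listing omits the else branches, but the pairing of dma_fence_put() and kfree() suggests the shape below: once the embedded fence has been initialized, its refcount owns the job and dropping the fence frees it; only jobs without an embedded fence are kfree()'d directly. The helper name is hypothetical and the branching is inferred:

    /* Common tail of amdgpu_job_free_cb() and amdgpu_job_free()
     * (hypothetical helper; control flow inferred from the hits). */
    static void amdgpu_job_release(struct amdgpu_job *job)
    {
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);

        if (job->hw_fence.ops != NULL)
            dma_fence_put(&job->hw_fence);  /* fence release frees the job */
        else
            kfree(job);                     /* no embedded fence: free directly */
    }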
173 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, in amdgpu_job_submit() argument
181 r = drm_sched_job_init(&job->base, entity, owner); in amdgpu_job_submit()
185 *f = dma_fence_get(&job->base.s_fence->finished); in amdgpu_job_submit()
186 amdgpu_job_free_resources(job); in amdgpu_job_submit()
187 drm_sched_entity_push_job(&job->base, entity); in amdgpu_job_submit()
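amdgpu_job_submit() hands the job to a scheduler entity and returns a reference to the "finished" fence; after drm_sched_entity_push_job() the job belongs to the scheduler and must not be freed by hand. A hypothetical caller, modeled on how the driver's internal entities (adev->mman.entity, used for buffer moves, is one real example) submit jobs:

    struct dma_fence *fence;
    int r;

    r = amdgpu_job_submit(job, &adev->mman.entity,
                          AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
    if (r) {
        amdgpu_job_free(job);   /* never pushed, so manual cleanup is fine */
        return r;
    }

    /* Optionally block until the hardware is done, then drop our ref. */
    r = dma_fence_wait(fence, false);
    dma_fence_put(fence);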
192 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, in amdgpu_job_submit_direct() argument
197 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
198 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence); in amdgpu_job_submit_direct()
200 job->external_hw_fence = dma_fence_get(*fence); in amdgpu_job_submit_direct()
204 amdgpu_job_free(job); in amdgpu_job_submit_direct()
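The direct path bypasses the scheduler entirely: it schedules the IBs itself, keeps a reference to the returned hardware fence in external_hw_fence, and frees the job right away (amdgpu_job_free_resources() can still tie the IB memory's lifetime to that fence). Reconstructed:

    int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                                 struct dma_fence **fence)
    {
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        if (r)
            return r;

        /* Keep the hw fence around so resource teardown can wait on it. */
        job->external_hw_fence = dma_fence_get(*fence);
        amdgpu_job_free(job);

        return 0;
    }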
214 struct amdgpu_job *job = to_amdgpu_job(sched_job); in amdgpu_job_dependency() local
215 struct amdgpu_vm *vm = job->vm; in amdgpu_job_dependency()
219 fence = amdgpu_sync_get_fence(&job->sync); in amdgpu_job_dependency()
221 r = amdgpu_sync_fence(&job->sched_sync, fence); in amdgpu_job_dependency()
226 while (fence == NULL && vm && !job->vmid) { in amdgpu_job_dependency()
227 r = amdgpu_vmid_grab(vm, ring, &job->sync, in amdgpu_job_dependency()
228 &job->base.s_fence->finished, in amdgpu_job_dependency()
229 job); in amdgpu_job_dependency()
233 fence = amdgpu_sync_get_fence(&job->sync); in amdgpu_job_dependency()
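amdgpu_job_dependency() is the scheduler's dependency callback: it drains the job's sync container one fence at a time, and once that is empty it grabs a VM ID, which can itself add new fences to wait on (hence the loop). A sketch, with the guards around the visible calls filled in by assumption:

    static struct dma_fence *
    amdgpu_job_dependency(struct drm_sched_job *sched_job,
                          struct drm_sched_entity *s_entity)
    {
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        int r;

        /* Hand out the next dependency; remember it in sched_sync so the
         * later IB submission can decide on an explicit pipeline sync. */
        fence = amdgpu_sync_get_fence(&job->sync);
        if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
            r = amdgpu_sync_fence(&job->sched_sync, fence);
            if (r)
                DRM_ERROR("Error adding fence (%d)\n", r);
        }

        /* No explicit dependencies left: a VM ID is still needed, and
         * grabbing one may queue further fences into job->sync. */
        while (fence == NULL && vm && !job->vmid) {
            r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                 &job->base.s_fence->finished,
                                 job);
            if (r)
                DRM_ERROR("Error getting VM ID (%d)\n", r);

            fence = amdgpu_sync_get_fence(&job->sync);
        }

        return fence;
    }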
243 struct amdgpu_job *job; in amdgpu_job_run() local
246 job = to_amdgpu_job(sched_job); in amdgpu_job_run()
247 finished = &job->base.s_fence->finished; in amdgpu_job_run()
249 BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); in amdgpu_job_run()
251 trace_amdgpu_sched_run_job(job); in amdgpu_job_run()
253 if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter)) in amdgpu_job_run()
259 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, in amdgpu_job_run()
265 if (!job->job_run_counter) in amdgpu_job_run()
268 dma_fence_put(&job->hw_fence); in amdgpu_job_run()
269 job->job_run_counter++; in amdgpu_job_run()
270 amdgpu_job_free_resources(job); in amdgpu_job_run()
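Finally, the run callback: by this point every dependency must have been consumed (the BUG_ON asserts it), the job is cancelled rather than executed if VRAM contents were lost since it was built, and the job_run_counter handling distinguishes a first run from a resubmission after a GPU reset. Reconstructed, with the branch between the visible lines 265 and 268 inferred:

    static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
    {
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        /* Dependencies were fully drained by amdgpu_job_dependency(). */
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* A reset since allocation means the job's buffers are garbage. */
        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
            dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
            DRM_INFO("Skip scheduling IBs!\n");
        } else {
            r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                   &fence);
            if (r)
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }

        /* First run: take the job's reference on the embedded hw fence.
         * Re-runs reuse the fence; drop the stale ref if this run was
         * skipped (this branching is partly inferred). */
        if (!job->job_run_counter)
            dma_fence_get(fence);
        else if (finished->error < 0)
            dma_fence_put(&job->hw_fence);
        job->job_run_counter++;
        amdgpu_job_free_resources(job);

        return fence;
    }

All four callbacks seen in this listing plug into a single drm_sched_backend_ops table; in kernels of this vintage it is named amdgpu_sched_ops:

    const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
    };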