Lines matching full:job in drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
37 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
47 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
61 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
63 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
76 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); in amdgpu_job_timedout()
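The matches above come from amdgpu_job_timedout(), the per-ring timeout callback of the DRM scheduler: it first attempts a lightweight soft recovery of the hung job via amdgpu_ring_soft_recovery(), logs the offending task, and only then escalates to a full GPU reset. The reset-context setup around the amdgpu_device_gpu_recover() call (filtered out of the matches above) looks roughly like the sketch below; the exact fields and flags vary between kernel versions:

    struct amdgpu_reset_context reset_context;

    memset(&reset_context, 0, sizeof(reset_context));
    reset_context.method = AMD_RESET_METHOD_NONE;    /* let the reset core pick a method */
    reset_context.reset_req_dev = adev;
    clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

    r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
    if (r)
        DRM_ERROR("GPU Recovery Failed: %d\n", r);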
92 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc() argument
97 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL); in amdgpu_job_alloc()
98 if (!*job) in amdgpu_job_alloc()
105 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
106 (*job)->vm = vm; in amdgpu_job_alloc()
108 amdgpu_sync_create(&(*job)->sync); in amdgpu_job_alloc()
109 amdgpu_sync_create(&(*job)->sched_sync); in amdgpu_job_alloc()
110 (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); in amdgpu_job_alloc()
111 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; in amdgpu_job_alloc()
118 struct amdgpu_job **job) in amdgpu_job_alloc_with_ib() argument
122 r = amdgpu_job_alloc(adev, 1, job, NULL); in amdgpu_job_alloc_with_ib()
126 (*job)->num_ibs = 1; in amdgpu_job_alloc_with_ib()
127 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); in amdgpu_job_alloc_with_ib()
129 kfree(*job); in amdgpu_job_alloc_with_ib()
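amdgpu_job_alloc_with_ib() is the usual entry point for kernel-internal submissions: it allocates a job carrying a single indirect buffer of the requested size from the given IB pool. A typical caller fills the IB, submits through a scheduler entity, and frees the job itself only if submission fails. A minimal sketch, where the 64-byte size, the AMDGPU_IB_POOL_DELAYED pool and the adev->mman.entity entity are placeholders chosen for illustration:

    struct amdgpu_job *job;
    struct amdgpu_ib *ib;
    struct dma_fence *fence;
    int r;

    r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
    if (r)
        return r;

    ib = &job->ibs[0];
    /* ... emit packets into ib->ptr[] and set ib->length_dw ... */

    r = amdgpu_job_submit(job, &adev->mman.entity,    /* illustrative entity */
                          AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
    if (r) {
        amdgpu_job_free(job);    /* nothing was pushed, free the job ourselves */
        return r;
    }
    dma_fence_put(fence);    /* or keep the fence to wait for completion */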
134 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, in amdgpu_job_set_resources() argument
138 job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
139 job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; in amdgpu_job_set_resources()
142 job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
143 job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; in amdgpu_job_set_resources()
146 job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
147 job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT; in amdgpu_job_set_resources()
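amdgpu_job_set_resources() stores the GPU offsets and sizes of the GDS, GWS and OA buffer objects on the job in page units so the VM flush can program them; any of the three pointers may be NULL. A caller that collected these BOs from a BO list might use it roughly like this (the bo_list fields are illustrative):

    amdgpu_job_set_resources(job, bo_list->gds_obj,
                             bo_list->gws_obj, bo_list->oa_obj);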
151 void amdgpu_job_free_resources(struct amdgpu_job *job) in amdgpu_job_free_resources() argument
153 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
158 f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence; in amdgpu_job_free_resources()
159 for (i = 0; i < job->num_ibs; ++i) in amdgpu_job_free_resources()
160 amdgpu_ib_free(ring->adev, &job->ibs[i], f); in amdgpu_job_free_resources()
165 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_free_cb() local
169 amdgpu_sync_free(&job->sync); in amdgpu_job_free_cb()
170 amdgpu_sync_free(&job->sched_sync); in amdgpu_job_free_cb()
173 if (!job->hw_fence.ops) in amdgpu_job_free_cb()
174 kfree(job); in amdgpu_job_free_cb()
176 dma_fence_put(&job->hw_fence); in amdgpu_job_free_cb()
179 void amdgpu_job_set_gang_leader(struct amdgpu_job *job, in amdgpu_job_set_gang_leader() argument
184 WARN_ON(job->gang_submit); in amdgpu_job_set_gang_leader()
190 if (job != leader) in amdgpu_job_set_gang_leader()
192 job->gang_submit = fence; in amdgpu_job_set_gang_leader()
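amdgpu_job_set_gang_leader() points a job's gang_submit fence at the scheduled fence of a designated leader so that all members of a gang reach the hardware together; per the "if (job != leader)" check above, the leader skips taking an extra reference on its own fence to avoid a reference cycle. A rough sketch of marking gang members, where jobs[], num_jobs and the choice of leader are hypothetical:

    struct amdgpu_job *leader = jobs[0];    /* hypothetical leader selection */
    unsigned int i;

    for (i = 0; i < num_jobs; ++i)
        amdgpu_job_set_gang_leader(jobs[i], leader);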
195 void amdgpu_job_free(struct amdgpu_job *job) in amdgpu_job_free() argument
197 amdgpu_job_free_resources(job); in amdgpu_job_free()
198 amdgpu_sync_free(&job->sync); in amdgpu_job_free()
199 amdgpu_sync_free(&job->sched_sync); in amdgpu_job_free()
200 if (job->gang_submit != &job->base.s_fence->scheduled) in amdgpu_job_free()
201 dma_fence_put(job->gang_submit); in amdgpu_job_free()
203 if (!job->hw_fence.ops) in amdgpu_job_free()
204 kfree(job); in amdgpu_job_free()
206 dma_fence_put(&job->hw_fence); in amdgpu_job_free()
209 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, in amdgpu_job_submit() argument
217 r = drm_sched_job_init(&job->base, entity, owner); in amdgpu_job_submit()
221 drm_sched_job_arm(&job->base); in amdgpu_job_submit()
223 *f = dma_fence_get(&job->base.s_fence->finished); in amdgpu_job_submit()
224 amdgpu_job_free_resources(job); in amdgpu_job_submit()
225 drm_sched_entity_push_job(&job->base); in amdgpu_job_submit()
230 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, in amdgpu_job_submit_direct() argument
235 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
236 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence); in amdgpu_job_submit_direct()
241 amdgpu_job_free(job); in amdgpu_job_submit_direct()
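The two submission paths differ in who runs the IBs: amdgpu_job_submit() arms the scheduler job and pushes it onto a drm_sched_entity, so amdgpu_job_run() executes it later, while amdgpu_job_submit_direct() bypasses the scheduler and calls amdgpu_ib_schedule() on the ring immediately (useful during init or reset when the scheduler is not running). On success the direct path frees the job itself and only the fence survives; on failure the caller still owns the job. A minimal sketch, assuming job and ring are already set up:

    struct dma_fence *fence;
    int r;

    r = amdgpu_job_submit_direct(job, ring, &fence);
    if (r) {
        amdgpu_job_free(job);    /* direct submit failed before the job was consumed */
        return r;
    }

    /* the job has already been freed on success; only the fence remains */
    dma_fence_wait(fence, false);
    dma_fence_put(fence);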
249 struct amdgpu_job *job = to_amdgpu_job(sched_job); in amdgpu_job_dependency() local
250 struct amdgpu_vm *vm = job->vm; in amdgpu_job_dependency()
254 fence = amdgpu_sync_get_fence(&job->sync); in amdgpu_job_dependency()
256 r = amdgpu_sync_fence(&job->sched_sync, fence); in amdgpu_job_dependency()
261 if (!fence && job->gang_submit) in amdgpu_job_dependency()
262 fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); in amdgpu_job_dependency()
264 while (fence == NULL && vm && !job->vmid) { in amdgpu_job_dependency()
265 r = amdgpu_vmid_grab(vm, ring, &job->sync, in amdgpu_job_dependency()
266 &job->base.s_fence->finished, in amdgpu_job_dependency()
267 job); in amdgpu_job_dependency()
271 fence = amdgpu_sync_get_fence(&job->sync); in amdgpu_job_dependency()
282 struct amdgpu_job *job; in amdgpu_job_run() local
285 job = to_amdgpu_job(sched_job); in amdgpu_job_run()
286 finished = &job->base.s_fence->finished; in amdgpu_job_run()
288 BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); in amdgpu_job_run()
290 trace_amdgpu_sched_run_job(job); in amdgpu_job_run()
292 /* Skip job if VRAM is lost and never resubmit gangs */ in amdgpu_job_run()
293 if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) || in amdgpu_job_run()
294 (job->job_run_counter && job->gang_submit)) in amdgpu_job_run()
300 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, in amdgpu_job_run()
306 job->job_run_counter++; in amdgpu_job_run()
307 amdgpu_job_free_resources(job); in amdgpu_job_run()
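amdgpu_job_dependency(), amdgpu_job_run(), amdgpu_job_timedout() and amdgpu_job_free_cb() are not called directly; they implement the DRM scheduler backend callbacks for amdgpu. In kernels of this vintage the ops table at the bottom of amdgpu_job.c is wired roughly as follows (the .dependency hook was renamed in later kernels, so treat this as a sketch):

    const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,    /* fences to wait on before running */
        .run_job = amdgpu_job_run,              /* write the job's IBs to the ring */
        .timedout_job = amdgpu_job_timedout,    /* soft recovery, then GPU reset */
        .free_job = amdgpu_job_free_cb,         /* release the job's resources */
    };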