/*
 * Excerpt from the Linux lima GPU driver scheduler,
 * drivers/gpu/drm/lima/lima_sched.c. The lines below are non-contiguous
 * hits from an MMU-related source search, regrouped here by function;
 * "..." marks elided lines.
 */
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
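/*
 * lima_sched.c glues the Mali-400/450 (Utgard) GP and PP processors
 * into the common DRM GPU scheduler: a lima_sched_pipe wraps one
 * drm_gpu_scheduler, a lima_sched_task wraps one drm_sched_job, and
 * jobs complete through lima_fence, a dma_fence backed by the slab
 * cache set up below.
 */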
/* in lima_sched_slab_init() */
		return -ENOMEM;
/* in lima_sched_slab_fini() */
	if (!--lima_fence_slab_refcnt) {
		...
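/*
 * lima_fence helpers: the timeline name is simply the owning
 * scheduler's name, and fences are freed through call_rcu() so that
 * concurrent RCU-protected dma_fence lookups remain safe.
 */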
/* in lima_fence_get_timeline_name() */
	return f->pipe->base.name;
/* in lima_fence_release() */
	call_rcu(&f->base.rcu, lima_fence_release_rcu);
/* in lima_fence_create() */
	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);
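/*
 * Task setup/teardown: lima_sched_task_init() duplicates the BO array,
 * takes a reference on every BO and on the VM, and initializes an
 * xarray that will hold the task's dependency fences;
 * lima_sched_task_fini() releases all of it once the job retires.
 */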
/* in lima_sched_task_init() */
	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, vm);
	if (err) {
		kfree(task->bos);
		return err;
	}

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
/* in lima_sched_task_fini() */
	drm_sched_job_cleanup(&task->base);

	/* drop any dependency fences that were never consumed */
	xa_for_each(&task->deps, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&task->deps);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
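/*
 * Scheduler entities: each lima context gets one drm_sched_entity per
 * pipe. Queuing a task hands back its "finished" fence for userspace
 * to wait on, and lima_sched_dependency() feeds the stored dependency
 * fences to the scheduler one at a time until the xarray is drained.
 */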
/* in lima_sched_context_init() */
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, guilty);
/* in lima_sched_context_fini() */
	drm_sched_entity_fini(&context->base);
/* in lima_sched_context_queue_task() */
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	drm_sched_entity_push_job(&task->base, &context->base);
	return fence;
/* in lima_sched_dependency() */
	if (!xa_empty(&task->deps))
		return xa_erase(&task->deps, task->last_dep++);
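/*
 * Runtime-PM bracket around job execution: lima_pm_busy() resumes the
 * device and marks it busy for devfreq; lima_pm_idle() records it idle
 * again and arms autosuspend.
 */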
/* in lima_pm_busy() */
	ret = pm_runtime_get_sync(ldev->dev);
	if (ret < 0)
		return ret;
	lima_devfreq_record_busy(&ldev->devfreq);
/* in lima_pm_idle() */
	lima_devfreq_record_idle(&ldev->devfreq);

	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
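/*
 * lima_sched_run_job() is the drm_sched_ops run_job hook: create the
 * hardware fence, power up, flush the L2 caches, switch the MMUs to
 * the task's VM (through the broadcast MMU when one exists), then kick
 * the pipe-specific task_run() callback.
 */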
/* in lima_sched_run_job() */
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	...
	/* a job flagged guilty after a GPU reset is skipped */
	if (job->s_fence->finished.error < 0)
		return NULL;
	...
	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;
	/* extra ref so the IRQ handler cannot drop the fence early */
	ret = dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for MMU to work correctly, otherwise GP/PP
	 * ...
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	pipe->error = false;
	pipe->task_run(pipe, task);
	return task->fence;
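/*
 * lima_sched_build_error_task_list() snapshots a failed task so
 * userspace can retrieve it later. The dump is a lima_dump_task header
 * followed by chunks: frame registers, process name, pid, then one
 * buffer chunk per BO (heap BOs are vmap'ed page arrays, regular BOs
 * go through drm_gem_shmem_vmap()).
 */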
/* in lima_sched_build_error_task_list() */
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	...
	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	...
	/* buffer chunks, one per BO */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}
	...
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	...
	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	...
	dt->id = pipe_id;
	dt->size = size;
	...
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	...
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	...
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		...
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			...
			memcpy(buffer_chunk + 1, data, buffer_chunk->size);
			...
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			data = drm_gem_shmem_vmap(&bo->base.base);
			...
			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			drm_gem_shmem_vunmap(&bo->base.base, data);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
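/*
 * lima_sched_timedout_job(): the recovery path. Stop the scheduler,
 * raise the guilty job's karma, optionally capture an error dump,
 * reset the processor, resume any MMU page fault, drop the current VM,
 * then resubmit the surviving jobs and restart the scheduler.
 */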
/* in lima_sched_timedout_job() */
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;

	if (!pipe->error)
		DRM_ERROR("lima job timeout\n");

	drm_sched_stop(&pipe->base, &task->base);
	drm_sched_increase_karma(&task->base);

	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);
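/*
 * lima_sched_free_job() runs after all of the job's fences have been
 * released: it drops the hardware fence, removes the BOs from the VM
 * and returns the task to its slab cache.
 */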
/* in lima_sched_free_job() */
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	...
	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
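/*
 * lima_sched_recover_work() handles recoverable faults (e.g. a PP heap
 * buffer grown on demand): flush caches and MMU TLBs, then retry the
 * task via the pipe's task_recover() callback, escalating to
 * drm_sched_fault() if that fails.
 */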
/* in lima_sched_recover_work() */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
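/*
 * lima_sched_pipe_init() creates one drm_gpu_scheduler per pipe with a
 * hardware-submission limit of 1 (one job in flight) and a job timeout
 * taken from the lima_sched_timeout_ms module parameter, defaulting to
 * 500 ms.
 */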
/* in lima_sched_pipe_init() */
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 500;

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);
	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), name);
/* in lima_sched_pipe_fini() */
	drm_sched_fini(&pipe->base);
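/*
 * lima_sched_pipe_task_done() runs from the pipe's IRQ path: on error
 * it either schedules the recover work (recoverable tasks) or raises a
 * scheduler fault; on success it finalizes the task and signals its
 * hardware fence.
 */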
/* in lima_sched_pipe_task_done() */
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}