Lines Matching full:exec (all hits below are in drivers/gpu/drm/vc4/vc4_gem.c)
154 struct vc4_exec_info *exec[2]; in vc4_save_hang_state() local
166 exec[0] = vc4_first_bin_job(vc4); in vc4_save_hang_state()
167 exec[1] = vc4_first_render_job(vc4); in vc4_save_hang_state()
168 if (!exec[0] && !exec[1]) { in vc4_save_hang_state()
176 if (!exec[i]) in vc4_save_hang_state()
180 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) in vc4_save_hang_state()
182 state->bo_count += exec[i]->bo_count + unref_list_count; in vc4_save_hang_state()
195 if (!exec[i]) in vc4_save_hang_state()
198 for (j = 0; j < exec[i]->bo_count; j++) { in vc4_save_hang_state()
199 bo = to_vc4_bo(&exec[i]->bo[j]->base); in vc4_save_hang_state()
207 drm_gem_object_get(&exec[i]->bo[j]->base); in vc4_save_hang_state()
208 kernel_state->bo[k++] = &exec[i]->bo[j]->base; in vc4_save_hang_state()
211 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { in vc4_save_hang_state()
222 if (exec[0]) in vc4_save_hang_state()
223 state->start_bin = exec[0]->ct0ca; in vc4_save_hang_state()
224 if (exec[1]) in vc4_save_hang_state()
225 state->start_render = exec[1]->ct1ca; in vc4_save_hang_state()
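The hits above are from vc4_save_hang_state(), which snapshots the jobs at the head of the bin (CT0) and render (CT1) queues when the GPU hangs. A minimal sketch of the capture pattern, reconstructed from the fragments; struct layouts come from vc4_drv.h, and either queue head may be NULL:

for (i = 0; i < 2; i++) {
	if (!exec[i])
		continue;	/* only one pipeline may have work queued */

	/* Extra references keep every BO alive until the hang state
	 * is read back (the job itself is about to be torn down).
	 */
	for (j = 0; j < exec[i]->bo_count; j++) {
		drm_gem_object_get(&exec[i]->bo[j]->base);
		kernel_state->bo[k++] = &exec[i]->bo[j]->base;
	}
}

if (exec[0])
	state->start_bin = exec[0]->ct0ca;	/* bin CL start address */
if (exec[1])
	state->start_render = exec[1]->ct1ca;	/* render CL start address */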
469 struct vc4_exec_info *exec; in vc4_submit_next_bin_job() local
472 exec = vc4_first_bin_job(vc4); in vc4_submit_next_bin_job()
473 if (!exec) in vc4_submit_next_bin_job()
481 if (exec->perfmon && vc4->active_perfmon != exec->perfmon) in vc4_submit_next_bin_job()
482 vc4_perfmon_start(vc4, exec->perfmon); in vc4_submit_next_bin_job()
487 if (exec->ct0ca != exec->ct0ea) { in vc4_submit_next_bin_job()
488 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea); in vc4_submit_next_bin_job()
492 vc4_move_job_to_render(dev, exec); in vc4_submit_next_bin_job()
500 if (next && next->perfmon == exec->perfmon) in vc4_submit_next_bin_job()
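vc4_submit_next_bin_job() decides whether the job at the head of the bin queue needs the binner at all. A sketch of that decision, reconstructed from the fragments above (submit_cl() and the perfmon helpers are the driver's own, visible in the hits):

struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

if (!exec)
	return;

/* Only swap performance monitors between jobs that use different ones. */
if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
	vc4_perfmon_start(vc4, exec->perfmon);

if (exec->ct0ca != exec->ct0ea) {
	/* Non-empty bin CL: kick control-list thread 0. */
	submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
} else {
	/* Empty bin CL: nothing to bin, queue straight for rendering. */
	vc4_move_job_to_render(dev, exec);
}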
509 struct vc4_exec_info *exec = vc4_first_render_job(vc4); in vc4_submit_next_render_job() local
511 if (!exec) in vc4_submit_next_render_job()
522 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea); in vc4_submit_next_render_job()
526 vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_move_job_to_render() argument
531 list_move_tail(&exec->head, &vc4->render_job_list); in vc4_move_job_to_render()
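vc4_move_job_to_render() itself is essentially the list_move_tail() above; a plausible shape for the whole helper, assuming (not visible in these hits) that it also kicks the render pipeline when the render list was empty:

static void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	/* Requeue from the bin list onto the render list... */
	list_move_tail(&exec->head, &vc4->render_job_list);

	/* ...and start it immediately if the render thread was idle. */
	if (was_empty)
		vc4_submit_next_render_job(dev);
}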
537 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) in vc4_update_bo_seqnos() argument
542 for (i = 0; i < exec->bo_count; i++) { in vc4_update_bo_seqnos()
543 bo = to_vc4_bo(&exec->bo[i]->base); in vc4_update_bo_seqnos()
546 dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); in vc4_update_bo_seqnos()
549 list_for_each_entry(bo, &exec->unref_list, unref_head) { in vc4_update_bo_seqnos()
553 for (i = 0; i < exec->rcl_write_bo_count; i++) { in vc4_update_bo_seqnos()
554 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); in vc4_update_bo_seqnos()
557 dma_resv_add_excl_fence(bo->base.base.resv, exec->fence); in vc4_update_bo_seqnos()
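vc4_update_bo_seqnos() publishes the job's fence on every buffer it touches while the reservations are still held: a shared fence for plain lookups, the exclusive fence for buffers the render CL writes. A sketch using the pre-5.19 dma_resv API that the listing itself uses; the bo->seqno/bo->write_seqno bookkeeping fields are assumed from struct vc4_bo:

for (i = 0; i < exec->bo_count; i++) {
	bo = to_vc4_bo(&exec->bo[i]->base);
	bo->seqno = seqno;	/* assumed read-side bookkeeping */

	dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
}

for (i = 0; i < exec->rcl_write_bo_count; i++) {
	bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
	bo->write_seqno = seqno;	/* assumed write-side bookkeeping */

	dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
}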
563 struct vc4_exec_info *exec, in vc4_unlock_bo_reservations() argument
568 for (i = 0; i < exec->bo_count; i++) { in vc4_unlock_bo_reservations()
569 struct drm_gem_object *bo = &exec->bo[i]->base; in vc4_unlock_bo_reservations()
581 * (all of which are on exec->unref_list). They're entirely private
586 struct vc4_exec_info *exec, in vc4_lock_bo_reservations() argument
597 bo = &exec->bo[contended_lock]->base; in vc4_lock_bo_reservations()
605 for (i = 0; i < exec->bo_count; i++) { in vc4_lock_bo_reservations()
609 bo = &exec->bo[i]->base; in vc4_lock_bo_reservations()
616 bo = &exec->bo[j]->base; in vc4_lock_bo_reservations()
621 bo = &exec->bo[contended_lock]->base; in vc4_lock_bo_reservations()
641 for (i = 0; i < exec->bo_count; i++) { in vc4_lock_bo_reservations()
642 bo = &exec->bo[i]->base; in vc4_lock_bo_reservations()
646 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); in vc4_lock_bo_reservations()
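vc4_lock_bo_reservations() is the standard wound/wait mutex dance over every BO's dma_resv, reconstructed below from the fragments. On -EDEADLK it drops everything already held, remembers the contended index, and retries that one lock via the sleeping slow path:

int contended_lock = -1;
int i, ret;
struct drm_gem_object *bo;

ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
if (contended_lock != -1) {
	/* Slow path: sleep on the lock we lost the tie-break on. */
	bo = &exec->bo[contended_lock]->base;
	ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
	if (ret) {
		ww_acquire_done(acquire_ctx);
		return ret;
	}
}

for (i = 0; i < exec->bo_count; i++) {
	if (i == contended_lock)
		continue;

	bo = &exec->bo[i]->base;
	ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
	if (ret) {
		int j;

		/* Back off: drop every reservation already held. */
		for (j = 0; j < i; j++)
			dma_resv_unlock(exec->bo[j]->base.resv);
		if (contended_lock != -1 && contended_lock >= i)
			dma_resv_unlock(exec->bo[contended_lock]->base.resv);

		if (ret == -EDEADLK) {
			contended_lock = i;
			goto retry;
		}
		ww_acquire_done(acquire_ctx);
		return ret;
	}
}

ww_acquire_done(acquire_ctx);

The loop at lines 641-646 of the source then appears to reserve a shared-fence slot on each BO (dma_resv_reserve_shared() in this API era) so that the later fence publication cannot fail, falling back to vc4_unlock_bo_reservations() on error.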
664 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, in vc4_queue_submit() argument
682 exec->seqno = seqno; in vc4_queue_submit()
685 vc4->dma_fence_context, exec->seqno); in vc4_queue_submit()
686 fence->seqno = exec->seqno; in vc4_queue_submit()
687 exec->fence = &fence->base; in vc4_queue_submit()
690 drm_syncobj_replace_fence(out_sync, exec->fence); in vc4_queue_submit()
692 vc4_update_bo_seqnos(exec, seqno); in vc4_queue_submit()
694 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); in vc4_queue_submit()
696 list_add_tail(&exec->head, &vc4->bin_job_list); in vc4_queue_submit()
704 if (vc4_first_bin_job(vc4) == exec && in vc4_queue_submit()
705 (!renderjob || renderjob->perfmon == exec->perfmon)) { in vc4_queue_submit()
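vc4_queue_submit() runs with all reservations held: it mints a seqno-backed fence, optionally installs it in the caller's syncobj, attaches it to the BOs, and only then drops the locks and queues the job. A sketch of the tail of that sequence; vc4_fence_ops and the emit_seqno counter are assumed from vc4_fence.c/vc4_drv.h:

seqno = ++vc4->emit_seqno;
exec->seqno = seqno;

dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
	       vc4->dma_fence_context, exec->seqno);
fence->seqno = exec->seqno;
exec->fence = &fence->base;

if (out_sync)
	drm_syncobj_replace_fence(out_sync, exec->fence);

vc4_update_bo_seqnos(exec, seqno);	/* fences attached under the locks */

vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

list_add_tail(&exec->head, &vc4->bin_job_list);

/* Kick the hardware only if this job is now at the head of the bin
 * queue and the running render job (if any) shares its perfmon.
 */
renderjob = vc4_first_render_job(vc4);
if (vc4_first_bin_job(vc4) == exec &&
    (!renderjob || renderjob->perfmon == exec->perfmon))
	vc4_submit_next_bin_job(dev);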
716 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
720 * @exec: V3D job being set up
729 struct vc4_exec_info *exec) in vc4_cl_lookup_bos() argument
731 struct drm_vc4_submit_cl *args = exec->args; in vc4_cl_lookup_bos()
736 exec->bo_count = args->bo_handle_count; in vc4_cl_lookup_bos()
738 if (!exec->bo_count) { in vc4_cl_lookup_bos()
746 exec->bo = kvmalloc_array(exec->bo_count, in vc4_cl_lookup_bos()
749 if (!exec->bo) { in vc4_cl_lookup_bos()
754 handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL); in vc4_cl_lookup_bos()
762 exec->bo_count * sizeof(uint32_t))) { in vc4_cl_lookup_bos()
769 for (i = 0; i < exec->bo_count; i++) { in vc4_cl_lookup_bos()
780 exec->bo[i] = (struct drm_gem_cma_object *)bo; in vc4_cl_lookup_bos()
787 for (i = 0; i < exec->bo_count; i++) { in vc4_cl_lookup_bos()
788 ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base)); in vc4_cl_lookup_bos()
802 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release' in vc4_cl_lookup_bos()
806 vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base)); in vc4_cl_lookup_bos()
810 for (i = 0; i < exec->bo_count && exec->bo[i]; i++) in vc4_cl_lookup_bos()
811 drm_gem_object_put(&exec->bo[i]->base); in vc4_cl_lookup_bos()
815 kvfree(exec->bo); in vc4_cl_lookup_bos()
816 exec->bo = NULL; in vc4_cl_lookup_bos()
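vc4_cl_lookup_bos() turns the ioctl's array of GEM handles into pinned object pointers: copy the handle array out of userspace once, resolve each handle, then bump each BO's use count with a rollback path on failure. A sketch of the lookup core, assuming args->bo_handles is the user pointer from struct drm_vc4_submit_cl:

handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
if (!handles)
	return -ENOMEM;

if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
		   exec->bo_count * sizeof(uint32_t))) {
	ret = -EFAULT;
	goto fail;
}

for (i = 0; i < exec->bo_count; i++) {
	struct drm_gem_object *bo =
		drm_gem_object_lookup(file_priv, handles[i]);

	if (!bo) {
		ret = -ENOENT;
		goto fail;	/* drop the references taken so far */
	}
	exec->bo[i] = (struct drm_gem_cma_object *)bo;
}

On failure, the cleanup at lines 806-816 of the source drops the use counts and references already taken and sets exec->bo to NULL, which is the cue for vc4_complete_exec() to skip the BO release step.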
821 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_get_bcl() argument
823 struct drm_vc4_submit_cl *args = exec->args; in vc4_get_bcl()
843 DRM_DEBUG("overflow in exec arguments\n"); in vc4_get_bcl()
863 exec->shader_rec_u = temp + shader_rec_offset; in vc4_get_bcl()
864 exec->uniforms_u = temp + uniforms_offset; in vc4_get_bcl()
865 exec->shader_state = temp + exec_size; in vc4_get_bcl()
866 exec->shader_state_size = args->shader_rec_count; in vc4_get_bcl()
875 if (copy_from_user(exec->shader_rec_u, in vc4_get_bcl()
882 if (copy_from_user(exec->uniforms_u, in vc4_get_bcl()
895 exec->exec_bo = &bo->base; in vc4_get_bcl()
897 list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head, in vc4_get_bcl()
898 &exec->unref_list); in vc4_get_bcl()
900 exec->ct0ca = exec->exec_bo->paddr + bin_offset; in vc4_get_bcl()
902 exec->bin_u = bin; in vc4_get_bcl()
904 exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset; in vc4_get_bcl()
905 exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset; in vc4_get_bcl()
906 exec->shader_rec_size = args->shader_rec_size; in vc4_get_bcl()
908 exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset; in vc4_get_bcl()
909 exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset; in vc4_get_bcl()
910 exec->uniforms_size = args->uniforms_size; in vc4_get_bcl()
913 exec->exec_bo->vaddr + bin_offset, in vc4_get_bcl()
915 exec); in vc4_get_bcl()
919 ret = vc4_validate_shader_recs(dev, exec); in vc4_get_bcl()
923 if (exec->found_tile_binning_mode_config_packet) { in vc4_get_bcl()
924 ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used); in vc4_get_bcl()
933 ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true); in vc4_get_bcl()
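vc4_get_bcl() stages the bin CL, shader records, and uniforms back to back in one temporary buffer, validates them, and copies the result into a single exec BO whose physical addresses feed CT0CA/CT0EA. A sketch of the offset layout and its overflow checks, reconstructed from the fragments above (the 16-byte roundup alignment is an assumption in this sketch):

bin_offset = 0;
shader_rec_offset = roundup(bin_offset + args->bin_cl_size, 16);
uniforms_offset = shader_rec_offset + args->shader_rec_size;
exec_size = uniforms_offset + args->uniforms_size;

/* Userspace controls all three sizes, so every sum is checked for
 * wraparound before anything is copied in.
 */
if (uniforms_offset < shader_rec_offset ||
    exec_size < uniforms_offset) {
	DRM_DEBUG("overflow in exec arguments\n");
	return -EINVAL;
}

After the copy_from_user() calls, vc4_validate_bin_cl() and vc4_validate_shader_recs() rewrite the untrusted CL into exec_bo before exec->ct0ca is pointed at exec_bo->paddr + bin_offset.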
941 vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_complete_exec() argument
950 if (exec->fence) { in vc4_complete_exec()
951 dma_fence_signal(exec->fence); in vc4_complete_exec()
952 dma_fence_put(exec->fence); in vc4_complete_exec()
955 if (exec->bo) { in vc4_complete_exec()
956 for (i = 0; i < exec->bo_count; i++) { in vc4_complete_exec()
957 struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); in vc4_complete_exec()
960 drm_gem_object_put(&exec->bo[i]->base); in vc4_complete_exec()
962 kvfree(exec->bo); in vc4_complete_exec()
965 while (!list_empty(&exec->unref_list)) { in vc4_complete_exec()
966 struct vc4_bo *bo = list_first_entry(&exec->unref_list, in vc4_complete_exec()
974 vc4->bin_alloc_used &= ~exec->bin_slots; in vc4_complete_exec()
978 if (exec->bin_bo_used) in vc4_complete_exec()
982 vc4_perfmon_put(exec->perfmon); in vc4_complete_exec()
986 kfree(exec); in vc4_complete_exec()
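vc4_complete_exec() is the single teardown path for both successful and failed jobs, so every step tolerates a partially initialized exec. The ordering matters: waiters are signalled before any references are dropped. A condensed sketch:

if (exec->fence) {
	dma_fence_signal(exec->fence);	/* wake anyone waiting on the job */
	dma_fence_put(exec->fence);
}

if (exec->bo) {	/* NULL when vc4_cl_lookup_bos() bailed early */
	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put(&exec->bo[i]->base);
	kvfree(exec->bo);
}

while (!list_empty(&exec->unref_list)) {
	struct vc4_bo *bo = list_first_entry(&exec->unref_list,
					     struct vc4_bo, unref_head);
	list_del(&bo->unref_head);
	drm_gem_object_put(&bo->base.base);
}

kfree(exec);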
997 struct vc4_exec_info *exec = in vc4_job_handle_completed() local
1000 list_del(&exec->head); in vc4_job_handle_completed()
1003 vc4_complete_exec(&vc4->base, exec); in vc4_job_handle_completed()
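vc4_job_handle_completed() drains the done list under vc4->job_lock, but vc4_complete_exec() can sleep, so the lock is dropped around each call. A sketch of that drain, assuming the job_done_list field name from vc4_drv.h:

spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
	struct vc4_exec_info *exec =
		list_first_entry(&vc4->job_done_list,
				 struct vc4_exec_info, head);
	list_del(&exec->head);

	/* vc4_complete_exec() may sleep; drop the spinlock around it. */
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	vc4_complete_exec(&vc4->base, exec);
	spin_lock_irqsave(&vc4->job_lock, irqflags);
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);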
1047 * jobs that had completed and unrefs their BOs and frees their exec
1133 struct vc4_exec_info *exec; in vc4_submit_cl_ioctl() local
1156 exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); in vc4_submit_cl_ioctl()
1157 if (!exec) { in vc4_submit_cl_ioctl()
1158 DRM_ERROR("malloc failure on exec struct\n"); in vc4_submit_cl_ioctl()
1164 kfree(exec); in vc4_submit_cl_ioctl()
1168 exec->args = args; in vc4_submit_cl_ioctl()
1169 INIT_LIST_HEAD(&exec->unref_list); in vc4_submit_cl_ioctl()
1171 ret = vc4_cl_lookup_bos(dev, file_priv, exec); in vc4_submit_cl_ioctl()
1176 exec->perfmon = vc4_perfmon_find(vc4file, in vc4_submit_cl_ioctl()
1178 if (!exec->perfmon) { in vc4_submit_cl_ioctl()
1207 if (exec->args->bin_cl_size != 0) { in vc4_submit_cl_ioctl()
1208 ret = vc4_get_bcl(dev, exec); in vc4_submit_cl_ioctl()
1212 exec->ct0ca = 0; in vc4_submit_cl_ioctl()
1213 exec->ct0ea = 0; in vc4_submit_cl_ioctl()
1216 ret = vc4_get_rcl(dev, exec); in vc4_submit_cl_ioctl()
1220 ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx); in vc4_submit_cl_ioctl()
1241 exec->args = NULL; in vc4_submit_cl_ioctl()
1243 ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync); in vc4_submit_cl_ioctl()
1245 /* The syncobj isn't part of the exec data and we need to free our in vc4_submit_cl_ioctl()
1260 vc4_complete_exec(&vc4->base, exec); in vc4_submit_cl_ioctl()
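vc4_submit_cl_ioctl() ties the above together; everything after the allocation funnels errors into vc4_complete_exec(), which is why that function must cope with half-built jobs. A sketch of the control flow, with the goto labels assumed for illustration:

exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
if (!exec)
	return -ENOMEM;

exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);

ret = vc4_cl_lookup_bos(dev, file_priv, exec);	/* resolve handles */
if (ret)
	goto fail;

if (exec->args->bin_cl_size != 0) {
	ret = vc4_get_bcl(dev, exec);		/* copy in + validate bin CL */
	if (ret)
		goto fail;
} else {
	exec->ct0ca = 0;	/* no binning: thread 0 stays idle */
	exec->ct0ea = 0;
}

ret = vc4_get_rcl(dev, exec);			/* build the render CL */
if (ret)
	goto fail;

ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
if (ret)
	goto fail;

exec->args = NULL;	/* args lives on the ioctl stack; drop it now */

ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
return ret;

fail:
vc4_complete_exec(&vc4->base, exec);	/* tolerates partial setup */
return ret;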
1295 /* Waiting for exec to finish would need to be done before in vc4_gem_destroy()