Lines matching "host1x", "-", and "class" (fragments from the Tegra DRM channel submission code; only the matching lines are shown, grouped by enclosing function).

// SPDX-License-Identifier: GPL-2.0-only
	...
#include <linux/dma-fence-array.h>
#include <linux/dma-mapping.h>
	...
#include <linux/host1x.h>

In the SUBMIT_ERR() error-reporting macro:
	dev_err_ratelimited(context->client->base.dev, \
	...
	current->comm, ##__VA_ARGS__)
In gather_bo_get():
	kref_get(&bo->ref);

In gather_bo_release():
	dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
	...

In gather_bo_put():
	kref_put(&bo->ref, gather_bo_release);
In gather_bo_pin():
	return ERR_PTR(-ENOMEM);
	...
	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;
	...
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
	err = -ENOMEM;
	...
	err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
			      gather->gather_data_words * 4);
	...
	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	...
	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = gather->gather_data_words * 4;
	map->chunks = err;
	...
	sg_free_table(map->sgt);
	kfree(map->sgt);
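For orientation, here is a minimal sketch of the pattern gather_bo_pin() follows: describe an existing coherent DMA allocation as a scatter-gather table, then map it for a second device. The helper name remap_for_engine() and the alloc_dev/engine_dev parameters are placeholders invented for this sketch, not part of the driver.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: a buffer allocated with dma_alloc_attrs() on alloc_dev is
 * exported as a scatterlist and mapped so engine_dev can DMA from it.
 * The caller provides a zeroed struct sg_table.
 */
static int remap_for_engine(struct device *alloc_dev, struct device *engine_dev,
			    void *cpu_addr, dma_addr_t dma_handle, size_t size,
			    struct sg_table *sgt, dma_addr_t *engine_iova)
{
	int err;

	/* Build a scatterlist covering the existing coherent allocation. */
	err = dma_get_sgtable(alloc_dev, sgt, cpu_addr, dma_handle, size);
	if (err < 0)
		return err;

	/* Map it into engine_dev's DMA/IOVA space for device reads. */
	err = dma_map_sgtable(engine_dev, sgt, DMA_TO_DEVICE, 0);
	if (err) {
		sg_free_table(sgt);
		return err;
	}

	*engine_iova = sg_dma_address(sgt->sgl);
	return 0;
}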
In gather_bo_unpin():
	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
	sg_free_table(map->sgt);
	kfree(map->sgt);
	host1x_bo_put(map->bo);
In gather_bo_mmap():
	return bo->gather_data;
In tegra_drm_mapping_get():
	xa_lock(&context->mappings);
	...
	mapping = xa_load(&context->mappings, id);
	...
	kref_get(&mapping->ref);
	...
	xa_unlock(&context->mappings);
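A hedged sketch of the lookup-and-reference pattern those lines suggest, assuming a hypothetical tracked_object type with an embedded kref; the xarray lock keeps the entry from being released between xa_load() and kref_get().

#include <linux/kref.h>
#include <linux/xarray.h>

/* Hypothetical refcounted object stored in an xarray (sketch only). */
struct tracked_object {
	struct kref ref;
	/* ... payload ... */
};

static struct tracked_object *tracked_object_get(struct xarray *objects, unsigned long id)
{
	struct tracked_object *obj;

	/* Hold the xarray lock so a concurrent removal cannot drop the last
	 * reference between the lookup and kref_get(). */
	xa_lock(objects);
	obj = xa_load(objects, id);
	if (obj)
		kref_get(&obj->ref);
	xa_unlock(objects);

	return obj;
}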
In alloc_copy_user_array():
	return ERR_PTR(-EINVAL);
	...
	return ERR_PTR(-E2BIG);
	...
	return ERR_PTR(-ENOMEM);
	...
	return ERR_PTR(-EFAULT);
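Only the error returns of alloc_copy_user_array() match the search, so here is a minimal sketch of what such a helper typically does: validate the element count, guard the size computation against overflow, allocate, and copy from userspace. The name copy_user_array(), the SZ_64K cap, and the kvmalloc() choice are assumptions made for this sketch, not the driver's actual limits.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Sketch only: duplicate a userspace array of fixed-size elements. */
static void *copy_user_array(void __user *from, size_t count, size_t elem_size)
{
	size_t bytes;
	void *data;

	if (count == 0)
		return ERR_PTR(-EINVAL);

	/* Refuse requests whose byte size overflows or is unreasonably big
	 * (SZ_64K is an arbitrary cap chosen for this sketch). */
	if (check_mul_overflow(count, elem_size, &bytes) || bytes > SZ_64K)
		return ERR_PTR(-E2BIG);

	data = kvmalloc(bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data, from, bytes)) {
		kvfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}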
In submit_copy_gather_data():
	if (args->gather_data_words == 0) {
	...
	return -EINVAL;
	...
	if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
	...
	return -EINVAL;
	...
	return -ENOMEM;
	...
	host1x_bo_init(&bo->base, &gather_bo_ops);
	kref_init(&bo->ref);
	bo->dev = dev;
	...
	bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
	...
	if (!bo->gather_data) {
	...
	return -ENOMEM;
	...
	if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
	...
	dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
	...
	return -EFAULT;
	...
	bo->gather_data_words = args->gather_data_words;
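The allocate-and-copy step above can be summarized with this hedged sketch, assuming a hypothetical copy_user_to_dma() helper; the real function also records the word count and initializes the host1x BO, which the sketch omits.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/uaccess.h>

/* Sketch only: allocate a DMA-able buffer on 'dev' and fill it from
 * userspace, undoing the allocation if the copy faults. */
static void *copy_user_to_dma(struct device *dev, void __user *src, size_t len,
			      dma_addr_t *dma_handle)
{
	void *buf;

	buf = dma_alloc_attrs(dev, len, dma_handle, GFP_KERNEL, 0);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, src, len)) {
		dma_free_attrs(dev, len, buf, *dma_handle, 0);
		return ERR_PTR(-EFAULT);
	}

	return buf;
}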
In submit_write_reloc():
	dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
	...
	if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
	...
	written_ptr = iova >> buf->reloc.shift;
	...
	if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
	...
	buf->reloc.gather_offset_words, bo->gather_data_words);
	return -EINVAL;
	...
	buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
							    bo->gather_data_words);
	...
	bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;
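The bounds check followed by array_index_nospec() is the standard Spectre-v1 hardening pattern for indices derived from userspace. A hedged, generic illustration; patch_word() and its parameters are invented for the sketch.

#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/types.h>

/* Sketch only: validate an untrusted index, clamp it against speculative
 * out-of-bounds access, then use it. */
static int patch_word(u32 *words, u32 num_words, u32 index, u32 value)
{
	if (index >= num_words)
		return -EINVAL;

	index = array_index_nospec(index, num_words);
	words[index] = value;

	return 0;
}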
In submit_process_bufs():
	bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
	...
	mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
	...
	err = -ENOMEM;
	...
	for (i = 0; i < args->num_bufs; i++) {
	...
	if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
	...
	err = -EINVAL;
	...
	mapping = tegra_drm_mapping_get(context, buf->mapping);
	...
	SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
	err = -EINVAL;
	...
	mappings[i].flags = buf->flags;
	...
	job_data->used_mappings = mappings;
	job_data->num_used_mappings = i;
	...
	while (i--)
	...
	job_data->used_mappings = NULL;
In submit_get_syncpt():
	if (args->syncpt.flags) {
	...
	return -EINVAL;
	...
	sp = xa_load(syncpoints, args->syncpt.id);
	...
	return -EINVAL;
	...
	job->syncpt = host1x_syncpt_get(sp);
	job->syncpt_incrs = args->syncpt.increments;
In submit_job_add_gather() (u32 *class is the final function parameter):
	u32 *class)
	...
	if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
	SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
	return -EINVAL;
	...
	if (cmd->words > 16383) {
	...
	return -EINVAL;
	...
	if (check_add_overflow(*offset, cmd->words, &next_offset)) {
	...
	return -EINVAL;
	...
	if (next_offset > bo->gather_data_words) {
	...
	return -EINVAL;
	...
	if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
				  cmd->words, job_data, class)) {
	...
	return -EINVAL;
	...
	host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
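A minimal sketch of the overflow-safe range check performed here, using check_add_overflow() as the fragment does; check_gather_range() and its parameters are invented for illustration.

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* Sketch only: reject a gather whose [offset, offset + words) range either
 * wraps around or runs past the end of the copied gather data. */
static int check_gather_range(u32 offset, u32 words, u32 total_words)
{
	u32 end;

	if (check_add_overflow(offset, words, &end))
		return -EINVAL;

	if (end > total_words)
		return -EINVAL;

	return 0;
}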
In submit_create_job():
	u32 i, gather_offset = 0, class;
	...
	/* Set initial class for firewall. */
	class = context->client->base.class;
	...
	cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
	...
	job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
	...
	job = ERR_PTR(-ENOMEM);
	...
	job->client = &context->client->base;
	job->class = context->client->base.class;
	job->serialize = true;
	...
	for (i = 0; i < args->num_cmds; i++) {
	...
	if (cmd->flags) {
	...
	err = -EINVAL;
	...
	if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
	err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
				    &gather_offset, job_data, &class);
	...
	} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
	if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
	SUBMIT_ERR(context, "non-zero reserved value");
	err = -EINVAL;
	...
	host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
			    false, class);
	} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
	if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
	SUBMIT_ERR(context, "non-zero reserved value");
	err = -EINVAL;
	...
	if (cmd->wait_syncpt.id != args->syncpt.id) {
	...
	err = -EINVAL;
	...
	host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
			    true, class);
	...
	err = -EINVAL;
	...
	err = -EINVAL;
In release_job():
	struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
	struct tegra_drm_submit_data *job_data = job->user_data;
	...
	if (job->memory_context)
	host1x_memory_context_put(job->memory_context);
	...
	for (i = 0; i < job_data->num_used_mappings; i++)
	tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
	...
	kfree(job_data->used_mappings);
	...
	pm_runtime_mark_last_busy(client->base.dev);
	pm_runtime_put_autosuspend(client->base.dev);
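A hedged sketch of the runtime-PM pairing implied here: the submit path takes a usage reference with pm_runtime_resume_and_get() (seen further below), and release_job() drops it with the autosuspend variants. engine_begin_job(), engine_end_job(), and engine_dev are placeholders for this sketch.

#include <linux/pm_runtime.h>

static int engine_begin_job(struct device *engine_dev)
{
	/* Power the engine up if needed and take a usage reference;
	 * returns a negative errno on failure (no reference held). */
	return pm_runtime_resume_and_get(engine_dev);
}

static void engine_end_job(struct device *engine_dev)
{
	/* Record activity, then drop the reference; the device suspends
	 * only after its autosuspend delay expires. */
	pm_runtime_mark_last_busy(engine_dev);
	pm_runtime_put_autosuspend(engine_dev);
}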
In tegra_drm_ioctl_channel_submit():
	struct tegra_drm_file *fpriv = file->driver_priv;
	...
	mutex_lock(&fpriv->lock);
	...
	context = xa_load(&fpriv->contexts, args->context);
	...
	mutex_unlock(&fpriv->lock);
	...
	current->comm, args->context);
	return -EINVAL;
	...
	if (args->syncobj_in) {
	...
	err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
	...
	SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
	...
	if (args->syncobj_out) {
	syncobj = drm_syncobj_find(file, args->syncobj_out);
	...
	SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
	err = -ENOENT;
	...
	err = submit_copy_gather_data(&bo, drm->dev, context, args);
	...
	err = -ENOMEM;
	...
	job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
	...
	/* Map gather data for Host1x. */
	err = host1x_job_pin(job, context->client->base.dev);
	...
	if (context->client->ops->get_streamid_offset) {
	err = context->client->ops->get_streamid_offset(
		context->client, &job->engine_streamid_offset);
	...
	if (context->memory_context && context->client->ops->can_use_memory_ctx) {
	...
	err = context->client->ops->can_use_memory_ctx(context->client, &supported);
	...
	job->memory_context = context->memory_context;
	host1x_memory_context_get(job->memory_context);
	...
	} else if (context->client->ops->get_streamid_offset) {
	...
	spec = dev_iommu_fwspec_get(context->client->base.dev);
	if (spec && spec->num_ids > 0)
	job->engine_fallback_streamid = spec->ids[0] & 0xffff;
	...
	job->engine_fallback_streamid = 0x7f;
	...
	job->engine_fallback_streamid = 0x7f;
	...
	err = pm_runtime_resume_and_get(context->client->base.dev);
	...
	job->user_data = job_data;
	job->release = release_job;
	job->timeout = 10000;
	...
	SUBMIT_ERR(context, "host1x job submission failed: %d", err);
	...
	args->syncpt.value = job->syncpt_end;
	...
	struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
	...
	if (job->memory_context)
	host1x_memory_context_put(job->memory_context);
	...
	if (job_data && job_data->used_mappings) {
	for (i = 0; i < job_data->num_used_mappings; i++)
	tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
	...
	kfree(job_data->used_mappings);
	...
	gather_bo_put(&bo->base);
	...
	mutex_unlock(&fpriv->lock);
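Finally, a hedged sketch of the syncobj_out handling seen above: the job's completion fence is published to userspace by attaching it to a DRM syncobj. publish_job_fence() and its arguments are placeholders; the real code builds the fence from the job's syncpoint via host1x_fence_create().

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Sketch only: look up an optional output syncobj handle and install the
 * job's completion fence in it. */
static int publish_job_fence(struct drm_file *file, u32 syncobj_out_handle,
			     struct dma_fence *job_fence)
{
	struct drm_syncobj *syncobj;

	if (!syncobj_out_handle)
		return 0;

	syncobj = drm_syncobj_find(file, syncobj_out_handle);
	if (!syncobj)
		return -ENOENT;

	/* Hand the completion fence to userspace via the syncobj. */
	drm_syncobj_replace_fence(syncobj, job_fence);
	drm_syncobj_put(syncobj);

	return 0;
}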