Lines matching "non-secure-domain" in drivers/misc/fastrpc.c (the DT property is "qcom,non-secure-domain")

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
/* Protection Domain (PD) ids */
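The define block this comment heads is outside the match set; reproduced from the upstream driver for context (indicative, verify against your tree):

#define AUDIO_PD	(0) /* also GUEST_OS PD? */
#define USER_PD		(1)
#define SENSORS_PD	(2)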
bool secure;			/* struct member */
bool secure;			/* struct member */
/* fastrpc_free_map() */
if (map->table) {
	if (map->attr & FASTRPC_ATTR_SECUREMAP) {
		err = qcom_scm_assign_mem(map->phys, map->size,
					  &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
		if (err)
			dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
				map->phys, map->size, err);
	}
	dma_buf_unmap_attachment(map->attach, map->table,
				 DMA_BIDIRECTIONAL);
	dma_buf_detach(map->buf, map->attach);
	dma_buf_put(map->buf);
}
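The SECUREMAP branch above returns a secure mapping to the HLOS VM. A minimal sketch of that reassignment pattern (the elided lines set up a struct qcom_scm_vmperm much like this; reassign_to_hlos is a hypothetical helper, not part of the driver, and the srcvm type follows recent kernels):

#include <linux/qcom_scm.h>

/* Hand a physical range back to HLOS with full permissions. */
static int reassign_to_hlos(phys_addr_t phys, size_t size, u64 *srcvm)
{
	struct qcom_scm_vmperm perm = {
		.vmid = QCOM_SCM_VMID_HLOS,
		.perm = QCOM_SCM_PERM_RWX,
	};

	/* srcvm holds the current owner VMID(s); updated on success. */
	return qcom_scm_assign_mem(phys, size, srcvm, &perm, 1);
}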
/* fastrpc_map_put() */
kref_put(&map->refcount, fastrpc_free_map);

/* fastrpc_map_get() */
kref_get(&map->refcount);
/* fastrpc_map_lookup() */
mutex_lock(&fl->mutex);
list_for_each_entry(map, &fl->maps, node) {
	if (map->fd == fd) {
		*ppmap = map;
		mutex_unlock(&fl->mutex);
		return 0;
	}
}
mutex_unlock(&fl->mutex);

return -ENOENT;
/* fastrpc_buf_free() */
dma_free_coherent(buf->dev, buf->size, buf->virt,
		  FASTRPC_PHYS(buf->phys));
/* fastrpc_buf_alloc() */
if (!buf)
	return -ENOMEM;

INIT_LIST_HEAD(&buf->attachments);
INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);

buf->fl = fl;
buf->virt = NULL;
buf->phys = 0;
buf->size = size;
buf->dev = dev;
buf->raddr = 0;

buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
			       GFP_KERNEL);
if (!buf->virt) {
	mutex_destroy(&buf->lock);
	kfree(buf);
	return -ENOMEM;
}

if (fl->sctx && fl->sctx->sid)
	buf->phys += ((u64)fl->sctx->sid << 32);
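fastrpc_buf_alloc() folds the session's SMMU stream id into bits 63:32 of the address; FASTRPC_PHYS() (reproduced from the driver) masks it back off. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

#define FASTRPC_PHYS(p) ((p) & 0xffffffff)

int main(void)
{
	uint64_t phys = 0x8e300000;	/* example DMA address */
	uint32_t sid  = 0x5;		/* example SMMU stream id */

	uint64_t tagged = phys + ((uint64_t)sid << 32);

	printf("tagged 0x%llx -> phys 0x%llx, sid %u\n",
	       (unsigned long long)tagged,
	       (unsigned long long)FASTRPC_PHYS(tagged),
	       (unsigned)(tagged >> 32));
	return 0;
}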
/* fastrpc_channel_ctx_get() */
kref_get(&cctx->refcount);

/* fastrpc_channel_ctx_put() */
kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
/* fastrpc_context_free() */
cctx = ctx->cctx;

for (i = 0; i < ctx->nbufs; i++)
	fastrpc_map_put(ctx->maps[i]);

if (ctx->buf)
	fastrpc_buf_free(ctx->buf);

spin_lock_irqsave(&cctx->lock, flags);
idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
spin_unlock_irqrestore(&cctx->lock, flags);

kfree(ctx->maps);
kfree(ctx->olaps);
/* fastrpc_context_get() */
kref_get(&ctx->refcount);

/* fastrpc_context_put() */
kref_put(&ctx->refcount, fastrpc_context_free);
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)

/* olaps_cmp() */
int st = CMP(pa->start, pb->start);
/* sort with highest mapped buffer first */
int ed = CMP(pb->end, pa->end);
/* fastrpc_get_buff_overlaps() */
for (i = 0; i < ctx->nbufs; ++i) {
	ctx->olaps[i].start = ctx->args[i].ptr;
	ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
	ctx->olaps[i].raix = i;
}

sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

for (i = 0; i < ctx->nbufs; ++i) {
	/* Falling inside previous range */
	if (ctx->olaps[i].start < max_end) {
		ctx->olaps[i].mstart = max_end;
		ctx->olaps[i].mend = ctx->olaps[i].end;
		ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

		if (ctx->olaps[i].end > max_end) {
			max_end = ctx->olaps[i].end;
		} else {
			ctx->olaps[i].mend = 0;
			ctx->olaps[i].mstart = 0;
		}
	} else {
		ctx->olaps[i].mend = ctx->olaps[i].end;
		ctx->olaps[i].mstart = ctx->olaps[i].start;
		ctx->olaps[i].offset = 0;
		max_end = ctx->olaps[i].end;
	}
}
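The merge above deduplicates overlapping argument buffers before they are staged into the shared payload. A standalone re-implementation for illustration, with qsort() standing in for the kernel's sort() and field names copied from the driver; the sample buffers are made up:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct olap { uint64_t start, end, mstart, mend, offset; };

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)

static int olaps_cmp(const void *a, const void *b)
{
	const struct olap *pa = a, *pb = b;
	int st = CMP(pa->start, pb->start);
	int ed = CMP(pb->end, pa->end);	/* bigger buffer first on ties */

	return st == 0 ? ed : st;
}

int main(void)
{
	/* two overlapping buffers and one disjoint one */
	struct olap o[3] = {
		{ .start = 0x1000, .end = 0x1800 },
		{ .start = 0x1400, .end = 0x1c00 },
		{ .start = 0x3000, .end = 0x3100 },
	};
	uint64_t max_end = 0;

	qsort(o, 3, sizeof(o[0]), olaps_cmp);

	for (int i = 0; i < 3; i++) {
		if (o[i].start < max_end) {	/* falls inside previous range */
			o[i].mstart = max_end;
			o[i].mend = o[i].end;
			o[i].offset = max_end - o[i].start;
			if (o[i].end > max_end)
				max_end = o[i].end;
			else
				o[i].mstart = o[i].mend = 0; /* fully covered */
		} else {
			o[i].mstart = o[i].start;
			o[i].mend = o[i].end;
			o[i].offset = 0;
			max_end = o[i].end;
		}
		printf("[%d] mstart=0x%llx mend=0x%llx offset=0x%llx\n", i,
		       (unsigned long long)o[i].mstart,
		       (unsigned long long)o[i].mend,
		       (unsigned long long)o[i].offset);
	}
	return 0;
}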
/* fastrpc_context_alloc() */
struct fastrpc_channel_ctx *cctx = user->cctx;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
	return ERR_PTR(-ENOMEM);

INIT_LIST_HEAD(&ctx->node);
ctx->fl = user;
ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
	     REMOTE_SCALARS_OUTBUFS(sc);

if (ctx->nscalars) {
	ctx->maps = kcalloc(ctx->nscalars,
			    sizeof(*ctx->maps), GFP_KERNEL);
	if (!ctx->maps) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	ctx->olaps = kcalloc(ctx->nscalars,
			     sizeof(*ctx->olaps), GFP_KERNEL);
	if (!ctx->olaps) {
		kfree(ctx->maps);
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	ctx->args = args;
	fastrpc_get_buff_overlaps(ctx);
}

ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
ctx->tgid = user->tgid;
ctx->cctx = cctx;
init_completion(&ctx->work);
INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

spin_lock(&user->lock);
list_add_tail(&ctx->node, &user->pending);
spin_unlock(&user->lock);

spin_lock_irqsave(&cctx->lock, flags);
ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
		       FASTRPC_CTX_MAX, GFP_ATOMIC);
if (ret < 0) {
	spin_unlock_irqrestore(&cctx->lock, flags);
	goto err_idr;
}
ctx->ctxid = ret << 4;
spin_unlock_irqrestore(&cctx->lock, flags);

kref_init(&ctx->refcount);

return ctx;
err_idr:
spin_lock(&user->lock);
list_del(&ctx->node);
spin_unlock(&user->lock);

kfree(ctx->maps);
kfree(ctx->olaps);
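The IDR id is shifted left by 4 so that the low bits of the on-the-wire context can carry the PD id (see msg->ctx = ctx->ctxid | fl->pd further down); the response path undoes this with the CTXID mask. A tiny pack/unpack demo, with the two constants reproduced from the driver (treat as indicative):

#include <stdint.h>
#include <stdio.h>

#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_CTXID_MASK	(0xFF0)

int main(void)
{
	uint64_t idr_id = 42;	/* from idr_alloc_cyclic(), 1..FASTRPC_CTX_MAX */
	uint64_t pd     = 1;	/* USER_PD in the low bits */

	uint64_t msg_ctx = (idr_id << 4) | pd;			 /* sender side */
	uint64_t ctxid   = (msg_ctx & FASTRPC_CTXID_MASK) >> 4; /* response side */

	printf("msg ctx=0x%llx -> idr id=%llu\n",
	       (unsigned long long)msg_ctx, (unsigned long long)ctxid);
	return 0;
}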
/* fastrpc_map_dma_buf() */
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
int ret;

table = &a->sgt;

ret = dma_map_sgtable(attachment->dev, table, dir, 0);
if (ret)
	table = ERR_PTR(ret);
return table;

/* fastrpc_unmap_dma_buf() */
dma_unmap_sgtable(attach->dev, table, dir, 0);
/* fastrpc_release() */
struct fastrpc_buf *buffer = dmabuf->priv;

fastrpc_buf_free(buffer);
/* fastrpc_dma_buf_attach() */
struct fastrpc_buf *buffer = dmabuf->priv;

a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
	return -ENOMEM;

ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
		      FASTRPC_PHYS(buffer->phys), buffer->size);
if (ret < 0) {
	dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
	kfree(a);
	return -EINVAL;
}

a->dev = attachment->dev;
INIT_LIST_HEAD(&a->node);
attachment->priv = a;

mutex_lock(&buffer->lock);
list_add(&a->node, &buffer->attachments);
mutex_unlock(&buffer->lock);
/* fastrpc_dma_buf_detatch() */
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct fastrpc_buf *buffer = dmabuf->priv;

mutex_lock(&buffer->lock);
list_del(&a->node);
mutex_unlock(&buffer->lock);
sg_free_table(&a->sgt);
kfree(a);
/* fastrpc_vmap() */
struct fastrpc_buf *buf = dmabuf->priv;

iosys_map_set_vaddr(map, buf->virt);

/* fastrpc_mmap() */
struct fastrpc_buf *buf = dmabuf->priv;
size_t size = vma->vm_end - vma->vm_start;

return dma_mmap_coherent(buf->dev, vma, buf->virt,
			 FASTRPC_PHYS(buf->phys), size);
/* fastrpc_map_create() */
struct fastrpc_session_ctx *sess = fl->sctx;

map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
	return -ENOMEM;

INIT_LIST_HEAD(&map->node);
map->fl = fl;
map->fd = fd;
map->buf = dma_buf_get(fd);
if (IS_ERR(map->buf)) {
	err = PTR_ERR(map->buf);
	goto get_err;
}

map->attach = dma_buf_attach(map->buf, sess->dev);
if (IS_ERR(map->attach)) {
	dev_err(sess->dev, "Failed to attach dmabuf\n");
	err = PTR_ERR(map->attach);
	goto attach_err;
}

map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
if (IS_ERR(map->table)) {
	err = PTR_ERR(map->table);
	goto map_err;
}

map->phys = sg_dma_address(map->table->sgl);
map->phys += ((u64)fl->sctx->sid << 32);
map->size = len;
map->va = sg_virt(map->table->sgl);
map->len = len;
kref_init(&map->refcount);

if (attr & FASTRPC_ATTR_SECUREMAP) {
	/*
	 * If subsystem VMIDs are defined in DTSI, then do
	 * hyp_assign from HLOS to those VM(s)
	 */
	map->attr = attr;
	err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
				  fl->cctx->vmperms, fl->cctx->vmcount);
	if (err) {
		dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
			map->phys, map->size, err);
		goto map_err;
	}
}
spin_lock(&fl->lock);
list_add_tail(&map->node, &fl->maps);
spin_unlock(&fl->lock);
*ppmap = map;

return 0;

map_err:
dma_buf_detach(map->buf, map->attach);
attach_err:
dma_buf_put(map->buf);
get_err:
kfree(map);
/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page) |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
/* fastrpc_get_meta_size() */
size = (sizeof(union fastrpc_remote_arg) +
	sizeof(struct fastrpc_invoke_buf) +
	sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
	sizeof(u64) * FASTRPC_MAX_FDLIST +
	sizeof(u32) * FASTRPC_MAX_CRCLIST;
/* fastrpc_get_payload_size() */
for (oix = 0; oix < ctx->nbufs; oix++) {
	int i = ctx->olaps[oix].raix;

	if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
		if (ctx->olaps[oix].offset == 0)
			size = ALIGN(size, FASTRPC_ALIGN);
		size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
	}
}
/* fastrpc_create_maps() */
struct device *dev = ctx->fl->sctx->dev;

for (i = 0; i < ctx->nscalars; ++i) {
	if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
	    ctx->args[i].length == 0)
		continue;

	err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
	if (err) {
		dev_err(dev, "Error Creating map %d\n", err);
		return -EINVAL;
	}
}
/* fastrpc_get_args() */
struct device *dev = ctx->fl->sctx->dev;

inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
metalen = fastrpc_get_meta_size(ctx);
pkt_size = fastrpc_get_payload_size(ctx, metalen);

ctx->msg_sz = pkt_size;

err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
if (err)
	return err;

rpra = ctx->buf->virt;
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
args = (uintptr_t)ctx->buf->virt + metalen;
rlen = pkt_size - metalen;
ctx->rpra = rpra;

for (oix = 0; oix < ctx->nbufs; ++oix) {
	int mlen;

	i = ctx->olaps[oix].raix;
	len = ctx->args[i].length;

	if (ctx->maps[i]) {
		struct vm_area_struct *vma = NULL;

		rpra[i].buf.pv = (u64) ctx->args[i].ptr;
		pages[i].addr = ctx->maps[i]->phys;

		mmap_read_lock(current->mm);
		vma = find_vma(current->mm, ctx->args[i].ptr);
		if (vma)
			pages[i].addr += ctx->args[i].ptr -
					 vma->vm_start;
		mmap_read_unlock(current->mm);

		pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
		pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
			  PAGE_SHIFT;
		pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
	} else {
		if (ctx->olaps[oix].offset == 0) {
			rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
			args = ALIGN(args, FASTRPC_ALIGN);
		}

		mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

		rpra[i].buf.pv = args - ctx->olaps[oix].offset;
		pages[i].addr = ctx->buf->phys -
				ctx->olaps[oix].offset +
				(pkt_size - rlen);

		pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
		pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
		pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
		args = args + mlen;
		rlen -= mlen;
	}

	if (i < inbufs && !ctx->maps[i]) {
		void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
		void *src = (void *)(uintptr_t)ctx->args[i].ptr;

		if (!kernel) {
			if (copy_from_user(dst, (void __user *)src, len)) {
				err = -EFAULT;
				goto bail;
			}
		} else {
			memcpy(dst, src, len);
		}
	}
}

for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
	list[i].num = ctx->args[i].length ? 1 : 0;

	if (ctx->maps[i]) {
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}
	rpra[i].dma.fd = ctx->args[i].fd;
	rpra[i].dma.len = ctx->args[i].length;
	rpra[i].dma.offset = (u64) ctx->args[i].ptr;
}
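The page-span arithmetic appears twice above; a quick standalone check of it (PAGE_SIZE and PAGE_MASK hard-coded to 4 KiB for the demo, sample values made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t ptr = 0x7f12345678;	/* example user pointer */
	uint64_t len = 0x2100;		/* spans three 4 KiB pages */

	uint64_t pg_start = (ptr & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t pg_end   = ((ptr + len - 1) & PAGE_MASK) >> PAGE_SHIFT;

	printf("covers %llu pages (%llu bytes)\n",
	       (unsigned long long)(pg_end - pg_start + 1),
	       (unsigned long long)((pg_end - pg_start + 1) * PAGE_SIZE));
	return 0;
}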
/* fastrpc_put_args() */
union fastrpc_remote_arg *rpra = ctx->rpra;
struct fastrpc_user *fl = ctx->fl;

inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);

for (i = inbufs; i < ctx->nbufs; ++i) {
	if (!ctx->maps[i]) {
		void *src = (void *)(uintptr_t)rpra[i].buf.pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].buf.len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}
}
/* fastrpc_invoke_send() */
struct fastrpc_user *fl = ctx->fl;
struct fastrpc_msg *msg = &ctx->msg;

cctx = fl->cctx;
msg->pid = fl->tgid;
msg->tid = current->pid;

if (kernel)
	msg->pid = 0;

msg->ctx = ctx->ctxid | fl->pd;
msg->handle = handle;
msg->sc = ctx->sc;
msg->addr = ctx->buf ? ctx->buf->phys : 0;
msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
fastrpc_context_get(ctx);

ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
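For reference, the message being filled in here (struct layout reproduced from the driver from memory; verify against your tree):

struct fastrpc_msg {
	int pid;	/* process group id */
	int tid;	/* thread id */
	u64 ctx;	/* invoke caller context */
	u32 handle;	/* handle to invoke */
	u32 sc;		/* scalars structure describing the data */
	u64 addr;	/* physical address */
	u64 size;	/* size of contiguous region */
};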
/* fastrpc_internal_invoke() */
if (!fl->sctx)
	return -EINVAL;

if (!fl->cctx->rpdev)
	return -EPIPE;

if (handle == FASTRPC_INIT_HANDLE && !kernel) {
	dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
	return -EPERM;
}

if (ctx->nscalars) {
	err = fastrpc_get_args(kernel, ctx);
	if (err)
		goto bail;
}

err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
if (err)
	goto bail;

if (kernel) {
	if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
		err = -ETIMEDOUT;
} else {
	err = wait_for_completion_interruptible(&ctx->work);
}
if (err)
	goto bail;

/* Check the response from remote dsp */
err = ctx->retval;
if (err)
	goto bail;

if (ctx->nscalars)
	err = fastrpc_put_args(ctx, kernel);

bail:
if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
	/* We are done with this compute context */
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
}

if (err)
	dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
/* is_session_rejected() */
/* Check if the device node is non-secure and channel is secure */
if (!fl->is_secure_dev && fl->cctx->secure) {
	/*
	 * Allow untrusted applications to offload only to Unsigned PD when
	 * channel is configured as secure and block untrusted apps on channel
	 * that does not support unsigned PD or app is not requesting for
	 * unsigned PD
	 */
	if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
		return true;
	}
}
/* fastrpc_init_create_process() */
if (!args)
	return -ENOMEM;

if (copy_from_user(&init, argp, sizeof(init))) {
	err = -EFAULT;
	goto err;
}

if (is_session_rejected(fl, unsigned_module)) {
	err = -ECONNREFUSED;
	goto err;
}

if (init.filelen > INIT_FILELEN_MAX) {
	err = -EINVAL;
	goto err;
}

inbuf.pgid = fl->tgid;
inbuf.namelen = strlen(current->comm) + 1;
fl->pd = USER_PD;

err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
			&imem);
if (err)
	goto err_alloc;

fl->init_mem = imem;
args[0].fd = -1;

args[1].ptr = (u64)(uintptr_t)current->comm;
args[1].fd = -1;

pages[0].addr = imem->phys;
pages[0].size = imem->size;

args[3].fd = -1;
args[4].fd = -1;
args[5].fd = -1;

err_invoke:
fl->init_mem = NULL;
err_alloc:
if (map) {
	spin_lock(&fl->lock);
	list_del(&map->node);
	spin_unlock(&fl->lock);
}
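The request being unpacked by copy_from_user() above arrives from userspace in this shape (uapi layout reproduced from include/uapi/misc/fastrpc.h from memory; treat as indicative):

struct fastrpc_init_create {
	__u32 filelen;	/* elf file length */
	__s32 filefd;	/* fd for the file */
	__u32 attrs;
	__u32 siglen;
	__u64 file;	/* pointer to elf file */
};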
/* fastrpc_session_alloc() */
spin_lock_irqsave(&cctx->lock, flags);
for (i = 0; i < cctx->sesscount; i++) {
	if (!cctx->session[i].used && cctx->session[i].valid) {
		cctx->session[i].used = true;
		session = &cctx->session[i];
		break;
	}
}
spin_unlock_irqrestore(&cctx->lock, flags);
/* fastrpc_session_free() */
spin_lock_irqsave(&cctx->lock, flags);
session->used = false;
spin_unlock_irqrestore(&cctx->lock, flags);
/* fastrpc_release_current_dsp_process() */
tgid = fl->tgid;
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
/* fastrpc_device_release() */
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
struct fastrpc_channel_ctx *cctx = fl->cctx;

spin_lock_irqsave(&cctx->lock, flags);
list_del(&fl->user);
spin_unlock_irqrestore(&cctx->lock, flags);

if (fl->init_mem)
	fastrpc_buf_free(fl->init_mem);

list_for_each_entry_safe(ctx, n, &fl->pending, node) {
	list_del(&ctx->node);
	fastrpc_context_put(ctx);
}

list_for_each_entry_safe(map, m, &fl->maps, node) {
	list_del(&map->node);
	fastrpc_map_put(map);
}

list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
	list_del(&buf->node);
	fastrpc_buf_free(buf);
}

fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);

mutex_destroy(&fl->mutex);
kfree(fl);
file->private_data = NULL;
/* fastrpc_device_open() */
fdevice = miscdev_to_fdevice(filp->private_data);
cctx = fdevice->cctx;

fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (!fl)
	return -ENOMEM;

/* Released in fastrpc_device_release() */
fastrpc_channel_ctx_get(cctx);

filp->private_data = fl;
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;

fl->sctx = fastrpc_session_alloc(cctx);
if (!fl->sctx) {
	dev_err(&cctx->rpdev->dev, "No session available\n");
	mutex_destroy(&fl->mutex);
	kfree(fl);

	return -EBUSY;
}

spin_lock_irqsave(&cctx->lock, flags);
list_add_tail(&fl->user, &cctx->users);
spin_unlock_irqrestore(&cctx->lock, flags);
/* fastrpc_dmabuf_alloc() */
if (copy_from_user(&bp, argp, sizeof(bp)))
	return -EFAULT;

err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
if (err)
	return err;

buf->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(buf->dmabuf)) {
	err = PTR_ERR(buf->dmabuf);
	fastrpc_buf_free(buf);
	return err;
}

bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
if (bp.fd < 0) {
	dma_buf_put(buf->dmabuf);
	return -EINVAL;
}

if (copy_to_user(argp, &bp, sizeof(bp)))
	return -EFAULT;
/* fastrpc_init_attach() */
int tgid = fl->tgid;

args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
fl->pd = pd;
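A minimal userspace sketch of exercising this path, i.e. attaching the calling process to an existing PD on the DSP (the ioctl value is quoted from the uapi header as I recall it; double-check against include/uapi/misc/fastrpc.h):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef FASTRPC_IOCTL_INIT_ATTACH
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)	/* assumed value */
#endif

int main(void)
{
	int fd = open("/dev/fastrpc-adsp", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Attach this process to the DSP's guest-OS PD. */
	if (ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH) < 0)
		perror("FASTRPC_IOCTL_INIT_ATTACH");
	close(fd);
	return 0;
}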
/* fastrpc_invoke() */
if (copy_from_user(&inv, argp, sizeof(inv)))
	return -EFAULT;

args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
if (!args)
	return -ENOMEM;

if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
		   nscalars * sizeof(*args))) {
	kfree(args);
	return -EFAULT;
}
/* fastrpc_get_info_from_dsp() */
args[0].fd = -1;
args[1].fd = -1;
fl->pd = USER_PD;
/* fastrpc_get_info_from_kernel() */
struct fastrpc_channel_ctx *cctx = fl->cctx;
uint32_t attribute_id = cap->attribute_id;
uint32_t domain = cap->domain;

spin_lock_irqsave(&cctx->lock, flags);
/* check if we already have queried dsp for attributes */
if (cctx->valid_attributes) {
	spin_unlock_irqrestore(&cctx->lock, flags);
	goto done;
}
spin_unlock_irqrestore(&cctx->lock, flags);

dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
if (!dsp_attributes)
	return -ENOMEM;

err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
if (err == DSP_UNSUPPORTED_API) {
	dev_info(&cctx->rpdev->dev,
		 "Warning: DSP capabilities not supported on domain: %d\n", domain);
	kfree(dsp_attributes);
	return -EOPNOTSUPP;
} else if (err) {
	dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
	kfree(dsp_attributes);
	return err;
}

spin_lock_irqsave(&cctx->lock, flags);
memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
cctx->valid_attributes = true;
spin_unlock_irqrestore(&cctx->lock, flags);
kfree(dsp_attributes);
done:
cap->capability = cctx->dsp_attributes[attribute_id];
/* fastrpc_get_dsp_info() */
if (copy_from_user(&cap, argp, sizeof(cap)))
	return -EFAULT;

if (cap.domain >= FASTRPC_DEV_MAX) {
	dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
		cap.domain, err);
	return -ECHRNG;
}

/* Fastrpc capabilities do not support the modem domain */
if (cap.domain == MDSP_DOMAIN_ID) {
	dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
	return -ECHRNG;
}

if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
	dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
		cap.attribute_id, err);
	return -EOVERFLOW;
}

if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
	return -EFAULT;
/* fastrpc_req_munmap_impl() */
struct device *dev = fl->sctx->dev;

spin_lock(&fl->lock);
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
	if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
		buf = iter;
		break;
	}
}
spin_unlock(&fl->lock);

if (!buf)
	return -EINVAL;

req_msg.pgid = fl->tgid;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;

err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (!err) {
	dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
	spin_lock(&fl->lock);
	list_del(&buf->node);
	spin_unlock(&fl->lock);
	fastrpc_buf_free(buf);
} else {
	dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
}
/* fastrpc_req_munmap() */
if (copy_from_user(&req, argp, sizeof(req)))
	return -EFAULT;
/* fastrpc_req_mmap() */
struct device *dev = fl->sctx->dev;

if (copy_from_user(&req, argp, sizeof(req)))
	return -EFAULT;

if (req.flags != ADSP_MMAP_ADD_PAGES)
	return -EINVAL;

if (req.vaddrin)
	return -EINVAL;	/* user-allocated pages are not supported */

err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
if (err)
	return err;

req_msg.pgid = fl->tgid;

pages.addr = buf->phys;
pages.size = buf->size;

err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
	dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
	goto err_invoke;
}

/* update the buffer to be able to deallocate the memory on the DSP */
buf->raddr = (uintptr_t) rsp_msg.vaddr;

spin_lock(&fl->lock);
list_add_tail(&buf->node, &fl->mmaps);
spin_unlock(&fl->lock);

if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
	/* unmap the memory and release the buffer */
	req_unmap.vaddrout = buf->raddr;
	req_unmap.size = buf->size;
	fastrpc_req_munmap_impl(fl, &req_unmap);
	return -EFAULT;
}

dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
	buf->raddr, buf->size);
/* fastrpc_req_mem_unmap_impl() */
struct device *dev = fl->sctx->dev;

spin_lock(&fl->lock);
list_for_each_entry_safe(iter, m, &fl->maps, node) {
	if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
		map = iter;
		break;
	}
}
spin_unlock(&fl->lock);

if (!map)
	return -EINVAL;

req_msg.pgid = fl->tgid;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;

err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
	dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
	return err;
}
fastrpc_map_put(map);
/* fastrpc_req_mem_unmap() */
if (copy_from_user(&req, argp, sizeof(req)))
	return -EFAULT;
/* fastrpc_req_mem_map() */
struct device *dev = fl->sctx->dev;

if (copy_from_user(&req, argp, sizeof(req)))
	return -EFAULT;

req_msg.pgid = fl->tgid;
map->va = (void *) (uintptr_t) req.vaddrin;

pages.addr = map->phys;
pages.size = map->size;

err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
	dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
		req.fd, req.vaddrin, map->size);
	goto err_invoke;
}

/* update the buffer to be able to deallocate the memory on the DSP */
map->raddr = rsp_msg.vaddr;

if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
	/* unmap the memory and release the buffer */
	req_unmap.length = map->size;
	fastrpc_req_mem_unmap_impl(fl, &req_unmap);
	return -EFAULT;
}
/* fastrpc_device_ioctl() */
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;

switch (cmd) {
/* FASTRPC_IOCTL_* dispatch cases elided */
default:
	err = -ENOTTY;
	break;
}
/* fastrpc_cb_probe() */
struct device *dev = &pdev->dev;

cctx = dev_get_drvdata(dev->parent);
if (!cctx)
	return -EINVAL;

of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

spin_lock_irqsave(&cctx->lock, flags);
if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
	dev_err(&pdev->dev, "too many sessions\n");
	spin_unlock_irqrestore(&cctx->lock, flags);
	return -ENOSPC;
}
sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
dev_set_drvdata(dev, sess);

if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
	dev_info(dev, "FastRPC Session ID not specified in DT\n");

if (sessions > 0) {
	struct fastrpc_session_ctx *dup_sess;

	for (i = 1; i < sessions; i++) {
		if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
			break;
		dup_sess = &cctx->session[cctx->sesscount++];
		memcpy(dup_sess, sess, sizeof(*dup_sess));
	}
}
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
	dev_err(dev, "32-bit DMA enable failed\n");
	return rc;
}
/* fastrpc_cb_remove() */
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);

spin_lock_irqsave(&cctx->lock, flags);
for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
	if (cctx->session[i].sid == sess->sid) {
		cctx->session[i].valid = false;
		cctx->sesscount--;
	}
}
spin_unlock_irqrestore(&cctx->lock, flags);
/* of match table entry and platform driver name */
{ .compatible = "qcom,fastrpc-compute-cb", },

.name = "qcom,fastrpc-cb",
/* fastrpc_device_register() */
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}
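Given the "fastrpc-%s%s" format above, a system exposes nodes such as /dev/fastrpc-adsp or /dev/fastrpc-cdsp-secure, depending on the domain label and the secure flag. A quick hypothetical probe from userspace:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char * const nodes[] = {
		"/dev/fastrpc-adsp", "/dev/fastrpc-adsp-secure",
		"/dev/fastrpc-cdsp", "/dev/fastrpc-cdsp-secure",
	};

	for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		int fd = open(nodes[i], O_RDONLY);

		printf("%-26s %s\n", nodes[i], fd >= 0 ? "present" : "absent");
		if (fd >= 0)
			close(fd);
	}
	return 0;
}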
/* fastrpc_rpmsg_probe() */
struct device *rdev = &rpdev->dev;
int i, err, domain_id = -1, vmcount;
const char *domain;
bool secure_dsp;

err = of_property_read_string(rdev->of_node, "label", &domain);
if (err) {
	dev_info(rdev, "FastRPC Domain not specified in DT\n");
	return err;
}

for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
	if (!strcmp(domains[i], domain)) {
		domain_id = i;
		break;
	}
}

if (domain_id < 0) {
	dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
	return -EINVAL;
}

vmcount = of_property_read_variable_u32_array(rdev->of_node,
					      "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
if (vmcount < 0)
	vmcount = 0;
else if (!qcom_scm_is_available())
	return -EPROBE_DEFER;

data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
	return -ENOMEM;

if (vmcount) {
	data->vmcount = vmcount;
	data->perms = BIT(QCOM_SCM_VMID_HLOS);
	for (i = 0; i < data->vmcount; i++) {
		data->vmperms[i].vmid = vmids[i];
		data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
	}
}

secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
data->secure = secure_dsp;

switch (domain_id) {
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
	/* Unsigned PD offloading is only supported on CDSP */
	data->unsigned_support = false;
	err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
	if (err)
		goto fdev_error;
	break;
case CDSP_DOMAIN_ID:
	data->unsigned_support = true;
	/* Create both device nodes so that we can allow both Signed and Unsigned PD */
	err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
	if (err)
		goto fdev_error;
	err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
	if (err)
		goto fdev_error;
	break;
default:
	err = -EINVAL;
	goto fdev_error;
}

kref_init(&data->refcount);

dev_set_drvdata(&rpdev->dev, data);

INIT_LIST_HEAD(&data->users);
spin_lock_init(&data->lock);
idr_init(&data->ctx_idr);
data->domain_id = domain_id;
data->rpdev = rpdev;

return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
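For reference, the domain table the DT label is matched against (reproduced from the driver; treat as indicative):

#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, slpi, cdsp */

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp" };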
/* fastrpc_notify_users() */
spin_lock(&user->lock);
list_for_each_entry(ctx, &user->pending, node)
	complete(&ctx->work);
spin_unlock(&user->lock);
/* fastrpc_rpmsg_remove() */
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_user *user;

spin_lock_irqsave(&cctx->lock, flags);
list_for_each_entry(user, &cctx->users, user)
	fastrpc_notify_users(user);
spin_unlock_irqrestore(&cctx->lock, flags);

if (cctx->fdevice)
	misc_deregister(&cctx->fdevice->miscdev);

if (cctx->secure_fdevice)
	misc_deregister(&cctx->secure_fdevice->miscdev);

of_platform_depopulate(&rpdev->dev);

cctx->rpdev = NULL;
fastrpc_channel_ctx_put(cctx);
/* fastrpc_rpmsg_callback() */
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_invoke_rsp *rsp = data;

if (len < sizeof(*rsp))
	return -EINVAL;

ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

spin_lock_irqsave(&cctx->lock, flags);
ctx = idr_find(&cctx->ctx_idr, ctxid);
spin_unlock_irqrestore(&cctx->lock, flags);

if (!ctx) {
	dev_err(&rpdev->dev, "No context ID matches response\n");
	return -ENOENT;
}

ctx->retval = rsp->retval;
complete(&ctx->work);

/*
 * The DMA buffer associated with the context cannot be freed in
 * interrupt context so schedule it through a worker thread to
 * avoid a kernel BUG.
 */
schedule_work(&ctx->put_work);
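The response decoded above is just two words (layout reproduced from the driver from memory; verify against your tree):

struct fastrpc_invoke_rsp {
	u64 ctx;	/* invoke caller context */
	int retval;	/* invoke return value */
};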