Lines Matching +full:shared +full:- +full:memory

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
36 return -ENOMEM; in shm_get_kernel_pages()
47 return -ENOMEM; in shm_get_kernel_pages()
63 if (shm->pages) { in release_registered_pages()
64 if (shm->flags & TEE_SHM_USER_MAPPED) in release_registered_pages()
65 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
67 shm_put_kernel_pages(shm->pages, shm->num_pages); in release_registered_pages()
69 kfree(shm->pages); in release_registered_pages()
75 if (shm->flags & TEE_SHM_POOL) { in tee_shm_release()
76 teedev->pool->ops->free(teedev->pool, shm); in tee_shm_release()
77 } else if (shm->flags & TEE_SHM_DYNAMIC) { in tee_shm_release()
78 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); in tee_shm_release()
81 dev_err(teedev->dev.parent, in tee_shm_release()
87 teedev_ctx_put(shm->ctx); in tee_shm_release()
97 struct tee_device *teedev = ctx->teedev; in shm_alloc_helper()
103 return ERR_PTR(-EINVAL); in shm_alloc_helper()
105 if (!teedev->pool) { in shm_alloc_helper()
107 ret = ERR_PTR(-EINVAL); in shm_alloc_helper()
113 ret = ERR_PTR(-ENOMEM); in shm_alloc_helper()
117 refcount_set(&shm->refcount, 1); in shm_alloc_helper()
118 shm->flags = flags; in shm_alloc_helper()
119 shm->id = id; in shm_alloc_helper()
124 * to call teedev_ctx_get() or clear shm->ctx in case it's not in shm_alloc_helper()
127 shm->ctx = ctx; in shm_alloc_helper()
129 rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); in shm_alloc_helper()
145 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
146 * @ctx: Context that allocates the shared memory
147 * @size: Requested size of shared memory
149 * Memory allocated as user space shared memory is automatically freed when
152 * memory.
159 struct tee_device *teedev = ctx->teedev; in tee_shm_alloc_user_buf()
164 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
165 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_alloc_user_buf()
166 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
172 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
173 idr_remove(&teedev->idr, id); in tee_shm_alloc_user_buf()
174 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
178 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
179 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_alloc_user_buf()
180 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
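
For context, a minimal user-space sketch of the path that ends up in tee_shm_alloc_user_buf(): the TEE_IOC_SHM_ALLOC ioctl allocates the buffer and returns a file descriptor that is then mmap()ed. The device node /dev/tee0 and the 4 KiB size are illustrative assumptions and error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
	int tee_fd, shm_fd;
	void *p;

	tee_fd = open("/dev/tee0", O_RDWR);
	if (tee_fd < 0)
		return 1;

	/* On success an fd representing the shared memory is returned */
	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
	if (shm_fd < 0)
		return 1;

	/* Backed by tee_shm_fop_mmap() further down in this file */
	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* data.id identifies this buffer in later invoke calls */
	printf("shm id %d mapped at %p\n", data.id, p);

	/* Unmapping and closing shm_fd drops the last references */
	munmap(p, data.size);
	return 0;
}
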
190 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
191 * @ctx: Context that allocates the shared memory
192 * @size: Requested size of shared memory
194  * The returned memory is registered in secure world and is suitable to be in tee_shm_alloc_kernel_buf()
195  * passed as a memory buffer in a parameter argument to in tee_shm_alloc_kernel_buf()
196 * tee_client_invoke_func(). The memory allocated is later freed with a
205 return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); in tee_shm_alloc_kernel_buf()
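
A hedged kernel-side sketch of the usage this comment describes: allocate a buffer, fill it through tee_shm_get_va(), hand it to secure world as a memref parameter of tee_client_invoke_func() and free it afterwards. The session and function IDs are assumed to come from an earlier tee_client_open_session() that is not shown; the buffer size and error mapping are arbitrary.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

static int example_invoke(struct tee_context *ctx, u32 session, u32 func)
{
	struct tee_ioctl_invoke_arg arg = { };
	struct tee_param param[4] = { };
	struct tee_shm *shm;
	u8 *va;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, 256);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out_free;
	}
	memset(va, 0, 256);	/* ... fill in request data here ... */

	arg.func = func;
	arg.session = session;
	arg.num_params = 4;

	/* First parameter carries the shared buffer, the rest stay unused */
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.size = 256;
	param[0].u.memref.shm_offs = 0;

	rc = tee_client_invoke_func(ctx, &arg, param);
	if (!rc && arg.ret)
		rc = -EIO;	/* TEE-level error, mapping is arbitrary */
out_free:
	tee_shm_free(shm);
	return rc;
}
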
210 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
212 * @ctx: Context that allocates the shared memory
213 * @size: Requested size of shared memory
215  * This function returns shared memory like that returned by in tee_shm_alloc_priv_buf()
216 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
218 * passing memory not registered in advance.
229 return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); in tee_shm_alloc_priv_buf()
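
A sketch of the driver-internal case this comment describes, loosely modelled on how a TEE driver might keep a private message buffer for its own protocol; struct example_msg and the helper are hypothetical.

#include <linux/err.h>
#include <linux/tee_drv.h>
#include <linux/types.h>

struct example_msg {
	u32 cmd;
	u32 status;
};

static struct tee_shm *example_alloc_msg(struct tee_context *ctx)
{
	struct tee_shm *shm;
	struct example_msg *msg;

	shm = tee_shm_alloc_priv_buf(ctx, sizeof(*msg));
	if (IS_ERR(shm))
		return shm;

	msg = tee_shm_get_va(shm, 0);
	if (IS_ERR(msg)) {
		tee_shm_free(shm);
		return ERR_CAST(msg);
	}
	msg->cmd = 0;
	msg->status = 0;
	return shm;
}
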
237 struct tee_device *teedev = ctx->teedev; in register_shm_helper()
245 return ERR_PTR(-EINVAL); in register_shm_helper()
247 if (!teedev->desc->ops->shm_register || in register_shm_helper()
248 !teedev->desc->ops->shm_unregister) { in register_shm_helper()
249 ret = ERR_PTR(-ENOTSUPP); in register_shm_helper()
257 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
261 refcount_set(&shm->refcount, 1); in register_shm_helper()
262 shm->flags = flags; in register_shm_helper()
263 shm->ctx = ctx; in register_shm_helper()
264 shm->id = id; in register_shm_helper()
267 shm->offset = addr - start; in register_shm_helper()
268 shm->size = length; in register_shm_helper()
269 num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; in register_shm_helper()
270 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in register_shm_helper()
271 if (!shm->pages) { in register_shm_helper()
272 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
278 shm->pages); in register_shm_helper()
280 rc = shm_get_kernel_pages(start, num_pages, shm->pages); in register_shm_helper()
282 shm->num_pages = rc; in register_shm_helper()
285 rc = -ENOMEM; in register_shm_helper()
290 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, in register_shm_helper()
291 shm->num_pages, start); in register_shm_helper()
300 unpin_user_pages(shm->pages, shm->num_pages); in register_shm_helper()
302 shm_put_kernel_pages(shm->pages, shm->num_pages); in register_shm_helper()
303 kfree(shm->pages); in register_shm_helper()
314 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
315 * @ctx: Context that registers the shared memory
316 * @addr: The userspace address of the shared buffer
317 * @length: Length of the shared buffer
325 struct tee_device *teedev = ctx->teedev; in tee_shm_register_user_buf()
331 return ERR_PTR(-EFAULT); in tee_shm_register_user_buf()
333 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
334 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_register_user_buf()
335 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
341 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
342 idr_remove(&teedev->idr, id); in tee_shm_register_user_buf()
343 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
347 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
348 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_register_user_buf()
349 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
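
The user-space counterpart, sketched under the assumption that tee_fd is an already opened TEE device: TEE_IOC_SHM_REGISTER hands an existing buffer to the kernel, which pins it here and registers it with secure world.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

/* Returns an fd representing the registration, or -1 on error. The buffer
 * must stay mapped for as long as the registration is in use. */
static int register_user_buf(int tee_fd, void *buf, size_t len)
{
	struct tee_ioctl_shm_register_data data = {
		.addr = (uintptr_t)buf,
		.length = len,
	};

	return ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
}
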
359 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
361 * @ctx: Context that registers the shared memory
373 return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1); in tee_shm_register_kernel_buf()
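
A minimal kernel-side sketch, assuming buf is an ordinary kmalloc()ed buffer owned by the caller; the invoke step is elided.

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_share_kernel_buf(struct tee_context *ctx, void *buf,
				    size_t len)
{
	struct tee_shm *shm;

	shm = tee_shm_register_kernel_buf(ctx, buf, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass shm as a memref parameter to tee_client_invoke_func() ... */

	/* Drops the reference and unregisters the pages from secure world */
	tee_shm_free(shm);
	return 0;
}
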
379 tee_shm_put(filp->private_data); in tee_shm_fop_release()
385 struct tee_shm *shm = filp->private_data; in tee_shm_fop_mmap()
386 size_t size = vma->vm_end - vma->vm_start; in tee_shm_fop_mmap()
388 /* Refuse sharing shared memory provided by application */ in tee_shm_fop_mmap()
389 if (shm->flags & TEE_SHM_USER_MAPPED) in tee_shm_fop_mmap()
390 return -EINVAL; in tee_shm_fop_mmap()
393 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) in tee_shm_fop_mmap()
394 return -EINVAL; in tee_shm_fop_mmap()
396 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, in tee_shm_fop_mmap()
397 size, vma->vm_page_prot); in tee_shm_fop_mmap()
407 * tee_shm_get_fd() - Increase reference count and return file descriptor
408 * @shm: Shared memory handle
409 * @returns user space file descriptor to shared memory
415 if (shm->id < 0) in tee_shm_get_fd()
416 return -EINVAL; in tee_shm_get_fd()
419 refcount_inc(&shm->refcount); in tee_shm_get_fd()
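
A sketch in the style of the TEE core's own TEE_IOC_SHM_ALLOC handler: the new fd takes its own reference, so the caller's reference can be dropped immediately and the object lives until the fd is closed. The helper is hypothetical.

/* Consumes the caller's reference to shm in both the success and the
 * error case, mirroring the ioctl handler in tee.c. */
static long example_export_shm(struct tee_shm *shm)
{
	long fd = tee_shm_get_fd(shm);

	tee_shm_put(shm);
	return fd;
}
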
427 * tee_shm_free() - Free shared memory
428 * @shm: Handle to shared memory to free
437 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
438 * @shm: Shared memory handle
439 * @offs: Offset from start of this shared memory
440 * @returns virtual address of the shared memory + offs if offs is within
441 * the bounds of this shared memory, else an ERR_PTR
445 if (!shm->kaddr) in tee_shm_get_va()
446 return ERR_PTR(-EINVAL); in tee_shm_get_va()
447 if (offs >= shm->size) in tee_shm_get_va()
448 return ERR_PTR(-EINVAL); in tee_shm_get_va()
449 return (char *)shm->kaddr + offs; in tee_shm_get_va()
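
A short sketch of the offset handling described above; struct example_hdr is hypothetical and the shm is assumed to be large enough, otherwise the second call returns an ERR_PTR.

#include <linux/err.h>
#include <linux/tee_drv.h>
#include <linux/types.h>

struct example_hdr {
	u32 cmd;
	u32 len;
};

static void *example_payload(struct tee_shm *shm)
{
	struct example_hdr *hdr = tee_shm_get_va(shm, 0);

	if (IS_ERR(hdr))
		return hdr;

	/* Payload starts right after the header within the same buffer */
	return tee_shm_get_va(shm, sizeof(*hdr));
}
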
454 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
455 * @shm: Shared memory handle
456 * @offs: Offset from start of this shared memory
458 * @returns 0 if offs is within the bounds of this shared memory, else an
463 if (offs >= shm->size) in tee_shm_get_pa()
464 return -EINVAL; in tee_shm_get_pa()
466 *pa = shm->paddr + offs; in tee_shm_get_pa()
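
A sketch of how a driver might use this, assuming the resulting physical address is then handed to secure world in some transport-specific way that is not shown.

static int example_describe_buf(struct tee_shm *shm, size_t offs,
				phys_addr_t *pa_out)
{
	int rc = tee_shm_get_pa(shm, offs, pa_out);

	if (rc)
		return rc;

	/* *pa_out can now be passed to secure world, e.g. in an SMC call */
	return 0;
}
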
472 * tee_shm_get_from_id() - Find shared memory object and increase reference
474 * @ctx: Context owning the shared memory
475 * @id: Id of shared memory object
484 return ERR_PTR(-EINVAL); in tee_shm_get_from_id()
486 teedev = ctx->teedev; in tee_shm_get_from_id()
487 mutex_lock(&teedev->mutex); in tee_shm_get_from_id()
488 shm = idr_find(&teedev->idr, id); in tee_shm_get_from_id()
494 if (!shm || shm->ctx != ctx) in tee_shm_get_from_id()
495 shm = ERR_PTR(-EINVAL); in tee_shm_get_from_id()
497 refcount_inc(&shm->refcount); in tee_shm_get_from_id()
498 mutex_unlock(&teedev->mutex); in tee_shm_get_from_id()
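
A sketch of the lookup and its matching put, under the assumption that the id came from user space (for instance via a memref parameter in an ioctl).

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_use_shm_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);

	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... use shm while holding the reference ... */

	/* Balances the reference taken by the lookup above */
	tee_shm_put(shm);
	return 0;
}
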
504 * tee_shm_put() - Decrease reference count on a shared memory handle
505 * @shm: Shared memory handle
509 struct tee_device *teedev = shm->ctx->teedev; in tee_shm_put()
512 mutex_lock(&teedev->mutex); in tee_shm_put()
513 if (refcount_dec_and_test(&shm->refcount)) { in tee_shm_put()
520 if (shm->id >= 0) in tee_shm_put()
521 idr_remove(&teedev->idr, shm->id); in tee_shm_put()
524 mutex_unlock(&teedev->mutex); in tee_shm_put()