Lines matching +full:scatter +full:- +full:gather in drivers/gpu/drm/drm_gem_shmem_helper.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/dma-buf.h>
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	...
	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	...
	if (!obj)
		return ERR_PTR(-ENOMEM);
	...
	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;
	...
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	...
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);
	...
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	...
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * ...
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
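/*
 * Example (editor's sketch, not part of the original file): allocating a
 * natively backed object from a driver. "my_create_bo" is a hypothetical
 * helper; drm_gem_shmem_create() is the helper documented above.
 */
static int my_create_bo(struct drm_device *dev, size_t size,
			struct drm_gem_shmem_object **out)
{
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);	/* e.g. -ENOMEM */

	*out = shmem;
	return 0;
}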
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * ...
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);
	...
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
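/*
 * Example (editor's sketch): a driver wiring the helpers in this file into
 * its &drm_gem_object_funcs so the free, export, vmap and mmap paths all go
 * through the shmem code. "my_gem_funcs" is a hypothetical name; the
 * callback assignments mirror the defaults this file installs.
 */
static const struct drm_gem_object_funcs my_gem_funcs = {
	.free		= drm_gem_shmem_free_object,
	.print_info	= drm_gem_shmem_print_info,
	.pin		= drm_gem_shmem_pin,
	.unpin		= drm_gem_shmem_unpin,
	.get_sg_table	= drm_gem_shmem_get_sg_table,
	.vmap		= drm_gem_shmem_vmap,
	.vunmap		= drm_gem_shmem_vunmap,
	.mmap		= drm_gem_shmem_mmap,
};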
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	...
	if (shmem->pages_use_count++ > 0)
		return 0;
	...
		shmem->pages_use_count = 0;	/* allocation failed, undo the count */
	...
	shmem->pages = pages;

	return 0;
}
/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * ...
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * ...
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
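/*
 * Example (editor's sketch): a driver taking a temporary pages reference
 * around a CPU access. "my_cpu_prep" is a hypothetical helper.
 */
static int my_cpu_prep(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);	/* may fail, e.g. -EINTR/-ENOMEM */
	if (ret)
		return ret;

	/* ... access shmem->pages here ... */

	drm_gem_shmem_put_pages(shmem);
	return 0;
}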
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * ...
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * ...
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
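/*
 * Example (editor's sketch): holding the backing pages across a DMA
 * operation. Normally the PRIME export path invokes .pin/.unpin for you;
 * "my_begin_dma"/"my_end_dma" are hypothetical direct users.
 */
static int my_begin_dma(struct drm_gem_shmem_object *shmem)
{
	/* takes a pages reference so the buffer cannot lose its backing store */
	return drm_gem_shmem_pin(&shmem->base);
}

static void my_end_dma(struct drm_gem_shmem_object *shmem)
{
	drm_gem_shmem_unpin(&shmem->base);
}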
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;
		...
		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}
	...
err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * ...
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 * ...
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * ...
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
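/*
 * Example (editor's sketch): temporary CPU access through the vmap helpers
 * and the struct dma_buf_map interface of this kernel generation.
 * "my_clear_bo" is a hypothetical helper.
 */
static int my_clear_bo(struct drm_gem_shmem_object *shmem)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(&shmem->base, &map);
	if (ret)
		return ret;

	/* shmem buffers live in system memory, so map.vaddr is valid here */
	memset(map.vaddr, 0, shmem->base.size);

	drm_gem_shmem_vunmap(&shmem->base, &map);
	return 0;
}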
static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	...
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	...
}
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	...
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct drm_device *dev = obj->dev;
	...
	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;
	...
	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	...
	/* instruct shmfs to drop all of its backing pages, *now* */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
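/*
 * Example (editor's sketch): how a driver shrinker can combine the madvise
 * state with the purge helper. "struct my_device" and its shrinker_list are
 * hypothetical; drivers such as panfrost keep a similar list of objects
 * userspace has marked DONTNEED via drm_gem_shmem_madvise().
 */
struct my_device {
	struct list_head shrinker_list;	/* hypothetical: objects marked DONTNEED */
};

static unsigned long my_shrinker_scan(struct my_device *mdev)
{
	struct drm_gem_shmem_object *shmem;
	unsigned long freed = 0;

	list_for_each_entry(shmem, &mdev->shrinker_list, madv_list) {
		if (drm_gem_shmem_is_purgeable(shmem) &&
		    drm_gem_shmem_purge(&shmem->base))
			freed += shmem->base.size >> PAGE_SHIFT;
	}

	return freed;
}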
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * ...
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	...
	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
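/*
 * Worked example of the sizing above (editor's note): for a 640x480 dumb
 * buffer at bpp = 32, min_pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes,
 * and size = PAGE_ALIGN(2560 * 480) = PAGE_ALIGN(1228800) = 1228800 bytes,
 * which is already a multiple of the common 4096-byte page size.
 */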
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	...
	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_page(vma, vmf->address, page);
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	...
	WARN_ON(shmem->base.import_attach);
	...
}
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	...
}
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * ...
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	...
	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}
	...
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
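/*
 * Example (editor's sketch): the userspace side of the mmap path above for
 * a dumb buffer. DRM_IOCTL_MODE_MAP_DUMB returns the fake offset that
 * drm_gem_shmem_fault() later resolves page by page.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *map_dumb_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}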
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * ...
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	...
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * ...
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * ...
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any difference between dma-buf imported and natively allocated
 * objects.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	...
	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);
	...
	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	...
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	...
	shmem->sgt = sgt;

	return sgt;
	...
}
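/*
 * Example (editor's sketch): programming hardware from the dma-mapped
 * table. "my_bind_bo" and "my_program_mmu" are hypothetical; the helper
 * call and the for_each_sgtable_dma_sg() iterator are real kernel APIs.
 */
static void my_program_mmu(dma_addr_t addr, unsigned int len);	/* hypothetical */

static int my_bind_bo(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(&shmem->base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* walk DMA addresses, not CPU pages: the table is already dma-mapped */
	for_each_sgtable_dma_sg(sgt, sg, i)
		my_program_mmu(sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}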
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *					 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	...
	shmem->sgt = sgt;
	...
	return &shmem->base;
}
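/*
 * Example (editor's sketch): hooking the import helper into &drm_driver.
 * "my_driver" is hypothetical; the two callback assignments are the usage
 * the documentation above describes.
 */
static const struct drm_driver my_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.dumb_create		   = drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.name			   = "my-driver",
};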