Lines matching "non", "-", "continuous" in sound/core/memalloc.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
11 #include <linux/dma-mapping.h>
12 #include <linux/dma-map-ops.h>
25 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
26 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
38 if (WARN_ON_ONCE(!ops || !ops->alloc)) in __snd_dma_alloc_pages()
40 return ops->alloc(dmab, size); in __snd_dma_alloc_pages()
44 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
52 * Calls the memory-allocator function for the corresponding
63 return -ENXIO; in snd_dma_alloc_dir_pages()
65 return -ENXIO; in snd_dma_alloc_dir_pages()
68 dmab->dev.type = type; in snd_dma_alloc_dir_pages()
69 dmab->dev.dev = device; in snd_dma_alloc_dir_pages()
70 dmab->dev.dir = dir; in snd_dma_alloc_dir_pages()
71 dmab->bytes = 0; in snd_dma_alloc_dir_pages()
72 dmab->addr = 0; in snd_dma_alloc_dir_pages()
73 dmab->private_data = NULL; in snd_dma_alloc_dir_pages()
74 dmab->area = __snd_dma_alloc_pages(dmab, size); in snd_dma_alloc_dir_pages()
75 if (!dmab->area) in snd_dma_alloc_dir_pages()
76 return -ENOMEM; in snd_dma_alloc_dir_pages()
77 dmab->bytes = size; in snd_dma_alloc_dir_pages()
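
As a usage illustration (not part of this file; the helper names and the 64 KiB size are hypothetical), a driver would typically pair this call with snd_dma_free_pages(), assuming a valid struct device obtained at probe time:

        #include <linux/dma-mapping.h>
        #include <linux/string.h>
        #include <sound/memalloc.h>

        /* Hypothetical helper: allocate a 64 KiB device-coherent playback buffer */
        static int example_alloc_buffer(struct device *dev, struct snd_dma_buffer *dmab)
        {
                int err;

                /* SNDRV_DMA_TYPE_DEV is served by the dma_alloc_coherent() backend */
                err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
                                              DMA_TO_DEVICE, 64 * 1024, dmab);
                if (err < 0)
                        return err;             /* -ENXIO or -ENOMEM */

                /* dmab->area is the CPU mapping, dmab->addr the DMA address */
                memset(dmab->area, 0, dmab->bytes);
                return 0;
        }

        /* Release the buffer allocated above */
        static void example_free_buffer(struct snd_dma_buffer *dmab)
        {
                snd_dma_free_pages(dmab);
        }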
83 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
89 * Calls the memory-allocator function for the corresponding
103 if (err != -ENOMEM) in snd_dma_alloc_pages_fallback()
106 return -ENOMEM; in snd_dma_alloc_pages_fallback()
110 if (!dmab->area) in snd_dma_alloc_pages_fallback()
111 return -ENOMEM; in snd_dma_alloc_pages_fallback()
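
From the caller's side, the fallback variant may hand back less than was asked for (the size is halved repeatedly, but never below PAGE_SIZE), so dmab->bytes reports what was actually obtained. A minimal sketch with an illustrative helper name (includes as in the previous sketch, plus <linux/device.h> for dev_info()):

        /* Illustrative: request 256 KiB, accept whatever the fallback managed */
        static int example_alloc_fallback(struct device *dev, struct snd_dma_buffer *dmab)
        {
                int err;

                err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
                                                   256 * 1024, dmab);
                if (err < 0)
                        return err;
                dev_info(dev, "got %zu bytes of DMA memory\n", dmab->bytes);
                return 0;
        }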
117 * snd_dma_free_pages - release the allocated buffer
126 if (ops && ops->free) in snd_dma_free_pages()
127 ops->free(dmab); in snd_dma_free_pages()
138 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
180 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
192 return -ENOENT; in snd_dma_buffer_mmap()
194 if (ops && ops->mmap) in snd_dma_buffer_mmap()
195 return ops->mmap(dmab, area); in snd_dma_buffer_mmap()
197 return -ENOENT; in snd_dma_buffer_mmap()
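
A hedged sketch of forwarding a user-space mapping request to the buffer's own mmap handler; how the snd_dma_buffer pointer reaches the caller (here via file private data) and the surrounding file_operations wiring are assumed, not shown in this file:

        /* Hypothetical .mmap file operation for a device exposing the buffer */
        static int example_mmap(struct file *file, struct vm_area_struct *vma)
        {
                struct snd_dma_buffer *dmab = file->private_data;

                return snd_dma_buffer_mmap(dmab, vma);  /* -ENOENT if unsupported */
        }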
203 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
212 if (!dmab || !dmab->dev.need_sync) in snd_dma_buffer_sync()
215 if (ops && ops->sync) in snd_dma_buffer_sync()
216 ops->sync(dmab, mode); in snd_dma_buffer_sync()
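
When the backing allocator is non-coherent (dmab->dev.need_sync is set), CPU accesses are bracketed with these sync calls; a minimal capture-side sketch, with the copy destination and helper name assumed:

        /* Illustrative: pull freshly DMA'd samples out of a capture buffer */
        static void example_read_samples(struct snd_dma_buffer *dmab,
                                         void *dst, size_t bytes)
        {
                snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);    /* no-op if coherent */
                memcpy(dst, dmab->area, bytes);
                snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE); /* hand back to the device */
        }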
222 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
232 if (ops && ops->get_addr) in snd_sgbuf_get_addr()
233 return ops->get_addr(dmab, offset); in snd_sgbuf_get_addr()
235 return dmab->addr + offset; in snd_sgbuf_get_addr()
240 * snd_sgbuf_get_page - return the physical page at the corresponding offset
250 if (ops && ops->get_page) in snd_sgbuf_get_page()
251 return ops->get_page(dmab, offset); in snd_sgbuf_get_page()
253 return virt_to_page(dmab->area + offset); in snd_sgbuf_get_page()
258 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
259 * on sg-buffer
271 if (ops && ops->get_chunk_size) in snd_sgbuf_get_chunk_size()
272 return ops->get_chunk_size(dmab, ofs, size); in snd_sgbuf_get_chunk_size()
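
A sketch of how a driver might walk an SG buffer in hardware-contiguous chunks when programming DMA descriptors; hw_program_desc() is a hypothetical stand-in for the device-specific part:

        /* Placeholder for the real, device-specific descriptor write */
        static void hw_program_desc(dma_addr_t addr, unsigned int len) { }

        /* Walk "dmab" chunk by chunk; each chunk is physically contiguous */
        static void example_program_sg(struct snd_dma_buffer *dmab, unsigned int bytes)
        {
                unsigned int ofs = 0;

                while (ofs < bytes) {
                        unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
                                                                      bytes - ofs);

                        hw_program_desc(snd_sgbuf_get_addr(dmab, ofs), chunk);
                        ofs += chunk;
                }
        }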
279 * Continuous pages allocator
294 if ((*addr + size - 1) & ~dev->coherent_dma_mask) { in do_alloc_pages()
323 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false); in snd_dma_continuous_alloc()
328 do_free_pages(dmab->area, dmab->bytes, false); in snd_dma_continuous_free()
334 return remap_pfn_range(area, area->vm_start, in snd_dma_continuous_mmap()
335 dmab->addr >> PAGE_SHIFT, in snd_dma_continuous_mmap()
336 area->vm_end - area->vm_start, in snd_dma_continuous_mmap()
337 area->vm_page_prot); in snd_dma_continuous_mmap()
356 vfree(dmab->area); in snd_dma_vmalloc_free()
362 return remap_vmalloc_range(area, dmab->area, 0); in snd_dma_vmalloc_mmap()
366 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
377 return vmalloc_to_page(dmab->area + offset); in snd_dma_vmalloc_get_page()
388 end = ofs + size - 1; /* the last byte address */ in snd_dma_vmalloc_get_chunk_size()
397 return start - ofs; in snd_dma_vmalloc_get_chunk_size()
399 /* ok, all on continuous pages */ in snd_dma_vmalloc_get_chunk_size()
419 struct device *dev = dmab->dev.dev; in snd_dma_iram_alloc()
423 if (dev->of_node) { in snd_dma_iram_alloc()
424 pool = of_gen_pool_get(dev->of_node, "iram", 0); in snd_dma_iram_alloc()
426 dmab->private_data = pool; in snd_dma_iram_alloc()
428 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); in snd_dma_iram_alloc()
436 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_iram_alloc()
442 struct gen_pool *pool = dmab->private_data; in snd_dma_iram_free()
444 if (pool && dmab->area) in snd_dma_iram_free()
445 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); in snd_dma_iram_free()
451 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_iram_mmap()
452 return remap_pfn_range(area, area->vm_start, in snd_dma_iram_mmap()
453 dmab->addr >> PAGE_SHIFT, in snd_dma_iram_mmap()
454 area->vm_end - area->vm_start, in snd_dma_iram_mmap()
455 area->vm_page_prot); in snd_dma_iram_mmap()
470 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_dev_alloc()
475 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_dev_free()
481 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_dev_mmap()
482 dmab->area, dmab->addr, dmab->bytes); in snd_dma_dev_mmap()
492 * Write-combined pages
494 /* x86-specific allocations */
498 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true); in snd_dma_wc_alloc()
503 do_free_pages(dmab->area, dmab->bytes, true); in snd_dma_wc_free()
509 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_wc_mmap()
515 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_wc_alloc()
520 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_wc_free()
526 return dma_mmap_wc(dmab->dev.dev, area, in snd_dma_wc_mmap()
527 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
538 * Non-contiguous pages allocator
545 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, in snd_dma_noncontig_alloc()
548 if (!sgt && !get_dma_ops(dmab->dev.dev)) { in snd_dma_noncontig_alloc()
549 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_noncontig_alloc()
550 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK; in snd_dma_noncontig_alloc()
552 dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK; in snd_dma_noncontig_alloc()
559 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, in snd_dma_noncontig_alloc()
560 sg_dma_address(sgt->sgl)); in snd_dma_noncontig_alloc()
561 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); in snd_dma_noncontig_alloc()
563 dmab->private_data = sgt; in snd_dma_noncontig_alloc()
565 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_noncontig_alloc()
567 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); in snd_dma_noncontig_alloc()
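
From a caller's perspective the type switch above is transparent; after a successful allocation, dmab->dev.type reports which backend actually served the request, as in this hedged sketch (helper name and 512 KiB size are illustrative):

        /* Illustrative: request an SG buffer and report if the fallback kicked in */
        static int example_alloc_sg(struct device *dev, struct snd_dma_buffer *dmab)
        {
                int err;

                err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, dev,
                                              DMA_FROM_DEVICE, 512 * 1024, dmab);
                if (err < 0)
                        return err;
                if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG_FALLBACK)
                        dev_dbg(dev, "using page-by-page SG fallback\n");
                return 0;
        }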
574 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); in snd_dma_noncontig_free()
575 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, in snd_dma_noncontig_free()
576 dmab->dev.dir); in snd_dma_noncontig_free()
582 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_noncontig_mmap()
583 dmab->bytes, dmab->private_data); in snd_dma_noncontig_mmap()
590 if (dmab->dev.dir == DMA_TO_DEVICE) in snd_dma_noncontig_sync()
592 invalidate_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
593 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
594 dmab->dev.dir); in snd_dma_noncontig_sync()
596 if (dmab->dev.dir == DMA_FROM_DEVICE) in snd_dma_noncontig_sync()
598 flush_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
599 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
600 dmab->dev.dir); in snd_dma_noncontig_sync()
608 struct sg_table *sgt = dmab->private_data; in snd_dma_noncontig_iter_set()
610 __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, in snd_dma_noncontig_iter_set()
643 end = ofs + size - 1; /* the last byte address */ in snd_dma_noncontig_get_chunk_size()
656 return start - ofs; in snd_dma_noncontig_get_chunk_size()
658 /* ok, all on continuous pages */ in snd_dma_noncontig_get_chunk_size()
672 /* x86-specific SG-buffer with WC pages */
679 struct sg_table *sgt = dmab->private_data; in snd_dma_sg_wc_alloc()
684 if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_sg_wc_alloc()
693 struct sg_table *sgt = dmab->private_data; in snd_dma_sg_wc_free()
704 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_wc_mmap()
705 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_sg_wc_mmap()
706 dmab->bytes, dmab->private_data); in snd_dma_sg_wc_mmap()
719 /* Fallback SG-buffer allocations for x86 */
729 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK; in __snd_dma_sg_fallback_free()
732 for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++) in __snd_dma_sg_fallback_free()
733 do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc); in __snd_dma_sg_fallback_free()
734 kvfree(sgbuf->pages); in __snd_dma_sg_fallback_free()
735 kvfree(sgbuf->addrs); in __snd_dma_sg_fallback_free()
745 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK; in snd_dma_sg_fallback_alloc()
754 sgbuf->pages = pages; in snd_dma_sg_fallback_alloc()
755 sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
756 if (!sgbuf->addrs) in snd_dma_sg_fallback_alloc()
759 for (i = 0; i < count; sgbuf->count++, i++) { in snd_dma_sg_fallback_alloc()
760 p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc); in snd_dma_sg_fallback_alloc()
763 sgbuf->pages[i] = virt_to_page(p); in snd_dma_sg_fallback_alloc()
769 dmab->private_data = sgbuf; in snd_dma_sg_fallback_alloc()
771 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_sg_fallback_alloc()
781 vunmap(dmab->area); in snd_dma_sg_fallback_free()
782 __snd_dma_sg_fallback_free(dmab, dmab->private_data); in snd_dma_sg_fallback_free()
788 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_mmap()
790 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) in snd_dma_sg_fallback_mmap()
791 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_fallback_mmap()
792 return vm_map_pages(area, sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_mmap()
807 * Non-coherent pages allocator
813 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, in snd_dma_noncoherent_alloc()
814 dmab->dev.dir, DEFAULT_GFP); in snd_dma_noncoherent_alloc()
816 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); in snd_dma_noncoherent_alloc()
822 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, in snd_dma_noncoherent_free()
823 dmab->addr, dmab->dev.dir); in snd_dma_noncoherent_free()
829 area->vm_page_prot = vm_get_page_prot(area->vm_flags); in snd_dma_noncoherent_mmap()
830 return dma_mmap_pages(dmab->dev.dev, area, in snd_dma_noncoherent_mmap()
831 area->vm_end - area->vm_start, in snd_dma_noncoherent_mmap()
832 virt_to_page(dmab->area)); in snd_dma_noncoherent_mmap()
839 if (dmab->dev.dir != DMA_TO_DEVICE) in snd_dma_noncoherent_sync()
840 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
841 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
843 if (dmab->dev.dir != DMA_FROM_DEVICE) in snd_dma_noncoherent_sync()
844 dma_sync_single_for_device(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
845 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
886 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || in snd_dma_get_ops()
887 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops))) in snd_dma_get_ops()
889 return snd_dma_ops[dmab->dev.type]; in snd_dma_get_ops()