Lines Matching +full:non +full:- +full:continuous

1 // SPDX-License-Identifier: GPL-2.0-or-later
11 #include <linux/dma-mapping.h>
22 /* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
/* in snd_mem_get_gfp_flags(): */
26	if (!dmab->dev.dev)
29		return (__force gfp_t)(unsigned long)dmab->dev.dev;
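/*
 * Hedged usage sketch, not part of this file: for the CONTINUOUS and
 * VMALLOC buffer types the "device" argument is not a real struct device;
 * in kernels of this vintage the snd_dma_continuous_data() macro from
 * <sound/memalloc.h> packs GFP flags into that pointer, which
 * snd_mem_get_gfp_flags() above unpacks again. Size and flags below are
 * illustrative only.
 */
struct snd_dma_buffer buf;
int err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
			      snd_dma_continuous_data(GFP_KERNEL),
			      PAGE_SIZE, &buf);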
/* in __snd_dma_alloc_pages(): */
36	if (WARN_ON_ONCE(!ops || !ops->alloc))
38	return ops->alloc(dmab, size);
42 * snd_dma_alloc_pages - allocate the buffer area according to the given type
48 * Calls the memory-allocator function for the corresponding
/* in snd_dma_alloc_pages(): */
58		return -ENXIO;
60		return -ENXIO;
63	dmab->dev.type = type;
64	dmab->dev.dev = device;
65	dmab->bytes = 0;
66	dmab->addr = 0;
67	dmab->private_data = NULL;
68	dmab->area = __snd_dma_alloc_pages(dmab, size);
69	if (!dmab->area)
70		return -ENOMEM;
71	dmab->bytes = size;
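/*
 * Hedged usage sketch, not part of this file: a typical caller allocates a
 * DMA-capable buffer against its device and releases it with
 * snd_dma_free_pages() (shown further below); "chip->dev" and the 64 KiB
 * size are placeholders.
 */
struct snd_dma_buffer dmab;
int err;

err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, chip->dev, 64 * 1024, &dmab);
if (err < 0)
	return err;	/* -ENXIO for invalid arguments, -ENOMEM on allocation failure */
/* ... use dmab.area (CPU address) and dmab.addr (DMA address) ... */
snd_dma_free_pages(&dmab);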
77 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
83 * Calls the memory-allocator function for the corresponding
/* in snd_dma_alloc_pages_fallback(): */
97		if (err != -ENOMEM)
100			return -ENOMEM;
104	if (! dmab->area)
105		return -ENOMEM;
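/*
 * Hedged sketch, not part of this file: the fallback variant appears to
 * retry with progressively smaller sizes when -ENOMEM is hit (other errors
 * are returned immediately, as above), so the caller should read dmab.bytes
 * for the size actually obtained; "chip" is a placeholder.
 */
struct snd_dma_buffer dmab;

if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, chip->dev,
				  256 * 1024, &dmab))
	dev_info(chip->dev, "allocated %zu bytes\n", dmab.bytes);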
111 * snd_dma_free_pages - release the allocated buffer
/* in snd_dma_free_pages(): */
120	if (ops && ops->free)
121		ops->free(dmab);
132 * snd_devm_alloc_pages - allocate the buffer and manage with devres
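/*
 * Hedged sketch, not part of this file: the devres-managed variant hands
 * back the buffer descriptor itself (NULL on failure in this version) and
 * frees it automatically when "dev" is unbound, so no explicit
 * snd_dma_free_pages() call is needed.
 */
struct snd_dma_buffer *dmab;

dmab = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 32 * 1024);
if (!dmab)
	return -ENOMEM;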
172 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
/* in snd_dma_buffer_mmap(): */
181	if (ops && ops->mmap)
182		return ops->mmap(dmab, area);
184		return -ENOENT;
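/*
 * Hedged sketch, not part of this file: a PCM driver's mmap callback can
 * simply delegate to snd_dma_buffer_mmap(); this assumes the
 * snd_pcm_get_dma_buf() helper from <sound/pcm.h> to fetch the substream's
 * buffer descriptor.
 */
static int my_pcm_mmap(struct snd_pcm_substream *substream,
		       struct vm_area_struct *area)
{
	return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
}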
189 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
/* in snd_sgbuf_get_addr(): */
197	if (ops && ops->get_addr)
198		return ops->get_addr(dmab, offset);
200		return dmab->addr + offset;
205 * snd_sgbuf_get_page - return the physical page at the corresponding offset
/* in snd_sgbuf_get_page(): */
213	if (ops && ops->get_page)
214		return ops->get_page(dmab, offset);
216		return virt_to_page(dmab->area + offset);
221 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
222 * on sg-buffer
/* in snd_sgbuf_get_chunk_size(): */
232	if (ops && ops->get_chunk_size)
233		return ops->get_chunk_size(dmab, ofs, size);
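/*
 * Hedged sketch, not part of this file: the snd_sgbuf_*() helpers above are
 * typically used together to walk a (possibly scatter-gather) buffer in
 * physically contiguous chunks, e.g. when filling hardware descriptors;
 * program_hw_desc() and "chip" are hypothetical driver names, not kernel
 * APIs.
 */
unsigned int ofs = 0, remain = dmab->bytes;

while (remain) {
	unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, remain);

	program_hw_desc(chip, snd_sgbuf_get_addr(dmab, ofs), chunk);
	ofs += chunk;
	remain -= chunk;
}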
240 * Continuous pages allocator
/* in snd_dma_continuous_alloc(): */
248		dmab->addr = page_to_phys(virt_to_page(p));

/* in snd_dma_continuous_free(): */
254	free_pages_exact(dmab->area, dmab->bytes);

/* in snd_dma_continuous_mmap(): */
260	return remap_pfn_range(area, area->vm_start,
261			       dmab->addr >> PAGE_SHIFT,
262			       area->vm_end - area->vm_start,
263			       area->vm_page_prot);
/* in snd_dma_vmalloc_free(): */
284	vfree(dmab->area);

/* in snd_dma_vmalloc_mmap(): */
290	return remap_vmalloc_range(area, dmab->area, 0);

/* in a local page-address helper macro used by the vmalloc ops: */
294	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

/* in snd_dma_vmalloc_get_page(): */
305	return vmalloc_to_page(dmab->area + offset);

/* in snd_dma_vmalloc_get_chunk_size(): */
316	end = ofs + size - 1; /* the last byte address */
325			return start - ofs;
327	/* ok, all on continuous pages */
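/*
 * Hedged worked example for the chunk-size scan above, assuming 4 KiB pages
 * and the mainline loop between the lines shown: for ofs = 0x1800 and
 * size = 0x2000 the scan starts at the 0x1000 page boundary; if the page at
 * offset 0x2000 is not physically adjacent to the one at 0x1000, the
 * function returns 0x2000 - 0x1800 = 0x800, otherwise it reaches the end
 * and returns the full 0x2000.
 */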
/* in snd_dma_iram_alloc(): */
347	struct device *dev = dmab->dev.dev;
351	if (dev->of_node) {
352		pool = of_gen_pool_get(dev->of_node, "iram", 0);
354		dmab->private_data = pool;
356		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
364	dmab->dev.type = SNDRV_DMA_TYPE_DEV;

/* in snd_dma_iram_free(): */
370	struct gen_pool *pool = dmab->private_data;
372	if (pool && dmab->area)
373		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);

/* in snd_dma_iram_mmap(): */
379	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
380	return remap_pfn_range(area, area->vm_start,
381			       dmab->addr >> PAGE_SHIFT,
382			       area->vm_end - area->vm_start,
383			       area->vm_page_prot);
396 __GFP_NORETRY | /* don't trigger OOM-killer */ \
397 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
/* in snd_dma_dev_alloc(): */
406	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
408	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)

/* in snd_dma_dev_free(): */
417	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
418		set_memory_wb((unsigned long)dmab->area,
419			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
421	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);

/* in snd_dma_dev_mmap(): */
428	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
429		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
431	return dma_mmap_coherent(dmab->dev.dev, area,
432				 dmab->area, dmab->addr, dmab->bytes);
442 * Write-combined pages
/* in snd_dma_wc_alloc(): */
450	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);

/* in snd_dma_wc_free(): */
455	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);

/* in snd_dma_wc_mmap(): */
461	return dma_mmap_wc(dmab->dev.dev, area,
462			   dmab->area, dmab->addr, dmab->bytes);
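/*
 * Note (hedged, not from this file): two write-combined strategies coexist
 * here; the SNDRV_DMA_TYPE_DEV_WC branches in the coherent allocator above
 * flip page attributes with set_memory_wb()/set_memory_wc() (an
 * x86-specific path in mainline), while the snd_dma_wc_*() ops use the
 * generic dma_alloc_wc()/dma_mmap_wc() helpers.
 */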
/* in snd_dma_get_ops(): */
494	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
495			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
497	return dma_ops[dmab->dev.type];
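/*
 * Note: every snd_dma_*() entry point above dispatches through this lookup;
 * dmab->dev.type (an SNDRV_DMA_TYPE_* constant) indexes dma_ops[], a table
 * of per-type allocator ops (alloc/free/mmap/get_addr/get_page/
 * get_chunk_size, as used by the wrappers above).
 */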