Lines Matching +full:reserved +full:- +full:memory

1 // SPDX-License-Identifier: GPL-2.0+
3 * Contiguous Memory Allocator for DMA mapping framework
4 * Copyright (c) 2010-2011 by Samsung Electronics.
9 * Contiguous Memory Allocator
11 * The Contiguous Memory Allocator (CMA) makes it possible to
12 * allocate big contiguous chunks of memory after the system has
17 * Various devices on embedded systems have no scatter-gather and/or
18 * IO map support and require contiguous blocks of memory to
22 * Such devices often require big memory buffers (a full HD frame
24 * MB of memory), which makes mechanisms such as kmalloc() or
27 * At the same time, a solution where a big memory region is
28 * reserved for a device is suboptimal since often more memory is
29 * reserved than strictly required and, moreover, the memory is
32 * CMA tries to solve this issue by operating on memory regions
34 * can use the memory for pagecache and, when a device driver requests
51 #include <linux/dma-map-ops.h>
66 * The size can be set in bytes or as a percentage of the total memory
74 static phys_addr_t size_cmdline __initdata = -1;
82 return -EINVAL; in early_cma()
89 if (*p != '-') { in early_cma()
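The early_cma() fragments above come from parsing the cma= boot argument, which takes a size followed by an optional @base and -limit, with the usual K/M/G suffixes handled by memparse(). Below is a minimal sketch of that parsing pattern; early_my_cma, my_size, my_base, my_limit and the "my_cma" parameter name are placeholders for illustration, not symbols from this file.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>

static phys_addr_t my_size  __initdata;	/* bytes requested on the command line */
static phys_addr_t my_base  __initdata;	/* optional fixed base address */
static phys_addr_t my_limit __initdata;	/* optional upper bound for the area */

/* Parse a "size[KMG][@base[KMG][-limit[KMG]]]" style argument. */
static int __init early_my_cma(char *p)
{
	if (!p)
		return -EINVAL;

	my_size = memparse(p, &p);	/* memparse() handles the K/M/G suffixes */
	if (*p != '@')
		return 0;		/* only a size was given */

	my_base = memparse(p + 1, &p);
	if (*p != '-')
		return 0;		/* size@base, no explicit limit */

	my_limit = memparse(p + 1, &p);
	return 0;
}
early_param("my_cma", early_my_cma);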
152 pr_debug("%s: reserved %llu MiB on node %d\n", __func__, in dma_pernuma_cma_reserve()
159 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
160 * @limit: End address of the reserved memory (optional, 0 for any).
162 * This function reserves memory from the early allocator. It should be
164 * has been activated and all other subsystems have already allocated/reserved
165 * memory.
176 if (size_cmdline != -1) { in dma_contiguous_reserve()
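dma_contiguous_reserve() is meant to be called once from architecture early-boot code after memblock is active. A minimal sketch of such a call site follows, assuming a platform-specific DMA limit; arch_reserve_contiguous() and dma_phys_limit are placeholder names.

#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/types.h>

static phys_addr_t dma_phys_limit __initdata;	/* placeholder: arch DMA limit */

/* Sketch: run after memblock is up, before other subsystems reserve memory. */
static void __init arch_reserve_contiguous(void)
{
	/* Honors cma= / CONFIG_CMA_SIZE_* and places the default CMA area. */
	dma_contiguous_reserve(dma_phys_limit);
}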
211 * dma_contiguous_reserve_area() - reserve custom contiguous area
212 * @size: Size of the reserved area (in bytes),
213 * @base: Base address of the reserved area (optional), use 0 for any
214 * @limit: End address of the reserved memory (optional, 0 for any).
216 * @fixed: hint about where to place the reserved area
218 * This function reserves memory from the early allocator. It should be
220 * has been activated and all other subsystems have already allocated/reserved
221 * memory. This function allows the creation of custom reserved areas for specific
234 "reserved", res_cma); in dma_contiguous_reserve_area()
238 /* Architecture specific contiguous memory fixup. */ in dma_contiguous_reserve_area()
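A sketch of how platform code might use dma_contiguous_reserve_area() to carve out a dedicated area, assuming the (size, base, limit, res_cma, fixed) signature; the 64 MiB size, the 4 GiB limit and my_cma are illustrative only.

#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *my_cma;	/* placeholder for the returned CMA descriptor */

static void __init my_platform_reserve(void)
{
	int ret;

	/*
	 * Request a 64 MiB area anywhere below 4 GiB: base == 0 lets the
	 * allocator choose the address, and fixed == false makes base a
	 * hint rather than a hard requirement.
	 */
	ret = dma_contiguous_reserve_area(SZ_64M, 0, SZ_4G, &my_cma, false);
	if (ret)
		pr_warn("my_platform: CMA reservation failed: %d\n", ret);
}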
246 * dma_alloc_from_contiguous() - allocate pages from contiguous area
252 * This function allocates a memory buffer for the specified device. It uses
253 * the device-specific contiguous memory area if available, or the default
267 * dma_release_from_contiguous() - release allocated pages
272 * This function releases memory allocated by dma_alloc_from_contiguous().
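These two helpers are page-granular and are used inside the DMA mapping core rather than by ordinary drivers. A sketch of the pairing, assuming the (dev, count, align, no_warn) and (dev, pages, count) signatures; the 16-page count and the function names are arbitrary.

#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Sketch: take 16 pages from the device's CMA area (or the global one). */
static struct page *grab_cma_pages(struct device *dev)
{
	/* count is in pages, align is an order (0 == page alignment). */
	return dma_alloc_from_contiguous(dev, 16, 0, false);
}

static void drop_cma_pages(struct device *dev, struct page *pages)
{
	/* Returns false when the pages did not come from a CMA area. */
	if (!dma_release_from_contiguous(dev, pages, 16))
		pr_warn("pages were not allocated from the contiguous area\n");
}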
290 * dma_alloc_contiguous() - allocate contiguous pages
295 * tries to use the device-specific contiguous memory area if available, or it
296 * tries to use per-NUMA CMA; if the allocation fails, it will fall back to
299 * Note that it bypasses one-page allocations from the per-NUMA and
313 if (dev->cma_area) in dma_alloc_contiguous()
314 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
337 * dma_free_contiguous() - release allocated pages
342 * This function releases memory allocated by dma_alloc_contiguous(). As the
345 * upon a false return.
352 if (dev->cma_area) { in dma_free_contiguous()
353 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
357 * otherwise, the page is from either per-NUMA CMA or the default CMA in dma_free_contiguous()
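dma_alloc_contiguous() and dma_free_contiguous() are the byte-based front ends used by the dma-direct code. A sketch of the pairing under that assumption; the function names here are placeholders.

#include <linux/dma-map-ops.h>
#include <linux/gfp.h>

/*
 * Sketch: size is in bytes. The allocator prefers the device's own area,
 * then a per-NUMA area, then the global one, and returns NULL for
 * single-page requests so the caller can use the normal page allocator.
 */
static struct page *alloc_streaming_buffer(struct device *dev, size_t size)
{
	return dma_alloc_contiguous(dev, size, GFP_KERNEL);
}

static void free_streaming_buffer(struct device *dev, struct page *page,
				  size_t size)
{
	/* Falls back to the buddy allocator when the page is not CMA-backed. */
	dma_free_contiguous(dev, page, size);
}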
373 * Support for reserved memory regions defined in device tree
385 dev->cma_area = rmem->priv; in rmem_cma_device_init()
392 dev->cma_area = NULL; in rmem_cma_device_release()
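rmem_cma_device_init()/rmem_cma_device_release() run when a driver attaches or detaches the reserved region referenced by its "memory-region" property. A sketch of the driver side, assuming the node points at a shared-dma-pool region; my_bind()/my_unbind() are placeholder names.

#include <linux/device.h>
#include <linux/of_reserved_mem.h>

static int my_bind(struct device *dev)
{
	int ret;

	/*
	 * Attaches the first "memory-region" phandle of the device node and
	 * calls the region's device_init hook; for a shared-dma-pool that is
	 * rmem_cma_device_init(), which points dev->cma_area at the pool.
	 */
	ret = of_reserved_mem_device_init(dev);
	if (ret)
		return ret;

	/* dma_alloc_contiguous() and friends now allocate from that pool. */
	return 0;
}

static void my_unbind(struct device *dev)
{
	/* Triggers rmem_cma_device_release(), which clears dev->cma_area. */
	of_reserved_mem_device_release(dev);
}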
402 unsigned long node = rmem->fdt_node; in rmem_cma_setup()
403 bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); in rmem_cma_setup()
407 if (size_cmdline != -1 && default_cma) { in rmem_cma_setup()
408 pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", in rmem_cma_setup()
409 rmem->name); in rmem_cma_setup()
410 return -EBUSY; in rmem_cma_setup()
414 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_cma_setup()
415 return -EINVAL; in rmem_cma_setup()
417 if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) { in rmem_cma_setup()
418 pr_err("Reserved memory: incorrect alignment of CMA region\n"); in rmem_cma_setup()
419 return -EINVAL; in rmem_cma_setup()
422 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
424 pr_err("Reserved memory: unable to setup CMA region\n"); in rmem_cma_setup()
427 /* Architecture specific contiguous memory fixup. */ in rmem_cma_setup()
428 dma_contiguous_early_fixup(rmem->base, rmem->size); in rmem_cma_setup()
433 rmem->ops = &rmem_cma_ops; in rmem_cma_setup()
434 rmem->priv = cma; in rmem_cma_setup()
436 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", in rmem_cma_setup()
437 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_cma_setup()
441 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
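RESERVEDMEM_OF_DECLARE() is the generic hook that binds a compatible string to an early setup callback, of which the CMA declaration above is one user. A sketch of a hypothetical handler, only to show the shape of the mechanism; "vendor,my-pool" and my_pool_setup() do not exist in the tree.

#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/printk.h>

/* Hypothetical handler: runs at early boot for every matching DT node. */
static int __init my_pool_setup(struct reserved_mem *rmem)
{
	pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
	/* A real handler would also set rmem->ops and rmem->priv here. */
	return 0;
}
RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);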