Lines matching "reserved" and "memory" in mm/cma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 */
cma_get_base():
	return PFN_PHYS(cma->base_pfn);

cma_get_size():
	return cma->count << PAGE_SHIFT;

cma_get_name():
	return cma->name;
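
For instance, these accessors let a caller log an area's geometry; a hypothetical snippet (assumes an initialised struct cma *cma):

	phys_addr_t base = cma_get_base(cma);

	pr_info("%s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));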
cma_bitmap_aligned_mask():
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;

cma_bitmap_aligned_offset():
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;

cma_bitmap_pages_to_bits():
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
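
To make the bitmap arithmetic concrete, here is a standalone sketch in plain C; BASE_PFN, ORDER_PER_BIT and align_order are made-up illustrative values, not kernel constants:

	#include <stdio.h>

	/* Illustrative values only: an area whose first page frame is
	 * 0x12345, with one bitmap bit covering 2^2 = 4 pages. */
	#define BASE_PFN      0x12345UL
	#define ORDER_PER_BIT 2UL

	int main(void)
	{
		unsigned long align_order = 4;	/* request 2^4 = 16-page alignment */

		/* cma_bitmap_aligned_mask(): the alignment, as a bitmap-index mask */
		unsigned long mask = (1UL << (align_order - ORDER_PER_BIT)) - 1;

		/* cma_bitmap_aligned_offset(): how far base_pfn sits past a
		 * 16-page boundary, converted from pages to bitmap bits */
		unsigned long offset = (BASE_PFN & ((1UL << align_order) - 1))
					>> ORDER_PER_BIT;

		/* prints: mask = 0x3, offset = 1 */
		printf("mask = %#lx, offset = %lu\n", mask, offset);
		return 0;
	}

With one bit covering four pages, a 16-page alignment request becomes a 4-bit granularity in the bitmap (mask 0x3), and a base PFN sitting 5 pages past a 16-page boundary contributes an offset of one bitmap bit.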
cma_clear_bitmap():
	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
cma_activate_area():
	unsigned long base_pfn = cma->base_pfn, pfn;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/* every page of the area must belong to the same zone */
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { ... }

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);
#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	/* error path: hand the pages back to the buddy allocator */
	bitmap_free(cma->bitmap);
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * ...
 * This function creates a custom contiguous area from already reserved memory.
 */
175 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_init_reserved_mem()
176 return -ENOSPC; in cma_init_reserved_mem()
180 return -EINVAL; in cma_init_reserved_mem()
184 max_t(unsigned long, MAX_ORDER - 1, pageblock_order); in cma_init_reserved_mem()
188 return -EINVAL; in cma_init_reserved_mem()
191 return -EINVAL; in cma_init_reserved_mem()
194 * Each reserved area must be initialised later, when more kernel in cma_init_reserved_mem()
200 snprintf(cma->name, CMA_MAX_NAME, name); in cma_init_reserved_mem()
202 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_init_reserved_mem()
204 cma->base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
205 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
206 cma->order_per_bit = order_per_bit; in cma_init_reserved_mem()
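
As a usage sketch: a boot-time caller that has already reserved a physical range via memblock could register it as a CMA area roughly like this. The function my_cma_setup and the name "my-cma" are hypothetical, not from the kernel tree:

	#include <linux/cma.h>
	#include <linux/memblock.h>

	static struct cma *my_cma;	/* hypothetical */

	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
	{
		int err;

		/* base and size must already be memblock-reserved and
		 * aligned to the minimum CMA alignment checked above */
		err = cma_init_reserved_mem(base, size, 0 /* order_per_bit */,
					    "my-cma", &my_cma);
		if (err)
			pr_err("my-cma: registration failed (%d)\n", err);
		return err;
	}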
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * ...
 * @fixed: hint about where to place the reserved area
 * ...
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 */
cma_declare_contiguous_nid():
	/* high_memory is a virtual address; derive the physical boundary */
	highmem_start = __pa(high_memory - 1) + 1;

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	if (!size)
		return -EINVAL;
	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments; otherwise pages at both ends of the area
	 * could be merged into adjacent unmovable pages by the buddy
	 * allocator, and you couldn't get a contiguous memory, which is not
	 * what we want. */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/* a fixed region must not cross the low/high memory boundary */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
		       &base, &highmem_start);
		goto err;
	}
	if (base + size > limit) {
		ret = -EINVAL;
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		...
		/* If there is enough memory, try a bottom-up allocation first. */
		...
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
	}
	...
	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
395 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
398 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); in cma_debug_show_areas()
401 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); in cma_debug_show_areas()
402 nr_zero = next_set_bit - next_zero_bit; in cma_debug_show_areas()
403 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
409 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
410 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
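
The scan above is plain run-length iteration over the allocation bitmap. A standalone approximation in userspace C (the bitmap value and test_bit_at() helper are made up; the kernel uses find_next_zero_bit()/find_next_bit()):

	#include <stdio.h>

	/* Toy bitmap: bit i set => chunk i is allocated. */
	static int test_bit_at(const unsigned long *map, unsigned long i)
	{
		return (map[i / (8 * sizeof(long))] >> (i % (8 * sizeof(long)))) & 1;
	}

	int main(void)
	{
		unsigned long bitmap[1] = { 0xCE35UL };	/* 0b1100111000110101 */
		unsigned long nbits = 16, order_per_bit = 0;
		unsigned long start = 0, nr_total = 0;

		while (start < nbits) {
			unsigned long zero = start, set;

			while (zero < nbits && test_bit_at(bitmap, zero))
				zero++;			/* find_next_zero_bit() */
			if (zero >= nbits)
				break;
			set = zero;
			while (set < nbits && !test_bit_at(bitmap, set))
				set++;			/* find_next_bit() */

			printf("%lu free pages at bit %lu\n",
			       (set - zero) << order_per_bit, zero);
			nr_total += (set - zero) << order_per_bit;
			start = set;
		}
		printf("=> %lu free of %lu total pages\n", nr_total, nbits);
		return 0;
	}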
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * ...
 * This function allocates a contiguous run of pages from the specified
 * contiguous memory area.
 */
cma_alloc():
	unsigned long pfn = -1;
	...
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;
	...
	trace_cma_alloc_start(cma->name, count, align);

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		spin_unlock_irq(&cma->lock);	/* region now marked as ours */

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
	...
	pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
			   __func__, cma->name, count, ret);
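
A hypothetical caller, reusing my_area from the earlier declare sketch:

	struct page *page;

	/* 16 contiguous pages, aligned to 2^4 pages; warn on failure */
	page = cma_alloc(my_area, 16, 4, false);
	if (!page)
		return -ENOMEM;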
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * ...
 * This function releases memory allocated by cma_alloc().
 */
cma_release():
	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;
	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
	...
	trace_cma_release(cma->name, pfn, pages, count);
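
And the matching release for the allocation sketched above; the return value flags pages that never came from this area:

	if (!cma_release(my_area, page, 16))
		pr_warn("pages did not belong to the CMA area\n");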