// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
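
/*
 * Worked example of the bitmap arithmetic above (illustrative only; the
 * numbers are hypothetical, not taken from any real configuration):
 *
 * With order_per_bit = 2, one bitmap bit covers 4 pages. A request for
 * count = 10 pages therefore needs
 *
 *	cma_bitmap_pages_to_bits(cma, 10)
 *		= ALIGN(10, 4) >> 2 = 12 >> 2 = 3 bits.
 *
 * For an allocation aligned to align_order = 4 (16 pages), the search mask is
 *
 *	cma_bitmap_aligned_mask(cma, 4) = (1 << (4 - 2)) - 1 = 3,
 *
 * i.e. the chosen starting bit, adjusted by the offset, must be a multiple of
 * 4 bits (16 pages). cma_bitmap_aligned_offset() supplies that adjustment so
 * the alignment applies to the physical PFN rather than to bit 0 of the
 * bitmap, which matters when base_pfn itself is not aligned to align_order.
 */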

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
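
/*
 * Example (illustrative sketch, not part of this file): a caller that has
 * already reserved a physically contiguous range with memblock, for instance
 * a devicetree reserved-memory handler, can hand it over to CMA like this.
 * The my_* names are hypothetical; only the cma_init_reserved_mem() call
 * reflects the API defined above.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_rmem_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		int err;
 *
 *		// base/size must already be memblock-reserved and aligned to
 *		// the CMA minimum (MAX_ORDER - 1 / pageblock_order), or the
 *		// call above fails with -EINVAL.
 *		err = cma_init_reserved_mem(base, size, 0, "my-region",
 *					    &my_cma);
 *		if (err)
 *			pr_err("failed to init CMA region: %d\n", err);
 *		return err;
 *	}
 */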

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve the contiguous area at exactly @base.  If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
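
/*
 * Example (illustrative sketch, not part of this file): architecture or other
 * early-boot code would typically call cma_declare_contiguous() while memblock
 * is still the active allocator, e.g. to set aside 64 MiB somewhere in RAM.
 * The size and the my_* / boot_cma names are hypothetical.
 *
 *	static struct cma *boot_cma;
 *
 *	static void __init my_reserve_cma(void)
 *	{
 *		// base = 0, limit = 0, fixed = false: let memblock pick the
 *		// placement anywhere below the end of DRAM, aligned to the
 *		// implicit CMA minimum alignment.
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					   "boot-region", &boot_cma))
 *			pr_warn("boot-region CMA reservation failed\n");
 *	}
 */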

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
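
/*
 * Example (illustrative sketch, not part of this file): a driver that holds a
 * struct cma pointer, e.g. saved from a cma_declare_contiguous() call at boot,
 * could allocate and free a contiguous buffer as below. The my_* names are
 * hypothetical; count is in pages and align is a PAGE_SIZE order.
 *
 *	static struct page *my_grab_buffer(struct cma *cma)
 *	{
 *		// 16 pages, aligned to 4 pages (order 2), warn on failure.
 *		return cma_alloc(cma, 16, 2, false);
 *	}
 *
 *	static void my_put_buffer(struct cma *cma, struct page *page)
 *	{
 *		// count must match the original allocation.
 *		if (page)
 *			WARN_ON(!cma_release(cma, page, 16));
 *	}
 */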

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
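
/*
 * Example (illustrative sketch, not part of this file): a debugfs or
 * statistics consumer could walk every registered area with
 * cma_for_each_area(). Returning non-zero from the callback stops the walk
 * and is propagated back to the caller. The names below are hypothetical.
 *
 *	static int my_print_area(struct cma *cma, void *data)
 *	{
 *		unsigned long *total_pages = data;
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		*total_pages += cma_get_size(cma) >> PAGE_SHIFT;
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;	// keep iterating
 *	}
 *
 *	// Usage: unsigned long total = 0;
 *	//        cma_for_each_area(my_print_area, &total);
 */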