// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <rtos/panic.h>
#include <rtos/alloc.h>
#include <rtos/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dma.h>
#include <sof/lib/memory.h>
#include <sof/lib/mm_heap.h>
#include <sof/lib/uuid.h>
#include <sof/math/numbers.h>
#include <rtos/spinlock.h>
#include <rtos/string.h>
#include <ipc/topology.h>
#include <ipc/trace.h>

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_REGISTER(memory, CONFIG_SOF_LOG_LEVEL);

/* 425d6e68-145c-4455-b0b2-c7260b0600a5 */
DECLARE_SOF_UUID("memory", mem_uuid, 0x425d6e68, 0x145c, 0x4455,
		 0xb0, 0xb2, 0xc7, 0x26, 0x0b, 0x06, 0x00, 0xa5);

DECLARE_TR_CTX(mem_tr, SOF_UUID(mem_uuid), LOG_LEVEL_INFO);

/* debug to set memory value on every allocation */
#if CONFIG_DEBUG_BLOCK_FREE
#define DEBUG_BLOCK_FREE_VALUE_8BIT ((uint8_t)0xa5)
#define DEBUG_BLOCK_FREE_VALUE_32BIT ((uint32_t)0xa5a5a5a5)
#endif

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
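
/*
 * Minimal usage sketch (illustrative only, compiled out): shows how a client
 * would typically take memory from the runtime and buffer pools described
 * above and return it with rfree(). The capability mask SOF_MEM_CAPS_RAM and
 * the zero flags value are assumptions made for the example, not
 * requirements of this allocator.
 */
#if 0
static void alloc_usage_example(void)
{
	/* zeroed allocation from the runtime pool */
	void *state = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			      sizeof(uint32_t) * 16);

	/* cache-line aligned allocation from the buffer pool */
	void *buf = rballoc_align(0, SOF_MEM_CAPS_RAM, 4096,
				  PLATFORM_DCACHE_ALIGN);

	if (state && buf) {
		/* ... use the memory ... */
	}

	/* both pools are freed through the same entry point */
	rfree(buf);
	rfree(state);
}
#endif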

#if CONFIG_DEBUG_BLOCK_FREE
/* Check the whole memory region for the debug pattern to detect whether the
 * memory has already been freed (double free).
 */
static void validate_memory(void *ptr, size_t size)
{
	uint32_t *ptr_32 = ptr;
	int i, not_matching = 0;

	for (i = 0; i < size / 4; i++) {
		if (ptr_32[i] != DEBUG_BLOCK_FREE_VALUE_32BIT)
			not_matching = 1;
	}

	if (not_matching) {
		tr_info(&mem_tr, "validate_memory() pointer: %p freed pattern not detected",
			ptr);
	} else {
		tr_err(&mem_tr, "validate_memory() freeing pointer: %p double free detected",
		       ptr);
	}
}
#endif

/* total size of block */
static inline uint32_t block_get_size(struct block_map *map)
{
	uint32_t size = sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));

	return size;
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++)
		size += block_get_size(&heap->map[i]);

	return size;
}

#if CONFIG_DEBUG_BLOCK_FREE
static void write_pattern(struct mm_heap *heap_map, int heap_depth,
			  uint8_t pattern)
{
	struct mm_heap *heap;
	struct block_map *current_map;
	int i, j;

	for (i = 0; i < heap_depth; i++) {
		heap = &heap_map[i];

		for (j = 0; j < heap->blocks; j++) {
			current_map = &heap->map[j];
			memset(
				(void *)current_map->base, pattern,
				current_map->count * current_map->block_size);
		}

	}
}
#endif

static void init_heap_map(struct mm_heap *heap, int count)
{
	struct block_map *next_map;
	struct block_map *current_map;
	int i;
	int j;

	for (i = 0; i < count; i++) {
		/* init the map[0] */
		current_map = &heap[i].map[0];
		current_map->base = heap[i].heap;

		/* map[j]'s base is calculated based on map[j-1] */
		for (j = 1; j < heap[i].blocks; j++) {
			next_map = &heap[i].map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;

			current_map = &heap[i].map[j];
		}

	}
}

/* allocate from system memory pool */
static void *rmalloc_sys(struct mm_heap *heap, uint32_t flags, int caps, size_t bytes)
{
	void *ptr;
	size_t alignment = 0;

	if ((heap->caps & caps) != caps)
		sof_panic(SOF_IPC_PANIC_MEM);

	/* align address to dcache line size */
	if (heap->info.used % PLATFORM_DCACHE_ALIGN)
		alignment = PLATFORM_DCACHE_ALIGN -
			(heap->info.used % PLATFORM_DCACHE_ALIGN);

	/* always succeeds or panics */
	if (alignment + bytes > heap->info.free) {
		tr_err(&mem_tr, "rmalloc_sys(): core = %d, bytes = %d",
		       cpu_get_id(), bytes);
		sof_panic(SOF_IPC_PANIC_MEM);
	}
	heap->info.used += alignment;

	ptr = (void *)(heap->heap + heap->info.used);

	heap->info.used += bytes;
	heap->info.free -= alignment + bytes;

	return ptr;
}

/* At this point the pointer we have may be unaligned (this was checked a
 * level higher) and the requested alignment must be a power of 2.
 */
static void *align_ptr(struct mm_heap *heap, uint32_t alignment,
		       void *ptr, struct block_hdr *hdr)
{
	/* Save unaligned ptr to block hdr */
	hdr->unaligned_ptr = ptr;

	/* If ptr is not already aligned we calculate alignment shift */
	if (alignment <= 1)
		return ptr;

	return (void *)ALIGN_UP((uintptr_t)ptr, alignment);
}
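
/*
 * Worked example for the rounding above (illustrative only, compiled out):
 * with a block at 0x1234 and a requested alignment of 0x40, ALIGN_UP()
 * returns 0x1240 and the original 0x1234 stays recorded in the block header
 * so free_block() can recover it. The addresses are made up for the example.
 */
#if 0
static void align_ptr_example(struct mm_heap *heap, struct block_hdr *hdr)
{
	void *unaligned = (void *)0x1234;
	void *aligned = align_ptr(heap, 0x40, unaligned, hdr);

	/* aligned == (void *)0x1240, hdr->unaligned_ptr == (void *)0x1234 */
	(void)aligned;
}
#endif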

/* allocate single block */
static void *alloc_block_index(struct mm_heap *heap, int level,
			       uint32_t alignment, int index)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr;
	void *ptr;
	int i;

	if (index < 0)
		index = map->first_free;

	map->free_count--;

	hdr = &map->block[index];
	ptr = (void *)(map->base + index * map->block_size);
	ptr = align_ptr(heap, alignment, ptr, hdr);

	hdr->size = 1;
	hdr->used = 1;

	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	if (index == map->first_free)
		/* find next free */
		for (i = map->first_free; i < map->count; ++i) {
			hdr = &map->block[i];

			if (hdr->used == 0) {
				map->first_free = i;
				break;
			}
		}

	return ptr;
}

static void *alloc_block(struct mm_heap *heap, int level,
			 uint32_t caps, uint32_t alignment)
{
	return alloc_block_index(heap, level, alignment, -1);
}

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr;
	void *ptr = NULL, *unaligned_ptr;
	unsigned int current;
	unsigned int count = 0;			/* keep compiler quiet */
	unsigned int start = 0;			/* keep compiler quiet */
	uintptr_t blk_start = 0, aligned = 0;	/* keep compiler quiet */
	size_t found = 0, total_bytes = bytes;

	/* check if we have enough consecutive blocks for requested
	 * allocation size.
	 */
	if ((map->count - map->first_free) * map->block_size < bytes)
		return NULL;

	/*
	 * Walk all blocks in the map, beginning with the first free one, until
	 * a sufficiently large sequence is found, in which the first block
	 * contains an address with the requested alignment.
	 */
	for (current = map->first_free, hdr = map->block + current;
	     current < map->count && found < total_bytes;
	     current++, hdr++) {
		if (hdr->used) {
			/* Restart the search */
			found = 0;
			count = 0;
			total_bytes = bytes;
			continue;
		}

		if (!found) {
			/* A possible beginning of a sequence */
			blk_start = map->base + current * map->block_size;
			start = current;

			/* Check if we can start a sequence here */
			if (alignment) {
				aligned = ALIGN_UP(blk_start, alignment);

				if (blk_start & (alignment - 1) &&
				    aligned >= blk_start + map->block_size)
					/*
					 * This block doesn't contain an address
					 * with required alignment, it is useless
					 * as the beginning of the sequence
					 */
					continue;

				/*
				 * Found a potentially suitable beginning of a
				 * sequence, from here we'll check if we get
				 * enough blocks
				 */
				total_bytes += aligned - blk_start;
			} else {
				aligned = blk_start;
			}
		}

		count++;
		found += map->block_size;
	}

	if (found < total_bytes) {
		tr_err(&mem_tr, "failed to allocate %u", total_bytes);
		goto out;
	}

	ptr = (void *)aligned;

	/* we found enough space, let's allocate it */
	map->free_count -= count;
	unaligned_ptr = (void *)blk_start;

	hdr = &map->block[start];
	hdr->size = count;

	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/*
	 * if .first_free has to be updated, set it to first free block or past
	 * the end of the map
	 */
	if (map->first_free == start) {
		for (current = map->first_free + count, hdr = &map->block[current];
		     current < map->count && hdr->used;
		     current++, hdr++)
			;

		map->first_free = current;
	}

	/* update each block */
	for (current = start; current < start + count; current++) {
		hdr = &map->block[current];
		hdr->used = 1;
		hdr->unaligned_ptr = unaligned_ptr;
	}

out:

	return ptr;
}
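
/*
 * Worked example of the sizing arithmetic in alloc_cont_blocks() above
 * (illustrative only, compiled out; all numbers are made up). Allocating
 * 0x300 bytes with 0x100 alignment from 0x180-byte blocks, starting at a
 * block based at 0x180: the aligned start is 0x200, so 0x80 bytes of padding
 * are added to the request and three blocks end up being reserved.
 */
#if 0
static void cont_blocks_sizing_example(void)
{
	const uintptr_t blk_start = 0x180;	/* base of first free block */
	const size_t block_size = 0x180;
	const size_t bytes = 0x300;
	const uint32_t alignment = 0x100;

	uintptr_t aligned = ALIGN_UP(blk_start, alignment);	/* 0x200 */
	size_t total_bytes = bytes + (aligned - blk_start);	/* 0x380 */
	size_t blocks_needed = (total_bytes + block_size - 1) /
		block_size;					/* 3 */

	(void)blocks_needed;
}
#endif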

static inline struct mm_heap *find_in_heap_arr(struct mm_heap *heap_arr, int arr_len, void *ptr)
{
	struct mm_heap *heap;
	int i;

	for (i = 0; i < arr_len; i++) {
		heap = &heap_arr[i];
		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}
	return NULL;
}

static struct mm_heap *get_heap_from_ptr(void *ptr)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* find mm_heap that ptr belongs to */
	heap = find_in_heap_arr(memmap->system_runtime + cpu_get_id(), 1, ptr);
	if (heap)
		goto out;

	heap = find_in_heap_arr(memmap->runtime, PLATFORM_HEAP_RUNTIME, ptr);
	if (heap)
		goto out;

#if CONFIG_CORE_COUNT > 1
	heap = find_in_heap_arr(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED, ptr);
	if (heap)
		goto out;
#endif

	heap = find_in_heap_arr(memmap->buffer, PLATFORM_HEAP_BUFFER, ptr);
	if (heap)
		goto out;

	return NULL;

out:

	return heap;
}

static struct mm_heap *get_heap_from_caps(struct mm_heap *heap, int count,
					  uint32_t caps)
{
	uint32_t mask;
	int i;

	/* find the first heap that supports the requested capabilities */
	for (i = 0; i < count; i++) {
		mask = heap[i].caps & caps;
		if (mask == caps)
			return &heap[i];
	}

	return NULL;
}

static void *get_ptr_from_heap(struct mm_heap *heap, uint32_t flags,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map;
	int i, temp_bytes = bytes;
	void *ptr = NULL;

	/* Only allow alignment as a power of 2 */
	if ((alignment & (alignment - 1)) != 0)
		sof_panic(SOF_IPC_PANIC_MEM);

	for (i = 0; i < heap->blocks; i++) {
		map = &heap->map[i];

		/* The requested size is adjusted for alignment purposes: if
		 * the first free block is not already aligned we need to
		 * allocate a bigger size to make room for the alignment.
		 */
		if (alignment &&
		    ((map->base + (map->block_size * map->first_free)) %
		     alignment))
			temp_bytes += alignment;

		/* is block big enough */
		if (map->block_size < temp_bytes) {
			temp_bytes = bytes;
			continue;
		}

		/* does block have free space */
		if (map->free_count == 0) {
			temp_bytes = bytes;
			continue;
		}

		/* free block space exists */
		ptr = alloc_block(heap, i, caps, alignment);

		break;
	}

	return ptr;
}
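
/*
 * Worked example of the padding adjustment above (illustrative only,
 * compiled out; numbers are made up): requesting 30 bytes with 32-byte
 * alignment from a map of 64-byte blocks whose first free block is not
 * 32-byte aligned bumps the effective request to 62 bytes, which still fits
 * in a single 64-byte block, so alloc_block() is used.
 */
#if 0
static void heap_padding_example(void)
{
	const size_t bytes = 30;
	const uint32_t alignment = 32;
	const size_t block_size = 64;

	size_t temp_bytes = bytes + alignment;	/* worst-case padded size */

	/* 62 <= 64: a single block can satisfy the aligned request */
	(void)(temp_bytes <= block_size);
}
#endif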

/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *heap;
	struct block_map *block_map = NULL;
	struct block_hdr *hdr;
	void *cached_ptr = uncache_to_cache(ptr);
	void *uncached_ptr = cache_to_uncache(ptr);
	void *free_ptr;
	int i;
	int block;
	int used_blocks;
	bool heap_is_full;

	/* try cached_ptr first */
	heap = get_heap_from_ptr(cached_ptr);

	/* try uncached_ptr if needed */
	if (!heap) {
		heap = get_heap_from_ptr(uncached_ptr);
		if (!heap) {
			tr_err(&mem_tr, "free_block(): invalid heap, ptr = %p, cpu = %d",
			       ptr, cpu_get_id());
			return;
		}
		free_ptr = uncached_ptr;
	} else {
		free_ptr = cached_ptr;
	}

	/* find block that ptr belongs to */
	for (i = 0; i < heap->blocks; i++) {
		block_map = &heap->map[i];

		/* is ptr in this block */
		if ((uint32_t)free_ptr < (block_map->base +
		    (block_map->block_size * block_map->count)))
			break;

	}

	if (i == heap->blocks) {

		/* not found */
		tr_err(&mem_tr, "free_block(): invalid free_ptr = %p cpu = %d",
		       free_ptr, cpu_get_id());
		return;
	}

	/* calculate block header */
	block = ((uint32_t)free_ptr - block_map->base) / block_map->block_size;

	hdr = &block_map->block[block];

	/* Restore the original unaligned pointer and recalculate the header
	 * used for the free operation: since we were given the user pointer,
	 * the real start could be in a different block, or the saved pointer
	 * may be NULL if the header was never set.
	 */
	if (hdr->unaligned_ptr != free_ptr && hdr->unaligned_ptr) {
		free_ptr = hdr->unaligned_ptr;
		block = ((uint32_t)free_ptr - block_map->base)
			 / block_map->block_size;
		hdr = &block_map->block[block];
	}

	/* report an error if ptr is not aligned to block */
	if (block_map->base + block_map->block_size * block != (uint32_t)free_ptr)
		sof_panic(SOF_IPC_PANIC_MEM);

	/* There may still be live dirty cache lines in the region
	 * on the current core. Those must be invalidated, otherwise
	 * they will be evicted from the cache at some point in the
	 * future, on top of the memory region now being used for
	 * different purposes on another core.
	 */
	dcache_writeback_invalidate_region(ptr, block_map->block_size * hdr->size);

	heap_is_full = !block_map->free_count;

	/* free the block header and all contiguous blocks */
	used_blocks = block + hdr->size;

	for (i = block; i < used_blocks; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->used = 0;
		hdr->unaligned_ptr = NULL;
		block_map->free_count++;
		heap->info.used -= block_map->block_size;
		heap->info.free += block_map->block_size;
	}

	/* set first free block */
	if (block < block_map->first_free || heap_is_full)
		block_map->first_free = block;

#if CONFIG_DEBUG_BLOCK_FREE
	/* memset the whole block in case of unaligned ptr */
	validate_memory(
		(void *)(block_map->base + block_map->block_size * block),
		block_map->block_size * (i - block));
	memset(
		(void *)(block_map->base + block_map->block_size * block),
		DEBUG_BLOCK_FREE_VALUE_8BIT, block_map->block_size *
		(i - block));
#endif
}

#if CONFIG_TRACE
void heap_trace(struct mm_heap *heap, int size)
{
	struct block_map *current_map;
	int i;
	int j;

	for (i = 0; i < size; i++) {
		tr_info(&mem_tr, " heap: 0x%x size %d blocks %d caps 0x%x",
			heap->heap, heap->size, heap->blocks,
			heap->caps);
		tr_info(&mem_tr, "  (In Bytes) used %d free %d", heap->info.used,
			heap->info.free);

		/* map[j]'s base is calculated based on map[j-1] */
		for (j = 0; j < heap->blocks; j++) {
			current_map = &heap->map[j];

			tr_info(&mem_tr, " %d Bytes blocks ID:%d base 0x%x",
				current_map->block_size, j, current_map->base);
			tr_info(&mem_tr, "   Number of Blocks: total %d used %d free %d",
				current_map->count,
				(current_map->count - current_map->free_count),
				current_map->free_count);
		}

		heap++;
	}
}

void heap_trace_all(int force)
{
	struct mm *memmap = memmap_get();

	/* has heap changed since last shown */
	if (memmap->heap_trace_updated || force) {
		tr_info(&mem_tr, "heap: system status");
		heap_trace(memmap->system, PLATFORM_HEAP_SYSTEM);
		tr_info(&mem_tr, "heap: system runtime status");
		heap_trace(memmap->system_runtime, PLATFORM_HEAP_SYSTEM_RUNTIME);
		tr_info(&mem_tr, "heap: buffer status");
		heap_trace(memmap->buffer, PLATFORM_HEAP_BUFFER);
		tr_info(&mem_tr, "heap: runtime status");
		heap_trace(memmap->runtime, PLATFORM_HEAP_RUNTIME);
#if CONFIG_CORE_COUNT > 1
		tr_info(&mem_tr, "heap: runtime shared status");
		heap_trace(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED);
		tr_info(&mem_tr, "heap: system shared status");
		heap_trace(memmap->system_shared, PLATFORM_HEAP_SYSTEM_SHARED);
#endif
	}

	memmap->heap_trace_updated = 0;

}
#else
void heap_trace_all(int force) { }
void heap_trace(struct mm_heap *heap, int size) { }
#endif

#define _ALLOC_FAILURE(bytes, zone, caps, flags) \
	tr_err(&mem_tr, \
	       "failed to alloc 0x%x bytes zone 0x%x caps 0x%x flags 0x%x", \
	       bytes, zone, caps, flags)

#if CONFIG_DEBUG_HEAP
#define DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags) do { \
		if (trace_get()) { \
			if (!(ptr)) \
				_ALLOC_FAILURE(bytes, zone, caps, flags); \
			heap_trace_all(0); \
		} \
	} while (0)
#else
#define DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags) do { \
		if (trace_get()) { \
			if (!(ptr)) { \
				_ALLOC_FAILURE(bytes, zone, caps, flags); \
				heap_trace_all(0); \
			} \
		} \
	} while (0)
#endif

/* allocate single block for system runtime */
static void *rmalloc_sys_runtime(uint32_t flags, int caps, int core,
				 size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *cpu_heap;
	void *ptr;

	/* use the heap dedicated for the selected core */
	cpu_heap = memmap->system_runtime + core;
	if ((cpu_heap->caps & caps) != caps)
		sof_panic(SOF_IPC_PANIC_MEM);

	ptr = get_ptr_from_heap(cpu_heap, flags, caps, bytes,
				PLATFORM_DCACHE_ALIGN);

	return ptr;
}

/* allocate single block for runtime */
static void *rmalloc_runtime(uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* check runtime heap for capabilities */
	heap = get_heap_from_caps(memmap->runtime, PLATFORM_HEAP_RUNTIME, caps);
	if (!heap) {
		/* next check buffer heap for capabilities */
		heap = get_heap_from_caps(memmap->buffer, PLATFORM_HEAP_BUFFER,
					  caps);
		if (!heap) {

			tr_err(&mem_tr, "rmalloc_runtime(): caps = %x, bytes = %d",
			       caps, bytes);

			return NULL;
		}
	}

	return get_ptr_from_heap(heap, flags, caps, bytes,
				 PLATFORM_DCACHE_ALIGN);
}

#if CONFIG_CORE_COUNT > 1
/* allocate single block for shared */
static void *rmalloc_runtime_shared(uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* check shared heap for capabilities */
	heap = get_heap_from_caps(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED, caps);
	if (!heap) {
		tr_err(&mem_tr, "rmalloc_runtime_shared(): caps = %x, bytes = %d", caps, bytes);
		return NULL;
	}

	return get_ptr_from_heap(heap, flags, caps, bytes, PLATFORM_DCACHE_ALIGN);
}
#endif

static void *_malloc_unlocked(enum mem_zone zone, uint32_t flags, uint32_t caps,
			      size_t bytes)
{
	struct mm *memmap = memmap_get();
	void *ptr = NULL;

	switch (zone) {
	case SOF_MEM_ZONE_SYS:
		ptr = rmalloc_sys(memmap->system + cpu_get_id(), flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_RUNTIME:
		ptr = rmalloc_sys_runtime(flags, caps, cpu_get_id(), bytes);
		break;
	case SOF_MEM_ZONE_RUNTIME:
		ptr = rmalloc_runtime(flags, caps, bytes);
		break;
#if CONFIG_CORE_COUNT > 1
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		ptr = rmalloc_runtime_shared(flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		ptr = rmalloc_sys(memmap->system_shared, flags, caps, bytes);
		break;
#else
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		ptr = rmalloc_runtime(flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		ptr = rmalloc_sys(memmap->system, flags, caps, bytes);
		break;
#endif

	default:
		tr_err(&mem_tr, "rmalloc(): invalid zone");
		sof_panic(SOF_IPC_PANIC_MEM); /* non-recoverable logic error */
		break;
	}

#if CONFIG_DEBUG_BLOCK_FREE
	if (ptr)
		bzero(ptr, bytes);
#endif

	memmap->heap_trace_updated = 1;

	return ptr;
}

void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	k_spinlock_key_t key;
	void *ptr = NULL;

	key = k_spin_lock(&memmap->lock);

	ptr = _malloc_unlocked(zone, flags, caps, bytes);

	k_spin_unlock(&memmap->lock, key);

	DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags);
	return ptr;
}

/* allocates and clears memory */
void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	void *ptr;

	ptr = rmalloc(zone, flags, caps, bytes);
	if (ptr)
		bzero(ptr, bytes);

	return ptr;
}

void *rzalloc_core_sys(int core, size_t bytes)
{
	struct mm *memmap = memmap_get();
	k_spinlock_key_t key;
	void *ptr = NULL;

	key = k_spin_lock(&memmap->lock);

	ptr = rmalloc_sys(memmap->system + core, 0, 0, bytes);
	if (ptr)
		bzero(ptr, bytes);

	k_spin_unlock(&memmap->lock, key);
	return ptr;
}

/* allocates contiguous buffers - not for direct use, clients use rballoc() */
static void *alloc_heap_buffer(struct mm_heap *heap, uint32_t flags,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map;
#if CONFIG_DEBUG_BLOCK_FREE
	unsigned int temp_bytes = bytes;
#endif
	unsigned int j;
	int i;
	void *ptr = NULL;

	/* Only allow alignment as a power of 2 */
	if ((alignment & (alignment - 1)) != 0)
		sof_panic(SOF_IPC_PANIC_MEM);

	/*
	 * There are several cases when a memory allocation request can be
	 * satisfied with one buffer:
	 * 1. allocate 30 bytes 32-byte aligned from 32 byte buffers. Any free
	 * buffer is acceptable, the beginning of the buffer is used.
	 * 2. allocate 30 bytes 256-byte aligned from 0x180 byte buffers. 1
	 * buffer is also always enough, but in some buffers a part of the
	 * buffer has to be skipped.
	 * 3. allocate 200 bytes 256-byte aligned from 0x180 byte buffers. 1
	 * buffer is enough, but not every buffer is suitable.
	 */

	/* will request fit in single block */
	for (i = 0, map = heap->map; i < heap->blocks; i++, map++) {
		struct block_hdr *hdr;
		uintptr_t free_start;

		if (map->block_size < bytes || !map->free_count)
			continue;

		if (alignment <= 1) {
			/* found: grab a block */
			ptr = alloc_block(heap, i, caps, alignment);
			break;
		}

		/*
		 * Usually block sizes are a power of 2 and all blocks are
		 * respectively aligned. But it's also possible to have
		 * non-power of 2 sized blocks, e.g. to optimize for typical
		 * ALSA allocations a map with 0x180 byte buffers can be used.
		 * For performance reasons we could first check the power-of-2
		 * case. This can be added as an optimization later.
		 */
		for (j = map->first_free, hdr = map->block + j,
		     free_start = map->base + map->block_size * j;
		     j < map->count;
		     j++, hdr++, free_start += map->block_size) {
			uintptr_t aligned;

			if (hdr->used)
				continue;

			aligned = ALIGN_UP(free_start, alignment);

			if (aligned + bytes > free_start + map->block_size)
				continue;

			/* Found, alloc_block_index() cannot fail */
			ptr = alloc_block_index(heap, i, alignment, j);
#if CONFIG_DEBUG_BLOCK_FREE
			temp_bytes += aligned - free_start;
#endif
			break;
		}

		if (ptr)
			break;
	}

	/* request spans > 1 block */
	if (!ptr) {
		/* The requested size is adjusted for alignment purposes:
		 * since we span more than one block we have to assume the
		 * worst-case padding.
		 */
		bytes += alignment;

		if (heap->size < bytes)
			return NULL;

		/*
		 * Find the best block size for the request. We know that we
		 * failed to find a single large enough block, so skip those.
		 */
		for (i = heap->blocks - 1; i >= 0; i--) {
			map = &heap->map[i];

			/* allocate if block size is smaller than request */
			if (map->block_size < bytes) {
				ptr = alloc_cont_blocks(heap, i, caps,
							bytes, alignment);
				if (ptr)
					break;
			}
		}
	}

#if CONFIG_DEBUG_BLOCK_FREE
	if (ptr)
		bzero(ptr, temp_bytes);
#endif

	return ptr;
}
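
/*
 * Worked example of case 2 above (illustrative only, compiled out; the
 * addresses are made up): a 30-byte, 256-byte aligned request taken from a
 * 0x180-byte buffer based at 0x180 starts at ALIGN_UP(0x180, 0x100) = 0x200,
 * i.e. the first 0x80 bytes of the buffer are skipped, and 0x200 + 30 still
 * fits within the buffer end at 0x300.
 */
#if 0
static void heap_buffer_case2_example(void)
{
	const uintptr_t free_start = 0x180;	/* base of a free buffer */
	const size_t block_size = 0x180;
	const size_t bytes = 30;
	const uint32_t alignment = 0x100;

	uintptr_t aligned = ALIGN_UP(free_start, alignment);	/* 0x200 */

	/* the same bound alloc_heap_buffer() checks before taking the block */
	(void)(aligned + bytes <= free_start + block_size);
}
#endif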

static void *_balloc_unlocked(uint32_t flags, uint32_t caps, size_t bytes,
			      uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;
	unsigned int i, n;
	void *ptr = NULL;

	for (i = 0, n = PLATFORM_HEAP_BUFFER, heap = memmap->buffer;
	     i < PLATFORM_HEAP_BUFFER;
	     i = heap - memmap->buffer + 1, n = PLATFORM_HEAP_BUFFER - i,
	     heap++) {
		heap = get_heap_from_caps(heap, n, caps);
		if (!heap)
			break;

		ptr = alloc_heap_buffer(heap, flags, caps, bytes, alignment);
		if (ptr)
			break;

		/* Continue from the next heap */
	}

	/* return directly if allocation failed */
	if (!ptr)
		return ptr;

#ifdef	CONFIG_DEBUG_FORCE_COHERENT_BUFFER
	return cache_to_uncache(ptr);
#else
	return (flags & SOF_MEM_FLAG_COHERENT) && (CONFIG_CORE_COUNT > 1) ?
		cache_to_uncache(ptr) : uncache_to_cache(ptr);
#endif
}

/* allocates contiguous buffers - not for direct use, clients use rballoc() */
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
		    uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	void *ptr = NULL;
	k_spinlock_key_t key;

	key = k_spin_lock(&memmap->lock);

	ptr = _balloc_unlocked(flags, caps, bytes, alignment);

	k_spin_unlock(&memmap->lock, key);

	DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags);
	return ptr;
}

static void _rfree_unlocked(void *ptr)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* sanity check - NULL ptrs are fine */
	if (!ptr)
		return;

	/* prepare the pointer if it's a platform requirement */
	ptr = platform_rfree_prepare(ptr);

	/* use the heap dedicated for the core or shared memory */
#if CONFIG_CORE_COUNT > 1
	if (is_uncached(ptr))
		heap = memmap->system_shared;
	else
		heap = memmap->system + cpu_get_id();
#else
	heap = memmap->system;
#endif

	/* panic if pointer is from system heap */
	if (ptr >= (void *)heap->heap &&
	    (char *)ptr < (char *)heap->heap + heap->size) {
		tr_err(&mem_tr, "rfree(): attempt to free system heap = %p, cpu = %d",
		       ptr, cpu_get_id());
		sof_panic(SOF_IPC_PANIC_MEM);
	}

	/* free the block */
	free_block(ptr);
	memmap->heap_trace_updated = 1;

}

void rfree(void *ptr)
{
	struct mm *memmap = memmap_get();
	k_spinlock_key_t key;

	key = k_spin_lock(&memmap->lock);
	_rfree_unlocked(ptr);
	k_spin_unlock(&memmap->lock, key);
}

void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes,
		      size_t old_bytes, uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	void *new_ptr = NULL;
	k_spinlock_key_t key;
	size_t copy_bytes = MIN(bytes, old_bytes);

	if (!bytes)
		return new_ptr;

	key = k_spin_lock(&memmap->lock);

	new_ptr = _balloc_unlocked(flags, caps, bytes, alignment);

	if (new_ptr && ptr && !(flags & SOF_MEM_FLAG_NO_COPY))
		memcpy_s(new_ptr, copy_bytes, ptr, copy_bytes);

	if (new_ptr)
		_rfree_unlocked(ptr);

	k_spin_unlock(&memmap->lock, key);

	DEBUG_TRACE_PTR(new_ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags);
	return new_ptr;
}
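
/*
 * Minimal usage sketch for the reallocation path above (illustrative only,
 * compiled out): grow a buffer while preserving its old contents. The sizes,
 * flags and the SOF_MEM_CAPS_RAM capability mask are assumptions made for
 * the example.
 */
#if 0
static void *grow_buffer_example(void *old, size_t old_bytes)
{
	/* copies MIN(new, old) bytes and frees the old buffer on success */
	return rbrealloc_align(old, 0, SOF_MEM_CAPS_RAM, old_bytes * 2,
			       old_bytes, PLATFORM_DCACHE_ALIGN);
}
#endif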

/* TODO: all mm_pm_...() routines to be implemented for IMR storage */
uint32_t mm_pm_context_size(void)
{
	return 0;
}

void free_heap(enum mem_zone zone)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *cpu_heap;

	/* to be called by secondary cores only and only for the sys heap,
	 * otherwise this is a critical flow issue.
	 */
	if (cpu_get_id() == PLATFORM_PRIMARY_CORE_ID ||
	    zone != SOF_MEM_ZONE_SYS) {
		tr_err(&mem_tr, "free_heap(): critical flow issue");
		sof_panic(SOF_IPC_PANIC_MEM);
	}

	cpu_heap = memmap->system + cpu_get_id();
	cpu_heap->info.used = 0;
	cpu_heap->info.free = cpu_heap->size;

}

/* initialise map */
void init_heap(struct sof *sof)
{
	struct mm *memmap = sof->memory_map;

#if !CONFIG_LIBRARY
	extern uintptr_t _system_heap_start;

	/* sanity check for malformed images or loader issues */
	if (memmap->system[0].heap != (uintptr_t)&_system_heap_start)
		sof_panic(SOF_IPC_PANIC_MEM);
#endif

	init_heap_map(memmap->system_runtime, PLATFORM_HEAP_SYSTEM_RUNTIME);

	init_heap_map(memmap->runtime, PLATFORM_HEAP_RUNTIME);

#if CONFIG_CORE_COUNT > 1
	init_heap_map(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED);
#endif

	init_heap_map(memmap->buffer, PLATFORM_HEAP_BUFFER);

#if CONFIG_DEBUG_BLOCK_FREE
	write_pattern((struct mm_heap *)&memmap->buffer, PLATFORM_HEAP_BUFFER,
		      DEBUG_BLOCK_FREE_VALUE_8BIT);
	write_pattern((struct mm_heap *)&memmap->runtime, PLATFORM_HEAP_RUNTIME,
		      DEBUG_BLOCK_FREE_VALUE_8BIT);
#endif

	k_spinlock_init(&memmap->lock);
}

#if CONFIG_DEBUG_MEMORY_USAGE_SCAN
int heap_info(enum mem_zone zone, int index, struct mm_info *out)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;
	k_spinlock_key_t key;

	if (!out)
		goto error;

	switch (zone) {
	case SOF_MEM_ZONE_SYS:
		if (index >= PLATFORM_HEAP_SYSTEM)
			goto error;
		heap = memmap->system + index;
		break;
	case SOF_MEM_ZONE_SYS_RUNTIME:
		if (index >= PLATFORM_HEAP_SYSTEM_RUNTIME)
			goto error;
		heap = memmap->system_runtime + index;
		break;
	case SOF_MEM_ZONE_RUNTIME:
		if (index >= PLATFORM_HEAP_RUNTIME)
			goto error;
		heap = memmap->runtime + index;
		break;
	case SOF_MEM_ZONE_BUFFER:
		if (index >= PLATFORM_HEAP_BUFFER)
			goto error;
		heap = memmap->buffer + index;
		break;
#if CONFIG_CORE_COUNT > 1
	case SOF_MEM_ZONE_SYS_SHARED:
		if (index >= PLATFORM_HEAP_SYSTEM_SHARED)
			goto error;
		heap = memmap->system_shared + index;
		break;
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		if (index >= PLATFORM_HEAP_RUNTIME_SHARED)
			goto error;
		heap = memmap->runtime_shared + index;
		break;
#endif
	default:
		goto error;
	}

	key = k_spin_lock(&memmap->lock);
	*out = heap->info;
	k_spin_unlock(&memmap->lock, key);
	return 0;
error:
	tr_err(&mem_tr, "heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index,
	       (uint32_t)out);
	return -EINVAL;
}
#endif