// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <sof/debug/panic.h>
#include <sof/lib/alloc.h>
#include <sof/lib/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dma.h>
#include <sof/lib/memory.h>
#include <sof/lib/mm_heap.h>
#include <sof/lib/uuid.h>
#include <sof/math/numbers.h>
#include <sof/spinlock.h>
#include <sof/string.h>
#include <ipc/topology.h>
#include <ipc/trace.h>

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* 425d6e68-145c-4455-b0b2-c7260b0600a5 */
DECLARE_SOF_UUID("memory", mem_uuid, 0x425d6e68, 0x145c, 0x4455,
		 0xb0, 0xb2, 0xc7, 0x26, 0x0b, 0x06, 0x00, 0xa5);

DECLARE_TR_CTX(mem_tr, SOF_UUID(mem_uuid), LOG_LEVEL_INFO);
/* debug: fill freed memory with a known pattern to help catch double frees */
#if CONFIG_DEBUG_BLOCK_FREE
#define DEBUG_BLOCK_FREE_VALUE_8BIT ((uint8_t)0xa5)
#define DEBUG_BLOCK_FREE_VALUE_32BIT ((uint32_t)0xa5a5a5a5)
#endif

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of the PM context.
 * 2) Runtime memory pool has a variable-size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of the PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed-size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of the PM context.
 */
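
/* Illustrative usage sketch (examples only, not part of this file): the zone
 * and caps values depend on the platform; SOF_MEM_CAPS_RAM / SOF_MEM_CAPS_DMA
 * are assumed to be among the capability bits defined for the target.
 *
 *	// long-lived, explicitly freed allocation from the runtime pool
 *	void *state = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, 64);
 *
 *	// 4 KiB audio buffer from the buffer pool, 64-byte aligned
 *	void *buf = rballoc_align(0, SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
 *				  4096, 64);
 *
 *	rfree(buf);
 *	rfree(state);
 */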

#if CONFIG_DEBUG_BLOCK_FREE
/* Check the whole memory region for the debug pattern to detect whether the
 * memory was already freed a second time
 */
static void validate_memory(void *ptr, size_t size)
{
	uint32_t *ptr_32 = ptr;
	int i, not_matching = 0;

	for (i = 0; i < size / 4; i++) {
		if (ptr_32[i] != DEBUG_BLOCK_FREE_VALUE_32BIT)
			not_matching = 1;
	}

	if (not_matching) {
		tr_info(&mem_tr, "validate_memory() pointer: %p freed pattern not detected",
			ptr);
	} else {
		tr_err(&mem_tr, "validate_memory() freeing pointer: %p double free detected",
		       ptr);
	}
}
#endif

/* total size of block */
static inline uint32_t block_get_size(struct block_map *map)
{
	uint32_t size = sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));

	return size;
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++)
		size += block_get_size(&heap->map[i]);

	return size;
}

#if CONFIG_DEBUG_BLOCK_FREE
static void write_pattern(struct mm_heap *heap_map, int heap_depth,
			  uint8_t pattern)
{
	struct mm_heap *heap;
	struct block_map *current_map;
	int i, j;

	for (i = 0; i < heap_depth; i++) {
		heap = &heap_map[i];

		for (j = 0; j < heap->blocks; j++) {
			current_map = &heap->map[j];
			memset((void *)current_map->base, pattern,
			       current_map->count * current_map->block_size);
		}
	}
}
#endif

static void init_heap_map(struct mm_heap *heap, int count)
{
	struct block_map *next_map;
	struct block_map *current_map;
	int i;
	int j;

	for (i = 0; i < count; i++) {
		/* init map[0] */
		current_map = &heap[i].map[0];
		current_map->base = heap[i].heap;

		/* map[j]'s base is calculated from map[j - 1] */
		for (j = 1; j < heap[i].blocks; j++) {
			next_map = &heap[i].map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;

			current_map = &heap[i].map[j];
		}
	}
}
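
/* Worked example (hypothetical numbers): with heap[i].heap = 0x9e008000 and
 * map[0] = { .block_size = 8, .count = 16 }, map[1].base becomes
 * 0x9e008000 + 8 * 16 = 0x9e008080, and each later map starts where the
 * previous one's blocks end.
 */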

/* allocate from system memory pool */
static void *rmalloc_sys(struct mm_heap *heap, uint32_t flags, int caps, size_t bytes)
{
	void *ptr;
	size_t alignment = 0;

	if ((heap->caps & caps) != caps)
		panic(SOF_IPC_PANIC_MEM);

	/* align address to dcache line size */
	if (heap->info.used % PLATFORM_DCACHE_ALIGN)
		alignment = PLATFORM_DCACHE_ALIGN -
			(heap->info.used % PLATFORM_DCACHE_ALIGN);

	/* always succeeds or panics */
	if (alignment + bytes > heap->info.free) {
		tr_err(&mem_tr, "rmalloc_sys(): core = %d, bytes = %d",
		       cpu_get_id(), bytes);
		panic(SOF_IPC_PANIC_MEM);
	}
	heap->info.used += alignment;

	ptr = (void *)(heap->heap + heap->info.used);

	heap->info.used += bytes;
	heap->info.free -= alignment + bytes;

	return ptr;
}
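
/* Alignment arithmetic example (hypothetical numbers): with
 * PLATFORM_DCACHE_ALIGN = 64 and heap->info.used = 100, the pad is
 * 64 - (100 % 64) = 28 bytes, so the returned pointer starts at offset 128,
 * the next cache-line boundary.
 */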

/* At this point the requested alignment has already been validated as a
 * power of 2 one level higher; the pointer itself may still be unaligned.
 */
static void *align_ptr(struct mm_heap *heap, uint32_t alignment,
		       void *ptr, struct block_hdr *hdr)
{
	/* Save unaligned ptr to block hdr */
	hdr->unaligned_ptr = ptr;

	/* an alignment of 0 or 1 imposes no constraint */
	if (alignment <= 1)
		return ptr;

	return (void *)ALIGN_UP((uintptr_t)ptr, alignment);
}
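
/* e.g. ALIGN_UP(0x1004, 0x40) = 0x1040: the caller gets the aligned address
 * inside the block, while hdr->unaligned_ptr keeps the original address so
 * that rfree() can recover it.
 */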

/* allocate single block */
static void *alloc_block_index(struct mm_heap *heap, int level,
			       uint32_t alignment, int index)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr;
	void *ptr;
	int i;

	if (index < 0)
		index = map->first_free;

	map->free_count--;

	hdr = &map->block[index];
	ptr = (void *)(map->base + index * map->block_size);
	ptr = align_ptr(heap, alignment, ptr, hdr);

	hdr->size = 1;
	hdr->used = 1;

	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	if (index == map->first_free)
		/* find next free */
		for (i = map->first_free; i < map->count; ++i) {
			hdr = &map->block[i];

			if (hdr->used == 0) {
				map->first_free = i;
				break;
			}
		}

	return ptr;
}

static void *alloc_block(struct mm_heap *heap, int level,
			 uint32_t caps, uint32_t alignment)
{
	return alloc_block_index(heap, level, alignment, -1);
}

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr;
	void *ptr = NULL, *unaligned_ptr;
	unsigned int current;
	unsigned int count = 0;			/* keep compiler quiet */
	unsigned int start = 0;			/* keep compiler quiet */
	uintptr_t blk_start = 0, aligned = 0;	/* keep compiler quiet */
	size_t found = 0, total_bytes = bytes;

	/* check if we have enough consecutive blocks for the requested
	 * allocation size
	 */
	if ((map->count - map->first_free) * map->block_size < bytes)
		return NULL;

	/*
	 * Walk all blocks in the map, beginning with the first free one, until
	 * a sufficiently large sequence is found, in which the first block
	 * contains an address with the requested alignment.
	 */
	for (current = map->first_free, hdr = map->block + current;
	     current < map->count && found < total_bytes;
	     current++, hdr++) {
		if (hdr->used) {
			/* Restart the search */
			found = 0;
			count = 0;
			total_bytes = bytes;
			continue;
		}

		if (!found) {
			/* A possible beginning of a sequence */
			blk_start = map->base + current * map->block_size;
			start = current;

			/* Check if we can start a sequence here */
			if (alignment) {
				aligned = ALIGN_UP(blk_start, alignment);

				if (blk_start & (alignment - 1) &&
				    aligned >= blk_start + map->block_size)
					/*
					 * This block doesn't contain an address
					 * with the required alignment, so it is
					 * useless as the beginning of the
					 * sequence
					 */
					continue;

				/*
				 * Found a potentially suitable beginning of a
				 * sequence, from here we'll check if we get
				 * enough blocks
				 */
				total_bytes += aligned - blk_start;
			} else {
				aligned = blk_start;
			}
		}

		count++;
		found += map->block_size;
	}

	if (found < total_bytes) {
		tr_err(&mem_tr, "failed to allocate %u", total_bytes);
		goto out;
	}

	ptr = (void *)aligned;

	/* we found enough space, let's allocate it */
	map->free_count -= count;
	unaligned_ptr = (void *)blk_start;

	hdr = &map->block[start];
	hdr->size = count;

	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/*
	 * if .first_free has to be updated, set it to the first free block or
	 * past the end of the map
	 */
	if (map->first_free == start) {
		for (current = map->first_free + count, hdr = &map->block[current];
		     current < map->count && hdr->used;
		     current++, hdr++)
			;

		map->first_free = current;
	}

	/* update each block */
	for (current = start; current < start + count; current++) {
		hdr = &map->block[current];
		hdr->used = 1;
		hdr->unaligned_ptr = unaligned_ptr;
	}

out:
	return ptr;
}
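
/* Walk example (hypothetical numbers): requesting 0x200 bytes 0x100-aligned
 * from 0x180-byte blocks at base 0x9e008000. Block 0 starts aligned, so
 * total_bytes stays 0x200 and two free blocks (0x300 bytes) satisfy it. Had
 * the sequence started at block 1 (0x9e008180), aligned would be 0x9e008200,
 * total_bytes would grow to 0x280 and two blocks would still suffice.
 */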

static inline struct mm_heap *find_in_heap_arr(struct mm_heap *heap_arr, int arr_len, void *ptr)
{
	struct mm_heap *heap;
	int i;

	for (i = 0; i < arr_len; i++) {
		heap = &heap_arr[i];
		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}
	return NULL;
}

static struct mm_heap *get_heap_from_ptr(void *ptr)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* find mm_heap that ptr belongs to */
	heap = find_in_heap_arr(memmap->system_runtime + cpu_get_id(), 1, ptr);
	if (heap)
		goto out;

	heap = find_in_heap_arr(memmap->runtime, PLATFORM_HEAP_RUNTIME, ptr);
	if (heap)
		goto out;

#if CONFIG_CORE_COUNT > 1
	heap = find_in_heap_arr(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED, ptr);
	if (heap)
		goto out;
#endif

	heap = find_in_heap_arr(memmap->buffer, PLATFORM_HEAP_BUFFER, ptr);
	if (heap)
		goto out;

	return NULL;

out:
	return heap;
}

static struct mm_heap *get_heap_from_caps(struct mm_heap *heap, int count,
					  uint32_t caps)
{
	uint32_t mask;
	int i;

	/* find the first heap that supports all requested capabilities */
	for (i = 0; i < count; i++) {
		mask = heap[i].caps & caps;
		if (mask == caps)
			return &heap[i];
	}

	return NULL;
}
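
/* caps is a bitmask, so e.g. caps = SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA
 * (illustrative capability bits) only matches a heap whose caps field
 * contains both bits: (heap->caps & caps) must equal caps itself.
 */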

static void *get_ptr_from_heap(struct mm_heap *heap, uint32_t flags,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map;
	int i, temp_bytes = bytes;
	void *ptr = NULL;

	/* Only allow alignment as a power of 2 */
	if ((alignment & (alignment - 1)) != 0)
		panic(SOF_IPC_PANIC_MEM);

	for (i = 0; i < heap->blocks; i++) {
		map = &heap->map[i];

		/* the size of the requested buffer is adjusted for alignment
		 * purposes: if the first free block is not already aligned,
		 * a bigger size is needed to leave room for alignment
		 */
		if (alignment &&
		    ((map->base + (map->block_size * map->first_free)) %
		     alignment))
			temp_bytes += alignment;

		/* is the block big enough? */
		if (map->block_size < temp_bytes) {
			temp_bytes = bytes;
			continue;
		}

		/* does the block map have free space? */
		if (map->free_count == 0) {
			temp_bytes = bytes;
			continue;
		}

		/* free block space exists */
		ptr = alloc_block(heap, i, caps, alignment);

		break;
	}

	return ptr;
}
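
/* e.g. (hypothetical numbers) with alignment = 64 and the first free block
 * starting at 0x9e008020, the start is misaligned, so the fit check uses
 * temp_bytes = bytes + 64 to guarantee the block can absorb the worst-case
 * alignment padding.
 */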

/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *heap;
	struct block_map *block_map = NULL;
	struct block_hdr *hdr;
	int i;
	int block;
	int used_blocks;
	bool heap_is_full;

	/* the caller normally passes the exact address it got from the
	 * allocator, so try freeing it directly
	 */
	heap = get_heap_from_ptr(ptr);

	/* some callers use an uncached address although the allocation was
	 * made from a cached address, e.g. the SOF_MEM_ZONE_RUNTIME_SHARED
	 * region on cAVS platforms, so try once more to free the memory via
	 * its corresponding cached address.
	 */
	if (!heap && is_uncached(ptr)) {
		tr_dbg(&mem_tr, "free_block(): uncached buffer %p, try freeing from its cached address",
		       ptr);
		ptr = uncache_to_cache(ptr);
		heap = get_heap_from_ptr(ptr);
	}

	if (!heap) {
		tr_err(&mem_tr, "free_block(): invalid heap = %p, cpu = %d",
		       ptr, cpu_get_id());
		return;
	}

	/* find block that ptr belongs to */
	for (i = 0; i < heap->blocks; i++) {
		block_map = &heap->map[i];

		/* is ptr in this block? */
		if ((uint32_t)ptr < (block_map->base +
		    (block_map->block_size * block_map->count)))
			break;
	}

	if (i == heap->blocks) {
		/* not found */
		tr_err(&mem_tr, "free_block(): invalid ptr = %p cpu = %d",
		       ptr, cpu_get_id());
		return;
	}

	/* calculate block header */
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;

	hdr = &block_map->block[block];

	/* Bring back the original unaligned pointer position and calculate
	 * the correct hdr for the free operation: it could be in a different
	 * block, since we got a user pointer here, or NULL if the header was
	 * never set.
	 */
	if (hdr->unaligned_ptr != ptr && hdr->unaligned_ptr) {
		ptr = hdr->unaligned_ptr;
		block = ((uint32_t)ptr - block_map->base)
			 / block_map->block_size;
		hdr = &block_map->block[block];
	}

	/* report an error if ptr is not aligned to block */
	if (block_map->base + block_map->block_size * block != (uint32_t)ptr)
		panic(SOF_IPC_PANIC_MEM);

	heap_is_full = !block_map->free_count;

	/* free the block header and the following contiguous blocks */
	used_blocks = block + hdr->size;

	for (i = block; i < used_blocks; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->used = 0;
		hdr->unaligned_ptr = NULL;
		block_map->free_count++;
		heap->info.used -= block_map->block_size;
		heap->info.free += block_map->block_size;
	}

	/* set first free block */
	if (block < block_map->first_free || heap_is_full)
		block_map->first_free = block;

#if CONFIG_DEBUG_BLOCK_FREE
	/* memset the whole block in case of an unaligned ptr */
	validate_memory((void *)(block_map->base + block_map->block_size * block),
			block_map->block_size * (i - block));
	memset((void *)(block_map->base + block_map->block_size * block),
	       DEBUG_BLOCK_FREE_VALUE_8BIT,
	       block_map->block_size * (i - block));
#endif
}
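
/* e.g. (hypothetical numbers) with base = 0x9e008000 and block_size = 0x180,
 * freeing ptr = 0x9e008300 gives block = 0x300 / 0x180 = 2; if that header's
 * unaligned_ptr points back into block 1, the free restarts from block 1 and
 * releases hdr->size blocks from there.
 */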

#if CONFIG_DEBUG_HEAP

static void trace_heap_blocks(struct mm_heap *heap)
{
	struct block_map *block_map;
	int i;

	tr_err(&mem_tr, "heap: 0x%x size %d blocks %d caps 0x%x", heap->heap,
	       heap->size, heap->blocks, heap->caps);
	tr_err(&mem_tr, " used %d free %d", heap->info.used,
	       heap->info.free);

	for (i = 0; i < heap->blocks; i++) {
		block_map = &heap->map[i];

		tr_err(&mem_tr, " block %d base 0x%x size %d count %d", i,
		       block_map->base, block_map->block_size,
		       block_map->count);
		tr_err(&mem_tr, "  free %d first at %d",
		       block_map->free_count, block_map->first_free);
	}
}

static void alloc_trace_heap(enum mem_zone zone, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap_base;
	struct mm_heap *heap;
	unsigned int heap_count;
	unsigned int n;
	unsigned int i = 0;
	int count = 0;

	switch (zone) {
	case SOF_MEM_ZONE_SYS:
		heap_base = memmap->system;
		heap_count = PLATFORM_HEAP_SYSTEM;
		break;
	case SOF_MEM_ZONE_SYS_RUNTIME:
		heap_base = memmap->system_runtime;
		heap_count = PLATFORM_HEAP_SYSTEM_RUNTIME;
		break;
	case SOF_MEM_ZONE_RUNTIME:
		heap_base = memmap->runtime;
		heap_count = PLATFORM_HEAP_RUNTIME;
		break;
	case SOF_MEM_ZONE_BUFFER:
		heap_base = memmap->buffer;
		heap_count = PLATFORM_HEAP_BUFFER;
		break;
#if CONFIG_CORE_COUNT > 1
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		heap_base = memmap->runtime_shared;
		heap_count = PLATFORM_HEAP_RUNTIME_SHARED;
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		heap_base = memmap->system_shared;
		heap_count = PLATFORM_HEAP_SYSTEM_SHARED;
		break;
#else
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		heap_base = memmap->runtime;
		heap_count = PLATFORM_HEAP_RUNTIME;
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		heap_base = memmap->system;
		heap_count = PLATFORM_HEAP_SYSTEM;
		break;
#endif
	default:
		tr_err(&mem_tr, "alloc trace: unsupported mem zone");
		goto out;
	}
	heap = heap_base;
	n = heap_count;

	while (i < heap_count) {
		heap = get_heap_from_caps(heap, n, caps);
		if (!heap)
			break;

		trace_heap_blocks(heap);
		count++;
		i = heap - heap_base + 1;
		n = heap_count - i;
		heap++;
	}

	if (count == 0)
		tr_err(&mem_tr, "heap: none found for zone %d caps 0x%x, bytes 0x%x",
		       zone, caps, bytes);
out:
	return;
}

#define DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags) \
	do { \
		if (!ptr) { \
			tr_err(&mem_tr, "failed to alloc 0x%x bytes zone 0x%x caps 0x%x flags 0x%x", \
			       bytes, zone, caps, flags); \
			alloc_trace_heap(zone, caps, bytes); \
		} \
	} while (0)
#else
#define DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags)
#endif

/* allocate single block for system runtime */
static void *rmalloc_sys_runtime(uint32_t flags, int caps, int core,
				 size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *cpu_heap;
	void *ptr;

	/* use the heap dedicated for the selected core */
	cpu_heap = memmap->system_runtime + core;
	if ((cpu_heap->caps & caps) != caps)
		panic(SOF_IPC_PANIC_MEM);

	ptr = get_ptr_from_heap(cpu_heap, flags, caps, bytes,
				PLATFORM_DCACHE_ALIGN);

	return ptr;
}

/* allocate single block for runtime */
static void *rmalloc_runtime(uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* check runtime heap for capabilities */
	heap = get_heap_from_caps(memmap->runtime, PLATFORM_HEAP_RUNTIME, caps);
	if (!heap) {
		/* next check buffer heap for capabilities */
		heap = get_heap_from_caps(memmap->buffer, PLATFORM_HEAP_BUFFER,
					  caps);
		if (!heap) {
			tr_err(&mem_tr, "rmalloc_runtime(): caps = %x, bytes = %d",
			       caps, bytes);

			return NULL;
		}
	}

	return get_ptr_from_heap(heap, flags, caps, bytes,
				 PLATFORM_DCACHE_ALIGN);
}

#if CONFIG_CORE_COUNT > 1
/* allocate single block for shared */
static void *rmalloc_runtime_shared(uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* check shared heap for capabilities */
	heap = get_heap_from_caps(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED, caps);
	if (!heap) {
		tr_err(&mem_tr, "rmalloc_runtime_shared(): caps = %x, bytes = %d", caps, bytes);
		return NULL;
	}

	return get_ptr_from_heap(heap, flags, caps, bytes, PLATFORM_DCACHE_ALIGN);
}
#endif

static void *_malloc_unlocked(enum mem_zone zone, uint32_t flags, uint32_t caps,
			      size_t bytes)
{
	struct mm *memmap = memmap_get();
	void *ptr = NULL;

	switch (zone) {
	case SOF_MEM_ZONE_SYS:
		ptr = rmalloc_sys(memmap->system + cpu_get_id(), flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_RUNTIME:
		ptr = rmalloc_sys_runtime(flags, caps, cpu_get_id(), bytes);
		break;
	case SOF_MEM_ZONE_RUNTIME:
		ptr = rmalloc_runtime(flags, caps, bytes);
		break;
#if CONFIG_CORE_COUNT > 1
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		ptr = rmalloc_runtime_shared(flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		ptr = rmalloc_sys(memmap->system_shared, flags, caps, bytes);
		break;
#else
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		ptr = rmalloc_runtime(flags, caps, bytes);
		break;
	case SOF_MEM_ZONE_SYS_SHARED:
		ptr = rmalloc_sys(memmap->system, flags, caps, bytes);
		break;
#endif
	default:
		tr_err(&mem_tr, "rmalloc(): invalid zone");
		panic(SOF_IPC_PANIC_MEM); /* non-recoverable logic problem */
		break;
	}

#if CONFIG_DEBUG_BLOCK_FREE
	if (ptr)
		bzero(ptr, bytes);
#endif

	memmap->heap_trace_updated = 1;

	return ptr;
}

void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	struct mm *memmap = memmap_get();
	uint32_t lock_flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap->lock, lock_flags);

	ptr = _malloc_unlocked(zone, flags, caps, bytes);

	spin_unlock_irq(&memmap->lock, lock_flags);

	DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags);
	return ptr;
}

/* allocates and clears memory */
void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
	void *ptr;

	ptr = rmalloc(zone, flags, caps, bytes);
	if (ptr)
		bzero(ptr, bytes);

	return ptr;
}

void *rzalloc_core_sys(int core, size_t bytes)
{
	struct mm *memmap = memmap_get();
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap->lock, flags);

	ptr = rmalloc_sys(memmap->system + core, 0, 0, bytes);
	if (ptr)
		bzero(ptr, bytes);

	spin_unlock_irq(&memmap->lock, flags);
	return ptr;
}

/* allocates contiguous buffers - not for direct use, clients use rballoc() */
static void *alloc_heap_buffer(struct mm_heap *heap, uint32_t flags,
			       uint32_t caps, size_t bytes, uint32_t alignment)
{
	struct block_map *map;
#if CONFIG_DEBUG_BLOCK_FREE
	unsigned int temp_bytes = bytes;
#endif
	unsigned int j;
	int i;
	void *ptr = NULL;

	/* Only allow alignment as a power of 2 */
	if ((alignment & (alignment - 1)) != 0)
		panic(SOF_IPC_PANIC_MEM);

	/*
	 * There are several cases when a memory allocation request can be
	 * satisfied with one buffer:
	 * 1. allocate 30 bytes 32-byte aligned from 32 byte buffers. Any free
	 * buffer is acceptable, the beginning of the buffer is used.
	 * 2. allocate 30 bytes 256-byte aligned from 0x180 byte buffers. 1
	 * buffer is also always enough, but in some buffers a part of the
	 * buffer has to be skipped.
	 * 3. allocate 200 bytes 256-byte aligned from 0x180 byte buffers. 1
	 * buffer is enough, but not every buffer is suitable.
	 */

	/* will the request fit in a single block? */
	for (i = 0, map = heap->map; i < heap->blocks; i++, map++) {
		struct block_hdr *hdr;
		uintptr_t free_start;

		if (map->block_size < bytes || !map->free_count)
			continue;

		if (alignment <= 1) {
			/* found: grab a block */
			ptr = alloc_block(heap, i, caps, alignment);
			break;
		}

		/*
		 * Usually block sizes are a power of 2 and all blocks are
		 * respectively aligned. But it's also possible to have
		 * non-power of 2 sized blocks, e.g. to optimize for typical
		 * ALSA allocations a map with 0x180 byte buffers can be used.
		 * For performance reasons we could first check the power-of-2
		 * case. This can be added as an optimization later.
		 */
		for (j = map->first_free, hdr = map->block + j,
		     free_start = map->base + map->block_size * j;
		     j < map->count;
		     j++, hdr++, free_start += map->block_size) {
			uintptr_t aligned;

			if (hdr->used)
				continue;

			aligned = ALIGN_UP(free_start, alignment);

			if (aligned + bytes > free_start + map->block_size)
				continue;

			/* Found, alloc_block_index() cannot fail */
			ptr = alloc_block_index(heap, i, alignment, j);
#if CONFIG_DEBUG_BLOCK_FREE
			temp_bytes += aligned - free_start;
#endif
			break;
		}

		if (ptr)
			break;
	}

	/* request spans > 1 block */
	if (!ptr) {
		/* The size of the requested buffer is adjusted for alignment
		 * purposes: since the allocation spans multiple blocks, the
		 * worst-case padding has to be assumed.
		 */
		bytes += alignment;

		if (heap->size < bytes)
			return NULL;

		/*
		 * Find the best block size for the request. We know that we
		 * failed to find a single large enough block, so skip those.
		 */
		for (i = heap->blocks - 1; i >= 0; i--) {
			map = &heap->map[i];

			/* allocate if block size is smaller than request */
			if (map->block_size < bytes) {
				ptr = alloc_cont_blocks(heap, i, caps,
							bytes, alignment);
				if (ptr)
					break;
			}
		}
	}

#if CONFIG_DEBUG_BLOCK_FREE
	if (ptr)
		bzero(ptr, temp_bytes);
#endif

	return ptr;
}

static void *_balloc_unlocked(uint32_t flags, uint32_t caps, size_t bytes,
			      uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;
	unsigned int i, n;
	void *ptr = NULL;

	for (i = 0, n = PLATFORM_HEAP_BUFFER, heap = memmap->buffer;
	     i < PLATFORM_HEAP_BUFFER;
	     i = heap - memmap->buffer + 1, n = PLATFORM_HEAP_BUFFER - i,
	     heap++) {
		heap = get_heap_from_caps(heap, n, caps);
		if (!heap)
			break;

		ptr = alloc_heap_buffer(heap, flags, caps, bytes, alignment);
		if (ptr)
			break;

		/* Continue from the next heap */
	}

	return ptr;
}

/* allocates contiguous buffers; clients normally use the rballoc() wrapper */
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
		    uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	void *ptr = NULL;
	uint32_t lock_flags;

	spin_lock_irq(&memmap->lock, lock_flags);

	ptr = _balloc_unlocked(flags, caps, bytes, alignment);

	spin_unlock_irq(&memmap->lock, lock_flags);

	DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags);
	return ptr;
}
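
/* Illustrative call (values are examples only): a 1 KiB buffer aligned to a
 * 32-byte boundary; a NULL return means no heap with the requested caps had
 * enough contiguous space.
 *
 *	void *buf = rballoc_align(0, SOF_MEM_CAPS_RAM, 1024, 32);
 *	if (!buf)
 *		return -ENOMEM;
 */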

static void _rfree_unlocked(void *ptr)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	/* sanity check - NULL ptrs are fine */
	if (!ptr)
		return;

	/* prepare the pointer if the platform requires it */
	ptr = platform_rfree_prepare(ptr);

	/* use the heap dedicated for the core or shared memory */
#if CONFIG_CORE_COUNT > 1
	if (is_uncached(ptr))
		heap = memmap->system_shared;
	else
		heap = memmap->system + cpu_get_id();
#else
	heap = memmap->system;
#endif

	/* panic if the pointer is from the system heap */
	if (ptr >= (void *)heap->heap &&
	    (char *)ptr < (char *)heap->heap + heap->size) {
		tr_err(&mem_tr, "rfree(): attempt to free system heap = %p, cpu = %d",
		       ptr, cpu_get_id());
		panic(SOF_IPC_PANIC_MEM);
	}

	/* free the block */
	free_block(ptr);
	memmap->heap_trace_updated = 1;
}

void rfree(void *ptr)
{
	struct mm *memmap = memmap_get();
	uint32_t flags;

	spin_lock_irq(&memmap->lock, flags);
	_rfree_unlocked(ptr);
	spin_unlock_irq(&memmap->lock, flags);
}

void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes,
		      size_t old_bytes, uint32_t alignment)
{
	struct mm *memmap = memmap_get();
	void *new_ptr = NULL;
	uint32_t lock_flags;
	size_t copy_bytes = MIN(bytes, old_bytes);

	if (!bytes)
		return new_ptr;

	spin_lock_irq(&memmap->lock, lock_flags);

	new_ptr = _balloc_unlocked(flags, caps, bytes, alignment);

	if (new_ptr && ptr && !(flags & SOF_MEM_FLAG_NO_COPY))
		memcpy_s(new_ptr, copy_bytes, ptr, copy_bytes);

	if (new_ptr)
		_rfree_unlocked(ptr);

	spin_unlock_irq(&memmap->lock, lock_flags);

	DEBUG_TRACE_PTR(new_ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags);
	return new_ptr;
}
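
/* Illustrative call (values are examples only): grow a 256-byte buffer to
 * 512 bytes, keeping the first 256 bytes. On failure NULL is returned and
 * the old buffer remains allocated.
 *
 *	void *bigger = rbrealloc_align(buf, 0, SOF_MEM_CAPS_RAM, 512, 256, 64);
 *	if (bigger)
 *		buf = bigger;
 */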

/* TODO: all mm_pm_...() routines to be implemented for IMR storage */
uint32_t mm_pm_context_size(void)
{
	return 0;
}

/*
 * Save the DSP memories that are in use by the system and modules.
 * All pipelines and modules must be disabled before calling this function.
 * No allocations are permitted after calling this and before calling restore.
 */
int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
{
	return -ENOTSUP;
}

/*
 * Restore the DSP memories to modules and the system.
 * This must be called immediately after booting before any pipeline work.
 */
int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
{
	return -ENOTSUP;
}

void free_heap(enum mem_zone zone)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *cpu_heap;

	/* only to be called by secondary cores, and only for the sys heap;
	 * anything else is a critical flow issue
	 */
	if (cpu_get_id() == PLATFORM_PRIMARY_CORE_ID ||
	    zone != SOF_MEM_ZONE_SYS) {
		tr_err(&mem_tr, "free_heap(): critical flow issue");
		panic(SOF_IPC_PANIC_MEM);
	}

	cpu_heap = memmap->system + cpu_get_id();
	cpu_heap->info.used = 0;
	cpu_heap->info.free = cpu_heap->size;
}

#if CONFIG_TRACE
void heap_trace(struct mm_heap *heap, int size)
{
	struct block_map *current_map;
	int i;
	int j;

	for (i = 0; i < size; i++) {
		tr_info(&mem_tr, " heap: 0x%x size %d blocks %d caps 0x%x",
			heap->heap, heap->size, heap->blocks,
			heap->caps);
		tr_info(&mem_tr, "  used %d free %d", heap->info.used,
			heap->info.free);

		for (j = 0; j < heap->blocks; j++) {
			current_map = &heap->map[j];

			tr_info(&mem_tr, "  block %d base 0x%x size %d",
				j, current_map->base,
				current_map->block_size);
			tr_info(&mem_tr, "   count %d free %d",
				current_map->count,
				current_map->free_count);
		}

		heap++;
	}
}

void heap_trace_all(int force)
{
	struct mm *memmap = memmap_get();

	/* has the heap changed since it was last shown? */
	if (memmap->heap_trace_updated || force) {
		tr_info(&mem_tr, "heap: system status");
		heap_trace(memmap->system, PLATFORM_HEAP_SYSTEM);
		tr_info(&mem_tr, "heap: system runtime status");
		heap_trace(memmap->system_runtime, PLATFORM_HEAP_SYSTEM_RUNTIME);
		tr_info(&mem_tr, "heap: buffer status");
		heap_trace(memmap->buffer, PLATFORM_HEAP_BUFFER);
		tr_info(&mem_tr, "heap: runtime status");
		heap_trace(memmap->runtime, PLATFORM_HEAP_RUNTIME);
#if CONFIG_CORE_COUNT > 1
		tr_info(&mem_tr, "heap: runtime shared status");
		heap_trace(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED);
		tr_info(&mem_tr, "heap: system shared status");
		heap_trace(memmap->system_shared, PLATFORM_HEAP_SYSTEM_SHARED);
#endif
	}

	memmap->heap_trace_updated = 0;
}
#else
void heap_trace_all(int force) { }
void heap_trace(struct mm_heap *heap, int size) { }
#endif

/* initialise map */
void init_heap(struct sof *sof)
{
	struct mm *memmap = sof->memory_map;

#if !CONFIG_LIBRARY
	extern uintptr_t _system_heap_start;

	/* sanity check for malformed images or loader issues */
	if (memmap->system[0].heap != (uintptr_t)&_system_heap_start)
		panic(SOF_IPC_PANIC_MEM);
#endif

	init_heap_map(memmap->system_runtime, PLATFORM_HEAP_SYSTEM_RUNTIME);

	init_heap_map(memmap->runtime, PLATFORM_HEAP_RUNTIME);

#if CONFIG_CORE_COUNT > 1
	init_heap_map(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED);
#endif

	init_heap_map(memmap->buffer, PLATFORM_HEAP_BUFFER);

#if CONFIG_DEBUG_BLOCK_FREE
	write_pattern((struct mm_heap *)&memmap->buffer, PLATFORM_HEAP_BUFFER,
		      DEBUG_BLOCK_FREE_VALUE_8BIT);
	write_pattern((struct mm_heap *)&memmap->runtime, PLATFORM_HEAP_RUNTIME,
		      DEBUG_BLOCK_FREE_VALUE_8BIT);
#endif

	spinlock_init(&memmap->lock);
}

#if CONFIG_DEBUG_MEMORY_USAGE_SCAN
int heap_info(enum mem_zone zone, int index, struct mm_info *out)
{
	struct mm *memmap = memmap_get();
	struct mm_heap *heap;

	if (!out)
		goto error;

	switch (zone) {
	case SOF_MEM_ZONE_SYS:
		if (index >= PLATFORM_HEAP_SYSTEM)
			goto error;
		heap = memmap->system + index;
		break;
	case SOF_MEM_ZONE_SYS_RUNTIME:
		if (index >= PLATFORM_HEAP_SYSTEM_RUNTIME)
			goto error;
		heap = memmap->system_runtime + index;
		break;
	case SOF_MEM_ZONE_RUNTIME:
		if (index >= PLATFORM_HEAP_RUNTIME)
			goto error;
		heap = memmap->runtime + index;
		break;
	case SOF_MEM_ZONE_BUFFER:
		if (index >= PLATFORM_HEAP_BUFFER)
			goto error;
		heap = memmap->buffer + index;
		break;
#if CONFIG_CORE_COUNT > 1
	case SOF_MEM_ZONE_SYS_SHARED:
		if (index >= PLATFORM_HEAP_SYSTEM_SHARED)
			goto error;
		heap = memmap->system_shared + index;
		break;
	case SOF_MEM_ZONE_RUNTIME_SHARED:
		if (index >= PLATFORM_HEAP_RUNTIME_SHARED)
			goto error;
		heap = memmap->runtime_shared + index;
		break;
#endif
	default:
		goto error;
	}

	spin_lock(&memmap->lock);
	*out = heap->info;
	spin_unlock(&memmap->lock);
	return 0;
error:
	tr_err(&mem_tr, "heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index,
	       (uint32_t)out);
	return -EINVAL;
}
#endif