/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <errno.h>
#include <zephyr/sys/math_extras.h>
#include <string.h>
#include <zephyr/app_memory/app_memdomain.h>
#ifdef CONFIG_MULTITHREADING
#include <zephyr/sys/mutex.h>
#endif
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/types.h>
#ifdef CONFIG_MMU
#include <zephyr/kernel/mm.h>
#endif

#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_COMMON_LIBC_MALLOC

#if (CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE != 0)

/* Figure out where the malloc variables live */
# if Z_MALLOC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
#  define POOL_SECTION Z_GENERIC_SECTION(K_APP_DMEM_SECTION(z_malloc_partition))
# else
#  define POOL_SECTION __noinit
# endif /* else Z_MALLOC_PARTITION_EXISTS */

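/*
 * On MMU systems a negative arena size means "use all remaining free page
 * frames": the heap is then sized and mapped at boot in malloc_prepare()
 * rather than being placed by the linker.
 */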
# if defined(CONFIG_MMU) && CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE < 0
#  define ALLOCATE_HEAP_AT_STARTUP
# endif

# ifndef ALLOCATE_HEAP_AT_STARTUP

/* Figure out alignment requirement */
#  ifdef Z_MALLOC_PARTITION_EXISTS

#   ifdef CONFIG_MMU
#    define HEAP_ALIGN CONFIG_MMU_PAGE_SIZE
#   elif defined(CONFIG_MPU)
#    if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) && \
	(CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE > 0)
#     if (CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE & (CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE - 1)) != 0
#      error CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE must be power of two on this target
#     endif
#     define HEAP_ALIGN	CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE
#    elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#     define HEAP_ALIGN	MAX(sizeof(double), CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#    elif defined(CONFIG_ARC)
#     define HEAP_ALIGN	MAX(sizeof(double), Z_ARC_MPU_ALIGN)
#    elif defined(CONFIG_RISCV)
#     define HEAP_ALIGN	Z_POW2_CEIL(MAX(sizeof(double), Z_RISCV_STACK_GUARD_SIZE))
#    else
/* Default to 64 bytes; we'll get a run-time error if this doesn't work. */
#     define HEAP_ALIGN	64
#    endif /* CONFIG_<arch> */
#   endif /* elif CONFIG_MPU */

#  endif /* else Z_MALLOC_PARTITION_EXISTS */

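/*
 * Fall back to sizeof(double) alignment when nothing above imposed a
 * stricter requirement.
 */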
#  ifndef HEAP_ALIGN
#   define HEAP_ALIGN	sizeof(double)
#  endif

#  if CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE > 0

#  define HEAP_STATIC

/* Statically allocated heap arena (placed in POOL_SECTION) */

#   define HEAP_SIZE	ROUND_UP(CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE, HEAP_ALIGN)
#   define HEAP_BASE	POINTER_TO_UINT(malloc_arena)

static POOL_SECTION unsigned char __aligned(HEAP_ALIGN) malloc_arena[HEAP_SIZE];

#  else /* CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE > 0 */

/*
 * Heap base and size are derived from the unused SRAM between a suitably
 * aligned address after the linker symbol `_end` and the end of SRAM.
 */

#   define USED_RAM_END_ADDR   POINTER_TO_UINT(&_end)

/*
 * The heap can simply start at the first suitably aligned address
 * after `_end`.
 */

#   define HEAP_BASE	ROUND_UP(USED_RAM_END_ADDR, HEAP_ALIGN)

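/*
 * On these Xtensa SoCs the usable RAM for the heap ends at the
 * linker-provided `_heap_sentry` symbol rather than at
 * CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE.
 */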
#   if defined(CONFIG_XTENSA) && (defined(CONFIG_SOC_FAMILY_INTEL_ADSP) \
	|| defined(CONFIG_HAS_ESPRESSIF_HAL))
extern char _heap_sentry[];
#    define HEAP_SIZE  ROUND_DOWN((POINTER_TO_UINT(_heap_sentry) - HEAP_BASE), HEAP_ALIGN)
#   else
#    define HEAP_SIZE	ROUND_DOWN((KB((size_t) CONFIG_SRAM_SIZE) -	\
		((size_t) HEAP_BASE - (size_t) CONFIG_SRAM_BASE_ADDRESS)), HEAP_ALIGN)
#   endif /* else CONFIG_XTENSA */

#  endif /* else CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE > 0 */

# endif /* else ALLOCATE_HEAP_AT_STARTUP */

Z_LIBC_DATA static struct sys_heap z_malloc_heap;

#ifdef CONFIG_MULTITHREADING
Z_LIBC_DATA SYS_MUTEX_DEFINE(z_malloc_heap_mutex);
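/*
 * sys_heap itself is not thread safe, so every allocator entry point
 * takes this mutex around heap operations.
 */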

static inline void
malloc_lock(void) {
	int lock_ret;

	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
	__ASSERT_NO_MSG(lock_ret == 0);
}

static inline void
malloc_unlock(void)
{
	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
}
#else
#define malloc_lock()
#define malloc_unlock()
#endif

void *malloc(size_t size)
{
	malloc_lock();

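	/*
	 * malloc() must return memory suitably aligned for any fundamental
	 * type, so request __alignof__(z_max_align_t) from the heap.
	 */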
	void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
					   __alignof__(z_max_align_t),
					   size);
	if (ret == NULL && size != 0) {
		errno = ENOMEM;
	}

	malloc_unlock();

	return ret;
}

void *aligned_alloc(size_t alignment, size_t size)
{
	malloc_lock();

	void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
					   alignment,
					   size);
	if (ret == NULL && size != 0) {
		errno = ENOMEM;
	}

	malloc_unlock();

	return ret;
}

#ifdef CONFIG_GLIBCXX_LIBCPP

/*
 * GCC's libstdc++ may call this function instead of aligned_alloc() due to
 * a bug in its configuration for "newlib" environments (which include
 * picolibc). Once toolchains containing that fix can be required by Zephyr,
 * this work-around can be removed.
 *
 * Note that aligned_alloc() is not defined as a drop-in replacement for
 * memalign(): it requires the size to be a multiple of the alignment,
 * while memalign() does not. However, the aligned_alloc() implementation
 * here is just a wrapper around sys_heap_aligned_alloc(), which has no
 * such requirement, so it can also back memalign().
 */

void *memalign(size_t alignment, size_t size)
{
	return aligned_alloc(alignment, size);
}
#endif

static int malloc_prepare(void)
{
	void *heap_base = NULL;
	size_t heap_size;

#ifdef ALLOCATE_HEAP_AT_STARTUP
	heap_size = k_mem_free_get();

	if (heap_size != 0) {
		heap_base = k_mem_map(heap_size, K_MEM_PERM_RW);
		__ASSERT(heap_base != NULL,
			 "failed to allocate heap of size %zu", heap_size);

	}
#elif defined(Z_MALLOC_PARTITION_EXISTS) && \
	defined(CONFIG_MPU) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)

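	/*
	 * MPUs with power-of-two alignment rules require a region's base
	 * address to be aligned to its size, so shrink the heap to the
	 * largest power of two that can be placed within the reserved
	 * region.
	 */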
	/* Align size to power of two */
	heap_size = 1;
	while (heap_size * 2 <= HEAP_SIZE) {
		heap_size *= 2;
	}

	/* Search for an aligned heap that fits within the available space */
	while (heap_size >= HEAP_ALIGN) {
		heap_base = UINT_TO_POINTER(ROUND_UP(HEAP_BASE, heap_size));
		if (POINTER_TO_UINT(heap_base) + heap_size <= HEAP_BASE + HEAP_SIZE) {
			break;
		}
		heap_size >>= 1;
	}
#else
	heap_base = UINT_TO_POINTER(HEAP_BASE);
	heap_size = HEAP_SIZE;
#endif

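/*
 * When the heap was placed dynamically, publish its final bounds to the
 * userspace partition so user-mode threads can be granted access to it.
 */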
#if Z_MALLOC_PARTITION_EXISTS && !defined(HEAP_STATIC)
	z_malloc_partition.start = POINTER_TO_UINT(heap_base);
	z_malloc_partition.size = heap_size;
	z_malloc_partition.attr = K_MEM_PARTITION_P_RW_U_RW;
#endif

	sys_heap_init(&z_malloc_heap, heap_base, heap_size);

	return 0;
}

void *realloc(void *ptr, size_t requested_size)
{
	malloc_lock();

	void *ret = sys_heap_aligned_realloc(&z_malloc_heap, ptr,
					     __alignof__(z_max_align_t),
					     requested_size);

	if (ret == NULL && requested_size != 0) {
		errno = ENOMEM;
	}

	malloc_unlock();

	return ret;
}

void free(void *ptr)
{
	malloc_lock();
	sys_heap_free(&z_malloc_heap, ptr);
	malloc_unlock();
}

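/*
 * The heap is set up once the kernel is running, at the libc
 * initialization priority.
 */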
SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_LIBC);
#else /* No malloc arena */
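/*
 * With no arena configured, malloc() always fails with ENOMEM and free()
 * is a no-op.
 */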
void *malloc(size_t size)
{
	ARG_UNUSED(size);

	LOG_ERR("CONFIG_COMMON_LIBC_MALLOC_ARENA_SIZE is 0");
	errno = ENOMEM;

	return NULL;
}

void free(void *ptr)
{
	ARG_UNUSED(ptr);
}

void *realloc(void *ptr, size_t size)
{
	ARG_UNUSED(ptr);
	return malloc(size);
}
#endif /* else no malloc arena */

#endif /* CONFIG_COMMON_LIBC_MALLOC */

#ifdef CONFIG_COMMON_LIBC_CALLOC
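/*
 * calloc() must detect nmemb * size overflow and fail with ENOMEM rather
 * than silently allocating a short buffer.
 */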
void *calloc(size_t nmemb, size_t size)
{
	void *ret;

	if (size_mul_overflow(nmemb, size, &size)) {
		errno = ENOMEM;
		return NULL;
	}

	ret = malloc(size);

	if (ret != NULL) {
		(void)memset(ret, 0, size);
	}

	return ret;
}
#endif /* CONFIG_COMMON_LIBC_CALLOC */

#ifdef CONFIG_COMMON_LIBC_REALLOCARRAY
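/*
 * reallocarray() is realloc() with the same nmemb * size overflow check
 * as calloc().
 */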
void *reallocarray(void *ptr, size_t nmemb, size_t size)
{
	if (size_mul_overflow(nmemb, size, &size)) {
		errno = ENOMEM;
		return NULL;
	}
	return realloc(ptr, size);
}
#endif /* CONFIG_COMMON_LIBC_REALLOCARRAY */