/*
 * Copyright (c) 2015, Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <arch/cpu.h>
#include <errno.h>
#include <stdio.h>
#include <malloc.h>
#include <sys/__assert.h>
#include <sys/stat.h>
#include <linker/linker-defs.h>
#include <sys/util.h>
#include <sys/errno_private.h>
#include <sys/libc-hooks.h>
#include <syscall_handler.h>
#include <app_memory/app_memdomain.h>
#include <init.h>
#include <sys/sem.h>
#include <sys/mutex.h>
#include <sys/mem_manage.h>
#include <sys/time.h>

#define LIBC_BSS	K_APP_BMEM(z_libc_partition)
#define LIBC_DATA	K_APP_DMEM(z_libc_partition)

/*
 * End result of this thorny set of ifdefs is to define:
 *
 * - HEAP_BASE base address of the heap arena
 * - MAX_HEAP_SIZE size of the heap arena
 */
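/* Both symbols are consumed by malloc_prepare() and _sbrk() below. */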

#ifdef CONFIG_MMU
	#ifdef CONFIG_USERSPACE
		struct k_mem_partition z_malloc_partition;
	#endif

	LIBC_BSS static unsigned char *heap_base;
	LIBC_BSS static size_t max_heap_size;

	#define HEAP_BASE		heap_base
	#define MAX_HEAP_SIZE		max_heap_size
	#define USE_MALLOC_PREPARE	1
#elif CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
	/* Arena size expressed in Kconfig, due to power-of-two size/align
	 * requirements of certain MPUs.
	 *
	 * We use an automatic memory partition instead of setting this up
	 * in malloc_prepare().
	 */
	K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
	#define MALLOC_BSS	K_APP_BMEM(z_malloc_partition)

	/* Compiler will throw an error if the provided value isn't a
	 * power of two
	 */
	MALLOC_BSS static unsigned char
		__aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
		heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
	#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
	#define HEAP_BASE heap_base
#else /* Not MMU or CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE */
	#define USED_RAM_END_ADDR   POINTER_TO_UINT(&_end)

	#ifdef Z_MALLOC_PARTITION_EXISTS
		/* Start of malloc arena needs to be aligned per MPU
		 * requirements
		 */
		struct k_mem_partition z_malloc_partition;

		#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
			#define HEAP_BASE	ROUND_UP(USED_RAM_END_ADDR, \
				 CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
		#elif defined(CONFIG_ARC)
			#define HEAP_BASE	ROUND_UP(USED_RAM_END_ADDR, \
							  Z_ARC_MPU_ALIGN)
		#else
			#error "Unsupported platform"
		#endif /* CONFIG_<arch> */
		#define USE_MALLOC_PREPARE	1
	#else
		/* End of kernel image */
		#define HEAP_BASE		USED_RAM_END_ADDR
	#endif

	/* End of the malloc arena is the end of physical memory */
	#if defined(CONFIG_XTENSA)
		/* TODO: Why is xtensa a special case? */
		extern void *_heap_sentry;
		#define MAX_HEAP_SIZE	(POINTER_TO_UINT(&_heap_sentry) - \
					 HEAP_BASE)
	#else
		#define MAX_HEAP_SIZE	(KB(CONFIG_SRAM_SIZE) - (HEAP_BASE - \
					 CONFIG_SRAM_BASE_ADDRESS))
	#endif /* CONFIG_XTENSA */
#endif

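/*
 * POST_KERNEL hook that finishes setting up the malloc arena: on MMU targets
 * it maps the heap region, and when a malloc memory partition exists it
 * fills in that partition so user mode threads can access the arena.
 */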
static int malloc_prepare(const struct device *unused)
{
	ARG_UNUSED(unused);

#ifdef USE_MALLOC_PREPARE
#ifdef CONFIG_MMU
	max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
			    k_mem_free_get());

	if (max_heap_size != 0) {
		heap_base = k_mem_map(max_heap_size, K_MEM_PERM_RW);
		__ASSERT(heap_base != NULL,
			 "failed to allocate heap of size %zu", max_heap_size);
	}
#endif /* CONFIG_MMU */

#ifdef Z_MALLOC_PARTITION_EXISTS
	z_malloc_partition.start = (uintptr_t)HEAP_BASE;
	z_malloc_partition.size = (size_t)MAX_HEAP_SIZE;
	z_malloc_partition.attr = K_MEM_PARTITION_P_RW_U_RW;
#endif /* Z_MALLOC_PARTITION_EXISTS */
#endif /* USE_MALLOC_PREPARE */

	/*
	 * Validate that the memory space available for the newlib heap is
	 * greater than the minimum required size.
	 */
	__ASSERT(MAX_HEAP_SIZE >= CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE,
		 "memory space available for newlib heap is less than the "
		 "minimum required size specified by "
		 "CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE");

	return 0;
}

SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

/* Current offset from HEAP_BASE of unused memory */
LIBC_BSS static size_t heap_sz;

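/*
 * Default stdout/stdin hooks, used until a console driver installs its own
 * via __stdout_hook_install()/__stdin_hook_install(): output is discarded
 * (EOF) and input reads back as NUL bytes.
 */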
static int _stdout_hook_default(int c)
{
	(void)(c);  /* Prevent warning about unused argument */

	return EOF;
}

static int (*_stdout_hook)(int) = _stdout_hook_default;

void __stdout_hook_install(int (*hook)(int))
{
	_stdout_hook = hook;
}

static unsigned char _stdin_hook_default(void)
{
	return 0;
}

static unsigned char (*_stdin_hook)(void) = _stdin_hook_default;

void __stdin_hook_install(unsigned char (*hook)(void))
{
	_stdin_hook = hook;
}

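/*
 * Pull up to nbytes characters from the stdin hook, stopping after a newline
 * or carriage return; returns the number of bytes actually stored in buf.
 */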
int z_impl_zephyr_read_stdin(char *buf, int nbytes)
{
	int i = 0;

	for (i = 0; i < nbytes; i++) {
		*(buf + i) = _stdin_hook();
		if ((*(buf + i) == '\n') || (*(buf + i) == '\r')) {
			i++;
			break;
		}
	}
	return i;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_read_stdin(char *buf, int nbytes)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(buf, nbytes));
	return z_impl_zephyr_read_stdin((char *)buf, nbytes);
}
#include <syscalls/zephyr_read_stdin_mrsh.c>
#endif

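/*
 * Push each byte of the buffer through the stdout hook, expanding '\n' into
 * "\r\n" so terminals render line endings correctly; returns nbytes.
 */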
int z_impl_zephyr_write_stdout(const void *buffer, int nbytes)
{
	const char *buf = buffer;
	int i;

	for (i = 0; i < nbytes; i++) {
		if (*(buf + i) == '\n') {
			_stdout_hook('\r');
		}
		_stdout_hook(*(buf + i));
	}
	return nbytes;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_write_stdout(const void *buf, int nbytes)
{
	Z_OOPS(Z_SYSCALL_MEMORY_READ(buf, nbytes));
	return z_impl_zephyr_write_stdout((const void *)buf, nbytes);
}
#include <syscalls/zephyr_write_stdout_mrsh.c>
#endif

#ifndef CONFIG_POSIX_API
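/*
 * Minimal newlib system call stubs: read/write route to the Zephyr console
 * hooks above, while the file-oriented calls are no-op placeholders. When
 * CONFIG_POSIX_API is enabled these come from the POSIX layer instead (see
 * the #else branch below, which aliases _write to the external write()).
 */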
int _read(int fd, char *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_read_stdin(buf, nbytes);
}
__weak FUNC_ALIAS(_read, read, int);

int _write(int fd, const void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_write_stdout(buf, nbytes);
}
__weak FUNC_ALIAS(_write, write, int);

int _open(const char *name, int mode)
{
	return -1;
}
__weak FUNC_ALIAS(_open, open, int);

int _close(int file)
{
	return -1;
}
__weak FUNC_ALIAS(_close, close, int);

int _lseek(int file, int ptr, int dir)
{
	return 0;
}
__weak FUNC_ALIAS(_lseek, lseek, int);
#else
extern ssize_t write(int file, const char *buffer, size_t count);
#define _write	write
#endif

int _isatty(int file)
{
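	/* Treat the standard descriptors (stdin/stdout/stderr) as terminals */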
	return file <= 2;
}
__weak FUNC_ALIAS(_isatty, isatty, int);

int _kill(int i, int j)
{
	return 0;
}
__weak FUNC_ALIAS(_kill, kill, int);

int _getpid(void)
{
	return 0;
}
__weak FUNC_ALIAS(_getpid, getpid, int);

int _fstat(int file, struct stat *st)
{
	st->st_mode = S_IFCHR;
	return 0;
}
__weak FUNC_ALIAS(_fstat, fstat, int);

__weak void _exit(int status)
{
	_write(1, "exit\n", 5);
	while (1) {
		;
	}
}

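/*
 * Simple bump allocator behind newlib's malloc: hand out memory from the
 * arena starting at HEAP_BASE and fail with (void *)-1 once the request
 * would exceed MAX_HEAP_SIZE.
 */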
void *_sbrk(intptr_t count)
{
	void *ret, *ptr;

	ptr = ((char *)HEAP_BASE) + heap_sz;

	if ((heap_sz + count) < MAX_HEAP_SIZE) {
		heap_sz += count;
		ret = ptr;
	} else {
		ret = (void *)-1;
	}

	return ret;
}
__weak FUNC_ALIAS(_sbrk, sbrk, void *);

#ifdef CONFIG_MULTITHREADING
/*
 * Newlib Retargetable Locking Interface Implementation
 *
 * When multithreading is enabled, the newlib retargetable locking interface is
 * defined below to override the default void implementation and provide the
 * Zephyr-side locks.
 *
 * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
 *	 because the latter do not support dynamic allocation for now.
 */

/* Static locks */
K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
K_MUTEX_DEFINE(__lock___env_recursive_mutex);
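/* Non-recursive locks are backed by binary semaphores (initial count 1, limit 1) */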
K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);

#ifdef CONFIG_USERSPACE
/* Grant public access to all static locks after boot */
static int newlib_locks_prepare(const struct device *unused)
{
	ARG_UNUSED(unused);

	/* Grant access to recursive locks */
	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
	k_object_access_all_grant(&__lock___env_recursive_mutex);

	/* Grant access to non-recursive locks */
	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
	k_object_access_all_grant(&__lock___tz_mutex);
	k_object_access_all_grant(&__lock___dd_hash_mutex);
	k_object_access_all_grant(&__lock___arc4random_mutex);

	return 0;
}

SYS_INIT(newlib_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */

/* Create a new dynamic non-recursive lock */
void __retarget_lock_init(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate semaphore object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_sem));
#else
	*lock = k_object_alloc(K_OBJ_SEM);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");

	k_sem_init((struct k_sem *)*lock, 1, 1);
}

/* Create a new dynamic recursive lock */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate mutex object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_mutex));
#else
	*lock = k_object_alloc(K_OBJ_MUTEX);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "recursive lock allocation failed");

	k_mutex_init((struct k_mutex *)*lock);
}

/* Close dynamic non-recursive lock */
void __retarget_lock_close(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Close dynamic recursive lock */
void __retarget_lock_close_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Acquire non-recursive lock */
void __retarget_lock_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_take((struct k_sem *)lock, K_FOREVER);
}

/* Acquire recursive lock */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
}

/* Try acquiring non-recursive lock */
int __retarget_lock_try_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
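	/* k_sem_take() returns 0 on success, so negate it to report success
	 * as a nonzero value to newlib.
	 */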
	return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
}

/* Try acquiring recursive lock */
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
}

/* Release non-recursive lock */
void __retarget_lock_release(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_give((struct k_sem *)lock);
}

/* Release recursive lock */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_unlock((struct k_mutex *)lock);
}
#endif /* CONFIG_MULTITHREADING */

__weak int *__errno(void)
{
	return z_errno();
}

/* This function gets called if static buffer overflow detection is enabled
 * on stdlib side (Newlib here), in case such an overflow is detected. Newlib
 * provides an implementation not suitable for us, so we override it here.
 */
__weak FUNC_NORETURN void __chk_fail(void)
{
	static const char chk_fail_msg[] = "* buffer overflow detected *\n";
	_write(2, chk_fail_msg, sizeof(chk_fail_msg) - 1);
	k_oops();
	CODE_UNREACHABLE;
}

#if CONFIG_XTENSA
extern int _read(int fd, char *buf, int nbytes);
extern int _open(const char *name, int mode);
extern int _close(int file);
extern int _lseek(int file, int ptr, int dir);

/* The Newlib in the xtensa toolchain is missing the reentrant versions of
 * some syscalls, so provide thin wrappers around the implementations above.
 */
_ssize_t _read_r(struct _reent *r, int fd, void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _read(fd, (char *)buf, nbytes);
}

_ssize_t _write_r(struct _reent *r, int fd, const void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _write(fd, buf, nbytes);
}

int _open_r(struct _reent *r, const char *name, int flags, int mode)
{
	ARG_UNUSED(r);
	ARG_UNUSED(flags);

	return _open(name, mode);
}

int _close_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _close(file);
}

_off_t _lseek_r(struct _reent *r, int file, _off_t ptr, int dir)
{
	ARG_UNUSED(r);

	return _lseek(file, ptr, dir);
}

int _isatty_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _isatty(file);
}

int _kill_r(struct _reent *r, int i, int j)
{
	ARG_UNUSED(r);

	return _kill(i, j);
}

int _getpid_r(struct _reent *r)
{
	ARG_UNUSED(r);

	return _getpid();
}

int _fstat_r(struct _reent *r, int file, struct stat *st)
{
	ARG_UNUSED(r);

	return _fstat(file, st);
}

void _exit_r(struct _reent *r, int status)
{
	ARG_UNUSED(r);

	_exit(status);
}

void *_sbrk_r(struct _reent *r, int count)
{
	ARG_UNUSED(r);

	return _sbrk(count);
}
#endif /* CONFIG_XTENSA */

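/*
 * newlib routes gettimeofday() through _gettimeofday(); when CONFIG_POSIX_API
 * is enabled this forwards to the POSIX gettimeofday() provided by Zephyr,
 * otherwise it simply fails rather than loop back into the newlib stub.
 */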
int _gettimeofday(struct timeval *__tp, void *__tzp)
{
#ifdef CONFIG_POSIX_API
	return gettimeofday(__tp, __tzp);
#else
	/* Non-posix systems should not call gettimeofday() here as it will
	 * result in a recursive call loop and a stack overflow.
	 */
	return -1;
#endif
}