/*
 * Copyright (c) 2015, Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/arch/cpu.h>
#include <errno.h>
#include <stdio.h>
#include <malloc.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/posix/sys/stat.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/errno_private.h>
#include <zephyr/sys/heap_listener.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/init.h>
#include <zephyr/sys/sem.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/kernel/mm.h>
#include <sys/time.h>

int _fstat(int fd, struct stat *st);
int _read(int fd, void *buf, int nbytes);
int _write(int fd, const void *buf, int nbytes);
int _open(const char *name, int mode);
int _close(int file);
int _lseek(int file, int ptr, int dir);
int _kill(int pid, int sig);
int _getpid(void);

#ifndef CONFIG_NEWLIB_LIBC_CUSTOM_SBRK

#define LIBC_BSS K_APP_BMEM(z_libc_partition)
#define LIBC_DATA K_APP_DMEM(z_libc_partition)

/*
 * End result of this thorny set of ifdefs is to define:
 *
 * - HEAP_BASE base address of the heap arena
 * - MAX_HEAP_SIZE size of the heap arena
 */

#ifdef CONFIG_MMU
#ifdef CONFIG_USERSPACE
struct k_mem_partition z_malloc_partition;
#endif

LIBC_BSS static unsigned char *heap_base;
LIBC_BSS static size_t max_heap_size;

#define HEAP_BASE heap_base
#define MAX_HEAP_SIZE max_heap_size
#define USE_MALLOC_PREPARE 1
#elif CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
/* Arena size expressed in Kconfig, due to power-of-two size/align
 * requirements of certain MPUs.
 *
 * We use an automatic memory partition instead of setting this up
 * in malloc_prepare().
 */
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
#define MALLOC_BSS K_APP_BMEM(z_malloc_partition)

/* Compiler will throw an error if the provided value isn't a
 * power of two
 */
MALLOC_BSS static unsigned char
	__aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
	heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
#define HEAP_BASE heap_base
#else /* Not MMU or CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE */
#define USED_RAM_END_ADDR POINTER_TO_UINT(&_end)

#ifdef Z_MALLOC_PARTITION_EXISTS
/* Start of malloc arena needs to be aligned per MPU
 * requirements
 */
struct k_mem_partition z_malloc_partition;

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, \
			   CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
#elif defined(CONFIG_ARC)
#define HEAP_BASE ROUND_UP(USED_RAM_END_ADDR, \
			   Z_ARC_MPU_ALIGN)
#else
#error "Unsupported platform"
#endif /* CONFIG_<arch> */
#define USE_MALLOC_PREPARE 1
#else
/* End of kernel image */
#define HEAP_BASE USED_RAM_END_ADDR
#endif

/* End of the malloc arena is the end of physical memory */
#if defined(CONFIG_XTENSA)
/* TODO: Why is xtensa a special case? */
extern char _heap_sentry[];
#define MAX_HEAP_SIZE (POINTER_TO_UINT(&_heap_sentry) - \
		       HEAP_BASE)
#else
#define MAX_HEAP_SIZE (KB(CONFIG_SRAM_SIZE) - (HEAP_BASE - \
		       CONFIG_SRAM_BASE_ADDRESS))
#endif /* CONFIG_XTENSA */
#endif
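
/*
 * Illustrative note (not required by the build): on a non-MMU, non-MPU
 * target the arithmetic above reduces to "everything between the end of
 * the kernel image and the end of SRAM". Assuming, for example, a board
 * with CONFIG_SRAM_BASE_ADDRESS=0x20000000, CONFIG_SRAM_SIZE=64 (KiB)
 * and an image whose _end symbol lands at 0x20004000, the result is
 * HEAP_BASE = 0x20004000 and
 * MAX_HEAP_SIZE = KB(64) - (0x20004000 - 0x20000000) = 48 KiB.
 * The MPU branches differ only in rounding HEAP_BASE up to the
 * architecture's minimum region alignment first.
 */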

static int malloc_prepare(void)
{

#ifdef USE_MALLOC_PREPARE
#ifdef CONFIG_MMU
	max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
			    k_mem_free_get());

	if (max_heap_size != 0) {
		heap_base = k_mem_map(max_heap_size, K_MEM_PERM_RW);
		__ASSERT(heap_base != NULL,
			 "failed to allocate heap of size %zu", max_heap_size);

	}
#endif /* CONFIG_MMU */

#ifdef Z_MALLOC_PARTITION_EXISTS
	z_malloc_partition.start = (uintptr_t)HEAP_BASE;
	z_malloc_partition.size = (size_t)MAX_HEAP_SIZE;
	z_malloc_partition.attr = K_MEM_PARTITION_P_RW_U_RW;
#endif /* Z_MALLOC_PARTITION_EXISTS */
#endif /* USE_MALLOC_PREPARE */

	/*
	 * Validate that the memory space available for the newlib heap is
	 * at least the minimum required size.
	 */
	__ASSERT(MAX_HEAP_SIZE >= CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE,
		 "memory space available for newlib heap is less than the "
		 "minimum required size specified by "
		 "CONFIG_NEWLIB_LIBC_MIN_REQUIRED_HEAP_SIZE");

	return 0;
}

SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_LIBC);

/* Current offset from HEAP_BASE of unused memory */
LIBC_BSS static size_t heap_sz;
#endif /* CONFIG_NEWLIB_LIBC_CUSTOM_SBRK */

static int _stdout_hook_default(int c)
{
	(void)(c); /* Prevent warning about unused argument */

	return EOF;
}

static int (*_stdout_hook)(int) = _stdout_hook_default;

void __stdout_hook_install(int (*hook)(int))
{
	_stdout_hook = hook;
}

static unsigned char _stdin_hook_default(void)
{
	return 0;
}

static unsigned char (*_stdin_hook)(void) = _stdin_hook_default;

void __stdin_hook_install(unsigned char (*hook)(void))
{
	_stdin_hook = hook;
}
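
/*
 * Usage sketch (illustrative only): a polled console driver typically
 * registers its character I/O routines through the hooks above at init
 * time, after which newlib's stdio flows through them. The routine names
 * below are hypothetical placeholders, not functions defined in this file:
 *
 *	extern int my_console_out(int c);
 *	extern unsigned char my_console_in(void);
 *
 *	__stdout_hook_install(my_console_out);
 *	__stdin_hook_install(my_console_in);
 */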

int z_impl_zephyr_read_stdin(char *buf, int nbytes)
{
	int i = 0;

	for (i = 0; i < nbytes; i++) {
		*(buf + i) = _stdin_hook();
		if ((*(buf + i) == '\n') || (*(buf + i) == '\r')) {
			i++;
			break;
		}
	}
	return i;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_read_stdin(char *buf, int nbytes)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buf, nbytes));
	return z_impl_zephyr_read_stdin((char *)buf, nbytes);
}
#include <zephyr/syscalls/zephyr_read_stdin_mrsh.c>
#endif
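
/*
 * Push each byte of the buffer through the installed stdout hook. A '\r' is
 * emitted before every '\n' so that newlines render as "\r\n" on terminal
 * consoles. The full nbytes count is reported as written regardless of what
 * the hook returns.
 */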
int z_impl_zephyr_write_stdout(const void *buffer, int nbytes)
{
	const char *buf = buffer;
	int i;

	for (i = 0; i < nbytes; i++) {
		if (*(buf + i) == '\n') {
			_stdout_hook('\r');
		}
		_stdout_hook(*(buf + i));
	}
	return nbytes;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zephyr_write_stdout(const void *buf, int nbytes)
{
	K_OOPS(K_SYSCALL_MEMORY_READ(buf, nbytes));
	return z_impl_zephyr_write_stdout((const void *)buf, nbytes);
}
#include <zephyr/syscalls/zephyr_write_stdout_mrsh.c>
#endif

#ifndef CONFIG_POSIX_DEVICE_IO
int _read(int fd, void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_read_stdin(buf, nbytes);
}
__weak FUNC_ALIAS(_read, read, int);

int _write(int fd, const void *buf, int nbytes)
{
	ARG_UNUSED(fd);

	return zephyr_write_stdout(buf, nbytes);
}
__weak FUNC_ALIAS(_write, write, int);

int _open(const char *name, int mode)
{
	return -1;
}
__weak FUNC_ALIAS(_open, open, int);

int _close(int file)
{
	return -1;
}
__weak FUNC_ALIAS(_close, close, int);
#endif /* CONFIG_POSIX_DEVICE_IO */

#ifndef CONFIG_POSIX_FD_MGMT
int _lseek(int file, int ptr, int dir)
{
	return 0;
}
__weak FUNC_ALIAS(_lseek, lseek, int);
#endif /* CONFIG_POSIX_FD_MGMT */

int _isatty(int file)
{
	return file <= 2;
}
__weak FUNC_ALIAS(_isatty, isatty, int);

#ifndef CONFIG_POSIX_SIGNALS
int _kill(int i, int j)
{
	return 0;
}
__weak FUNC_ALIAS(_kill, kill, int);
#endif /* CONFIG_POSIX_SIGNALS */

#ifndef CONFIG_POSIX_FILE_SYSTEM
int _fstat(int file, struct stat *st)
{
	st->st_mode = S_IFCHR;
	return 0;
}
__weak FUNC_ALIAS(_fstat, fstat, int);
#endif /* CONFIG_POSIX_FILE_SYSTEM */

#ifndef CONFIG_POSIX_MULTI_PROCESS
int _getpid(void)
{
	return 0;
}
__weak FUNC_ALIAS(_getpid, getpid, int);

#endif /* CONFIG_POSIX_MULTI_PROCESS */

__weak void _exit(int status)
{
	_write(1, "exit\n", 5);
	while (1) {
		;
	}
}

#ifndef CONFIG_NEWLIB_LIBC_CUSTOM_SBRK
void *_sbrk(intptr_t count)
{
	void *ret, *ptr;

	ptr = ((char *)HEAP_BASE) + heap_sz;

	if ((heap_sz + count) < MAX_HEAP_SIZE) {
		heap_sz += count;
		ret = ptr;

#ifdef CONFIG_NEWLIB_LIBC_HEAP_LISTENER
		heap_listener_notify_resize(HEAP_ID_LIBC, ptr, (char *)ptr + count);
#endif
	} else {
		ret = (void *)-1;
	}

	return ret;
}
__weak FUNC_ALIAS(_sbrk, sbrk, void *);
#endif /* CONFIG_NEWLIB_LIBC_CUSTOM_SBRK */
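
/*
 * Illustrative note on the arena bookkeeping above (numbers are made up for
 * the example): newlib's malloc() grows its pool by calling sbrk(). With
 * heap_sz == 0, a call such as sbrk(4096) returns HEAP_BASE and advances
 * heap_sz to 4096; the next sbrk(4096) returns HEAP_BASE + 4096, and so on.
 * Once heap_sz + count would reach MAX_HEAP_SIZE the call returns (void *)-1
 * and malloc() reports out-of-memory. Shrinking works the same way with a
 * negative count.
 */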

#ifdef CONFIG_MULTITHREADING

/* Make sure _RETARGETABLE_LOCKING is enabled in toolchain */
BUILD_ASSERT(IS_ENABLED(_RETARGETABLE_LOCKING), "Retargetable locking must be enabled");

/*
 * Newlib Retargetable Locking Interface Implementation
 *
 * When multithreading is enabled, the newlib retargetable locking interface is
 * defined below to override the default void implementation and provide the
 * Zephyr-side locks.
 *
 * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
 * because the latter do not support dynamic allocation for now.
 */
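
/*
 * Background sketch (from newlib's retargetable locking contract in its
 * sys/lock.h; simplified here): when _RETARGETABLE_LOCKING is enabled,
 * newlib declares the static lock objects named below and routes its
 * internal locking macros to the __retarget_lock_*() functions defined
 * further down, roughly along the lines of:
 *
 *	#define __lock_acquire_recursive(lock) \
 *		__retarget_lock_acquire_recursive(&lock)
 *
 * so, for instance, stdio stream setup serializes on
 * __lock___sinit_recursive_mutex and malloc() serializes on
 * __lock___malloc_recursive_mutex. Exact macro spellings vary between
 * newlib versions; this is an illustration, not a normative reference.
 */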

/* Static locks */
K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
K_MUTEX_DEFINE(__lock___env_recursive_mutex);
K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);

#ifdef CONFIG_USERSPACE
/* Grant public access to all static locks after boot */
static int newlib_locks_prepare(void)
{

	/* Grant access to recursive locks */
	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
	k_object_access_all_grant(&__lock___env_recursive_mutex);

	/* Grant access to non-recursive locks */
	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
	k_object_access_all_grant(&__lock___tz_mutex);
	k_object_access_all_grant(&__lock___dd_hash_mutex);
	k_object_access_all_grant(&__lock___arc4random_mutex);

	return 0;
}

SYS_INIT(newlib_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */

/* Create a new dynamic non-recursive lock */
void __retarget_lock_init(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate semaphore object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_sem));
#else
	*lock = k_object_alloc(K_OBJ_SEM);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");

	k_sem_init((struct k_sem *)*lock, 1, 1);
#ifdef CONFIG_USERSPACE
	k_object_access_all_grant(*lock);
#endif /* CONFIG_USERSPACE */
}

/* Create a new dynamic recursive lock */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate mutex object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct k_mutex));
#else
	*lock = k_object_alloc(K_OBJ_MUTEX);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "recursive lock allocation failed");

	k_mutex_init((struct k_mutex *)*lock);
#ifdef CONFIG_USERSPACE
	k_object_access_all_grant(*lock);
#endif /* CONFIG_USERSPACE */
}

/* Close dynamic non-recursive lock */
void __retarget_lock_close(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Close dynamic recursive lock */
void __retarget_lock_close_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Acquire non-recursive lock */
void __retarget_lock_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_take((struct k_sem *)lock, K_FOREVER);
}

/* Acquire recursive lock */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
}

/* Try acquiring non-recursive lock */
int __retarget_lock_try_acquire(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
}

/* Try acquiring recursive lock */
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
}

/* Release non-recursive lock */
void __retarget_lock_release(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_sem_give((struct k_sem *)lock);
}

/* Release recursive lock */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_unlock((struct k_mutex *)lock);
}
#endif /* CONFIG_MULTITHREADING */

__weak int *__errno(void)
{
	return z_errno();
}

/* This function gets called when static buffer overflow detection is enabled
 * on the stdlib side (newlib here) and such an overflow is detected. Newlib
 * provides an implementation that is not suitable for us, so we override it
 * here.
 */
__weak FUNC_NORETURN void __chk_fail(void)
{
	static const char chk_fail_msg[] = "* buffer overflow detected *\n";
	_write(2, chk_fail_msg, sizeof(chk_fail_msg) - 1);
	k_oops();
	CODE_UNREACHABLE;
}
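
/*
 * For reference: newlib's object-size-checked string/stdio wrappers (the
 * _FORTIFY_SOURCE-style __*_chk helpers) call __chk_fail() when they detect
 * that a destination buffer would be overrun, so reaching this point means
 * the corruption was caught before it happened; k_oops() then takes the
 * offending thread down through the usual fatal-error path.
 */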

#if CONFIG_XTENSA
/* The newlib shipped with the Xtensa toolchain is missing the reentrant
 * (_*_r) versions of a few syscalls, so thin wrappers are provided here.
 */
_ssize_t _read_r(struct _reent *r, int fd, void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _read(fd, (char *)buf, nbytes);
}

_ssize_t _write_r(struct _reent *r, int fd, const void *buf, size_t nbytes)
{
	ARG_UNUSED(r);

	return _write(fd, buf, nbytes);
}

int _open_r(struct _reent *r, const char *name, int flags, int mode)
{
	ARG_UNUSED(r);
	ARG_UNUSED(flags);

	return _open(name, mode);
}

int _close_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _close(file);
}

_off_t _lseek_r(struct _reent *r, int file, _off_t ptr, int dir)
{
	ARG_UNUSED(r);

	return _lseek(file, ptr, dir);
}

int _isatty_r(struct _reent *r, int file)
{
	ARG_UNUSED(r);

	return _isatty(file);
}

int _kill_r(struct _reent *r, int i, int j)
{
	ARG_UNUSED(r);

	return _kill(i, j);
}

int _getpid_r(struct _reent *r)
{
	ARG_UNUSED(r);

	return _getpid();
}

int _fstat_r(struct _reent *r, int file, struct stat *st)
{
	ARG_UNUSED(r);

	return _fstat(file, st);
}

void _exit_r(struct _reent *r, int status)
{
	ARG_UNUSED(r);

	_exit(status);
}

void *_sbrk_r(struct _reent *r, int count)
{
	ARG_UNUSED(r);

	return _sbrk(count);
}
#endif /* CONFIG_XTENSA */

int _gettimeofday(struct timeval *__tp, void *__tzp)
{
#ifdef CONFIG_XSI_SINGLE_PROCESS
	return gettimeofday(__tp, __tzp);
#else
	/* Non-POSIX builds must not call gettimeofday() here, as that would
	 * recurse back into this function and eventually overflow the stack.
	 */
	return -1;
#endif
}