/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive. Rather than redesign this as part of the
 * spinlockification effort, multiple locks are used to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or for the code that uses them where not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;   /* kobj dlist */
static struct k_spinlock objfree_lock; /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM & ARC MPU we may have two different alignment requirements
 * when dynamically allocating thread stacks, one for the privileged
 * stack and another for the user stack, so we need to account for the
 * worst-case alignment scenario and reserve space for that.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock;     /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

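/* Map a kernel object type enum to a printable name for log messages.
 * Returns NULL when logging is disabled so the name strings are not
 * pulled into the binary.
 */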
const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <zephyr/otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif /* CONFIG_LOG */
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN \
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					    void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

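/* Look up the allocation size for a given kernel object type. Per-type
 * sizes come from the generated otype-to-size.h; any type not listed
 * there falls back to the size of struct device.
 */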
static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <zephyr/otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(const void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is stored as the containing struct dyn_obj's
	 * kobj.name, so walk the list and match on that pointer.
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

end:
	k_spin_unlock(&lists_lock, key);

	return node;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used when this function returns.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			/* Clear the bit. We already know the array index,
			 * and the bit to be cleared.
			 */
			_thread_idx_map[i] &= ~(BIT(idx - 1));

			/* Clear permissions on all objects for this index */
			k_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* Clear permissions to prevent them leaking when the index is
	 * recycled.
	 */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	/* Figure out which bit to set in _thread_idx_map[] and set it. */
	int base = tidx / NUM_BITS(_thread_idx_map[0]);
	int offset = tidx % NUM_BITS(_thread_idx_map[0]);

	_thread_idx_map[base] |= BIT(offset);
}

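/* Allocate the bookkeeping struct dyn_obj plus a separate buffer for the
 * object itself (or the thread stack), initialize the embedded struct
 * k_object, and append the result to obj_list so that k_object_find()
 * and the wordlist iterators can see it.
 */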
static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						   adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
		dyn->kobj.name = (void *)ROUND_UP(
			((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			/* dyn->data is NULL here; free the dyn_obj header itself */
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

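/* Example (illustrative sketch, not part of this file; the alignment and
 * size values are arbitrary placeholders): a caller needing an anonymous,
 * aligned kernel object can request one here and later release it via
 * k_object_free() on the returned object's name pointer:
 *
 *	struct k_object *zo = k_object_create_dynamic_aligned(32, 64);
 *
 *	if (zo != NULL) {
 *		...
 *		k_object_free(zo->name);
 *	}
 */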
struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}

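/* Common allocation path for k_object_alloc() and k_object_alloc_size():
 * validate the requested type, reserve a thread index when allocating a
 * thread object, grant the calling thread permission on the new object,
 * and mark it for reference-counted disposal.
 */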
static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:		/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:		/* Lives in user memory */
	case K_OBJ_NET_SOCKET:		/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}

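/* Example (illustrative sketch): from supervisor mode, the public
 * k_object_alloc() wrapper and k_object_free() pair up as shown below;
 * the semaphore type is just one possible choice:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *		...
 *		k_object_free(sem);
 *	}
 */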
void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}

struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find(obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

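/* Invoke func on every known kernel object: first the statically declared
 * objects in the gperf-generated table, then every dynamically allocated
 * object on obj_list (under lists_lock).
 */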
void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

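/* Return the permission-bitmap index assigned to a thread, or -1 (cast to
 * unsigned int) if the thread is not a known kernel object.
 */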
static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

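/* Clear a thread's permission bit on an object and, if the object was
 * dynamically allocated and no permission bits remain set, run any
 * type-specific cleanup and free the underlying allocation.
 */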
static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif /* CONFIG_PIPES */
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    ((struct k_thread *)ko->name != ctx->parent)) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

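/* Example (illustrative sketch; the object and thread names below are
 * placeholders): supervisor code typically grants a user thread access to
 * a specific object before starting it:
 *
 *	k_object_access_grant(&my_sem, &user_thread);
 *
 * k_thread_perms_set()/k_thread_perms_clear() below are the kernel-side
 * primitives those APIs resolve to once the struct k_object is located.
 */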
void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(_current);

	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Not handled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}

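/* Object validation is normally reached from the generated syscall
 * verification functions rather than called directly. It checks, in
 * order, that the address maps to a kernel object of the expected type,
 * that the calling thread has permission on it, and that its
 * initialization state matches what the caller asked for.
 */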
int k_object_validate(struct k_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     ((otype != K_OBJ_ANY) && (ko->type != otype)))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) { /* _OBJ_INIT_FALSE case */
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, _current);
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */

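/* Example (illustrative sketch; the argument struct and variable names
 * are hypothetical): a syscall verification function can pull a
 * user-supplied structure into kernel memory and abort the call on a bad
 * pointer:
 *
 *	struct foo_args kargs;
 *
 *	K_OOPS(k_usermode_from_copy(&kargs, uargs, sizeof(kargs)));
 */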
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
		      K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

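/* Zero the BSS portion of every application shared memory partition. With
 * demand paging and sections that are not present at boot, this runs both
 * at PRE_KERNEL_1 and again at POST_KERNEL; see the SYS_INIT_NAMED()
 * registrations below.
 */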
static int app_shmem_bss_zero(void)
{
	struct z_app_region *region, *end;

	end = (struct z_app_region *)&__app_shmem_regions_end[0];
	region = (struct z_app_region *)&__app_shmem_regions_start[0];

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait
		 * for the paging mechanism to be initialized before we can
		 * zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), only
		 * clear regions inside the pinned area, since those are the
		 * ones present at that point; skip the rest. During
		 * post-kernel init (z_sys_post_kernel == true), skip regions
		 * in the pinned area as they have already been cleared and
		 * are possibly in use, and clear everything else.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

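/* These fallbacks are wired into the generated syscall dispatch table
 * (included at the bottom of this file): handler_bad_syscall catches
 * out-of-range syscall IDs, and handler_no_syscall covers syscalls that
 * are declared but have no implementation in this build.
 */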
static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <zephyr/syscall_dispatch.c>