/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <kernel.h>
#include <string.h>
#include <sys/math_extras.h>
#include <sys/rb.h>
#include <kernel_structs.h>
#include <sys/sys_io.h>
#include <ksched.h>
#include <syscall.h>
#include <syscall_handler.h>
#include <device.h>
#include <init.h>
#include <stdbool.h>
#include <app_memory/app_memdomain.h>
#include <sys/libc-hooks.h>
#include <sys/mutex.h>
#include <inttypes.h>
#include <linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif
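
/* Applications that drop to user mode make these partitions accessible
 * by adding them to a memory domain before starting user threads, e.g.
 * (illustrative sketch, not part of this file's logic):
 *
 *     k_mem_domain_add_partition(&app_domain, &k_mbedtls_partition);
 */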

#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive.  Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly.  The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;       /* kobj rbtree/dlist */
static struct k_spinlock objfree_lock;     /* k_object_free */
#endif
static struct k_spinlock obj_lock;         /* kobj struct data */

#define MAX_THREAD_BITS		(CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct z_object *obj = z_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif

#define DYN_OBJ_DATA_ALIGN		\
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct z_object kobj;
	sys_dnode_t dobj_list;
	struct rbnode node; /* must be immediately before data member */

	/* The object itself */
	uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN);
};
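
/* Rough memory layout of one such allocation (an illustrative sketch,
 * not generated output):
 *
 *     +------------------+ <- block returned by z_thread_aligned_alloc()
 *     | kobj             |    bookkeeping: type, flags, perms, name
 *     | dobj_list        |    linkage in obj_list
 *     | node             |    linkage in obj_rb_tree
 *     +------------------+
 *     | data[]           | <- aligned object storage; this is the pointer
 *     +------------------+    handed back to callers, and CONTAINER_OF()
 *                             recovers the enclosing struct dyn_obj.
 */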

extern struct z_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

static bool node_lessthan(struct rbnode *a, struct rbnode *b);

/*
 * Red/black tree of allocated kernel objects, for reasonably fast lookups
 * based on object pointer values.
 */
static struct rbtree obj_rb_tree = {
	.lessthan_fn = node_lessthan
};

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace both obj_rb_tree
 * and obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = sizeof(void *);
#endif
		break;
	default:
		ret = sizeof(void *);
		break;
	}

	return ret;
}

static bool node_lessthan(struct rbnode *a, struct rbnode *b)
{
	return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
	return CONTAINER_OF(node, struct dyn_obj, node);
}

static inline struct rbnode *dyn_obj_to_node(void *obj)
{
	struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);

	return &dobj->node;
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct rbnode *node;
	struct dyn_obj *ret;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct rbnode
	 */
	node = dyn_obj_to_node(obj);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);
	if (rb_contains(&obj_rb_tree, node)) {
		ret = node_to_dyn_obj(node);
	} else {
		ret = NULL;
	}
	k_spin_unlock(&lists_lock, key);

	return ret;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, it will be marked as
 * used before this function returns.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			z_object_wordlist_foreach(clear_perms_cb,
						   (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* To prevent leaking permissions when the index is recycled */
	z_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}

struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
	if (dyn == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
		return NULL;
	}

	dyn->kobj.name = &dyn->data;
	dyn->kobj.type = K_OBJ_ANY;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	rb_insert(&obj_rb_tree, &dyn->node);
	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	struct z_object *zo;
	uintptr_t tidx = 0;

	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:			/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
	case K_OBJ_THREAD_STACK_ELEMENT:	/* No aligned allocator */
	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = z_dynamic_object_aligned_create(obj_align_get(otype),
					     obj_size_get(otype));
	if (zo == NULL) {
		return NULL;
	}
	zo->type = otype;

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	z_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}
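
/* Typical application-level use goes through the k_object_alloc()
 * syscall wrapper, e.g. (illustrative sketch; assumes the calling thread
 * has a resource pool/heap assigned so z_thread_malloc() can succeed):
 *
 *     struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *     if (sem != NULL) {
 *             k_sem_init(sem, 0, 1);
 *     }
 */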

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		rb_remove(&obj_rb_tree, &dyn->node);
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn);
	}
}

struct z_object *z_object_find(const void *obj)
{
	struct z_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dynamic_obj;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dynamic_obj = dyn_object_find((void *)obj);
		if (dynamic_obj != NULL) {
			ret = &dynamic_obj->kobj;
		}
	}

	return ret;
}

void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct z_object *ko;

	ko = z_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

static void unref_check(struct z_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	struct dyn_obj *dyn =
			CONTAINER_OF(ko, struct dyn_obj, kobj);

	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		goto out;
	}

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	rb_remove(&obj_rb_tree, &dyn->node);
	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn);
out:
#endif
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct z_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
				  (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void z_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		z_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}
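
/* Permission inheritance is driven by thread creation when the
 * K_INHERIT_PERMS option is given, e.g. (illustrative sketch; the thread,
 * stack, entry point, and priority names are placeholders):
 *
 *     k_thread_create(&child_thread, child_stack, STACK_SIZE,
 *                     child_entry, NULL, NULL, NULL,
 *                     PRIORITY, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
 */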

void z_thread_perms_set(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void z_thread_perms_clear(struct z_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct z_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void z_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		z_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct z_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct z_object *ko)
{
	int index = thread_index_get(_current);
	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void z_dump_object_error(int retval, const void *obj, struct z_object *ko,
			enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Unhandled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		z_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct z_object *ko = z_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
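
/* Illustrative use of the access control API from supervisor code
 * (a sketch, not part of this file; user_tid is a placeholder thread ID):
 *
 *     K_SEM_DEFINE(shared_sem, 0, 1);
 *
 *     k_object_access_grant(&shared_sem, user_tid); // one specific thread
 *     k_object_access_all_grant(&shared_sem);       // or every thread
 */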

int z_object_validate(struct z_object *ko, enum k_objects otype,
		       enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		(otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. For _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) { /* _OBJ_INIT_FALSE case */
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}
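
/* Syscall verification handlers typically reach this function through the
 * Z_SYSCALL_OBJ() family of macros rather than calling it directly, e.g.
 * (illustrative sketch of a handler):
 *
 *     static inline void z_vrfy_k_sem_give(struct k_sem *sem)
 *     {
 *             Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
 *             z_impl_k_sem_give(sem);
 *     }
 */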

void z_object_init(const void *obj)
{
	struct z_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in z_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = z_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
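
/* Kernel object initialization code calls this once the object is in a
 * usable state, following the usual pattern (illustrative sketch; the
 * init function shown is generic, not a specific kernel API):
 *
 *     void z_impl_k_foo_init(struct k_foo *foo)
 *     {
 *             ... set up the object's fields ...
 *             z_object_init(foo);
 *     }
 */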

void z_object_recycle(const void *obj)
{
	struct z_object *ko = z_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		z_thread_perms_set(ko, k_current_get());
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void z_object_uninit(const void *obj)
{
	struct z_object *ko;

	/* See comments in z_object_init() */
	ko = z_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *z_user_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}
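
/* Typical use from a verification handler that needs a kernel-side copy
 * of a user-supplied buffer (illustrative sketch; the buffer and length
 * names are hypothetical):
 *
 *     void *kernel_copy = z_user_alloc_from_copy(user_buf, len);
 *
 *     Z_OOPS(Z_SYSCALL_VERIFY_MSG(kernel_copy != NULL,
 *				   "memory allocation failed"));
 *     ... use kernel_copy, then k_free() it when done ...
 */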

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? Z_SYSCALL_MEMORY_WRITE(dst, size) :
			Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int z_user_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int z_user_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *z_user_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = z_user_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int z_user_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = z_user_from_copy(dst, src, actual_len);

	/* See comment above in z_user_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}
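
/* Handler-side use looks like this (illustrative sketch; the buffer size
 * and the user_name parameter are hypothetical):
 *
 *     char name[32];
 *
 *     Z_OOPS(z_user_string_copy(name, user_name, sizeof(name)));
 */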

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(const struct device *unused)
{
	struct z_app_region *region, *end;

	ARG_UNUSED(unused);

	end = (struct z_app_region *)&__app_shmem_regions_end;
	region = (struct z_app_region *)&__app_shmem_regions_start;

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait
		 * for the paging mechanism to be initialized before we can
		 * zero them out.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), only
		 * clear regions inside the pinned area, since that is all
		 * that is available this early. During post-kernel init
		 * (z_sys_post_kernel == true), skip regions inside the pinned
		 * area as they have already been cleared and are possibly
		 * already in use; clear everything else.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT(app_shmem_bss_zero, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for the
 * paging mechanism to be initialized before we can zero them out, so
 * run the init function a second time post-kernel.
 */
SYS_INIT(app_shmem_bss_zero, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <syscall_dispatch.c>