/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif

/* TODO: Find a better place to put this. Since we pull all of the
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to non-recursive spinlocks.  Rather
 * than try to redesign as part of spinlockification, this uses
 * multiple locks to preserve the original semantics exactly.  The
 * locks are named for the data they protect where possible, or just
 * for the code that uses them where not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;       /* kobj dlist */
static struct k_spinlock objfree_lock;     /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM & ARC MPU we may have two different alignment requirements
 * when dynamically allocating thread stacks, one for the privileged
 * stack and another for the user stack, so we need to account for the
 * worst-case alignment scenario and reserve space for it.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + Z_THREAD_STACK_SIZE_ADJUST(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	Z_THREAD_STACK_SIZE_ADJUST(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif
static struct k_spinlock obj_lock;         /* kobj struct data */

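/* One bit per possible thread index; each kernel object tracks the
 * threads granted permission on it as a bitfield of
 * CONFIG_MAX_THREAD_BYTES bytes (the perms[] member of struct k_object).
 */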
#define MAX_THREAD_BITS		(CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work reliably except in very recent
	 * GCC; without this guard these literal strings would appear in
	 * the binary even if otype_to_str() were omitted by the linker.
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object itself resides,
 * so it is the allocation that actually needs to be aligned.
 * The object data is allocated separately from struct dyn_obj with
 * the alignment required by the requested kernel object type; for
 * generic objects this is the maximum alignment needed for any
 * kernel object (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN		\
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer (kobj.name) is recorded in the containing struct
	 * dyn_obj, so walk the list of allocated objects looking for
	 * a matching entry.
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

 end:
	k_spin_unlock(&lists_lock, key);

	return node;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
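	/* A set bit in _thread_idx_map means the corresponding thread
	 * index is free; find_lsb_set() returns a 1-based bit position,
	 * or 0 if no bit in this byte is set.
	 */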
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			k_object_wordlist_foreach(clear_perms_cb,
						   (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* Prevent leaked permissions when the index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}

static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						     adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
		dyn->kobj.name = (void *)ROUND_UP(
			  ((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			  Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif
#else
		dyn->kobj.name = dyn->data;
#endif
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}
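
/* Example (sketch, not from the original source): allocating an
 * anonymous dynamic kernel object. On success the returned
 * struct k_object's ->name member points at the usable allocation,
 * which is at least "size" bytes and carries the requested alignment.
 *
 *   struct k_object *zo = k_object_create_dynamic_aligned(8, 64);
 *
 *   if (zo != NULL) {
 *       void *mem = zo->name;   // at least 64 bytes, 8-byte aligned
 *   }
 */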

static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:			/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}

struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find((void *)obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
				  (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(_current);
	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Not handled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
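
/* Example (sketch, not from the original source): making a statically
 * defined kernel object usable from any user thread. "my_msgq" is a
 * hypothetical object defined elsewhere, e.g. with K_MSGQ_DEFINE():
 *
 *   k_object_access_all_grant(&my_msgq);
 */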

int k_object_validate(struct k_object *ko, enum k_objects otype,
		       enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		(otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) { /* _OBJ_INIT_FALSE case */
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}
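
/* Return values from k_object_validate() above, as reported by
 * k_object_dump_error():
 *   -EBADF       not a known kernel object, or wrong object type
 *   -EPERM       the calling thread has no permission on the object
 *   -EINVAL      object must be initialized but is not
 *   -EADDRINUSE  object must be uninitialized but is already in use
 */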

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, _current);
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}
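
/* Note: a non-NULL buffer returned above comes from the calling thread's
 * resource pool (z_thread_malloc()) and is released with k_free(), the
 * same way other dynamic allocations in this file are freed.
 */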

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
			K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}
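
/* Example (sketch, not from the original source): a syscall handler
 * copying a small result struct out to a user-supplied pointer
 * "user_ptr" (hypothetical), failing the call if the user thread lacks
 * write access to that memory:
 *
 *   struct my_info info = { .value = 42 };
 *
 *   if (k_usermode_to_copy(user_ptr, &info, sizeof(info)) != 0) {
 *       return -EFAULT;
 *   }
 */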

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}
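
/* Example (sketch, not from the original source): copying a
 * NUL-terminated string from a user-supplied pointer "user_str"
 * (hypothetical) into a fixed kernel-side buffer; a non-zero return
 * means the string was inaccessible, unterminated, or too long:
 *
 *   char name[32];
 *
 *   if (k_usermode_string_copy(name, user_str, sizeof(name)) != 0) {
 *       return -EINVAL;
 *   }
 */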

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(void)
{
	struct z_app_region *region, *end;


	end = (struct z_app_region *)&__app_shmem_regions_end;
	region = (struct z_app_region *)&__app_shmem_regions_start;

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait
		 * for the paging mechanism to be initialized before we can
		 * zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), only
		 * regions inside the pinned app shared memory section are
		 * usable, so clear those and skip the rest. During
		 * post-kernel init (z_sys_post_kernel == true), the pinned
		 * regions have already been cleared (and are possibly in
		 * use), so clear only the remaining regions.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for the
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <syscall_dispatch.c>