/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/sys/util.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

/* TODO: Find a better place to put this. Since we pull the entire
 * lib..__modules__crypto__mbedtls.a globals into the app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive.  Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly.  The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;       /* kobj dlist */
static struct k_spinlock objfree_lock;     /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM MPU, ARC MPU and RISC-V PMP we may have two different alignment
 * requirements when dynamically allocating thread stacks, one for the
 * privileged stack and one for the user stack, so we need to account
 * for the worst-case alignment and reserve space for it.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) || defined(CONFIG_RISCV_PMP)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU || CONFIG_RISCV_PMP */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock;         /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * BITS_PER_BYTE)

#ifdef CONFIG_DYNAMIC_OBJECTS
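/* Bitmap of thread index availability: a set bit means the corresponding
 * index is free. See thread_idx_alloc() and thread_idx_free() below.
 */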
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <zephyr/otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif /* CONFIG_LOG */
	return ret;
}

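/* Context passed to wordlist_cb() when propagating a parent thread's
 * kernel object permissions to a child; see k_thread_perms_inherit().
 */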
struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_PMP_STACK_GUARD)
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_PMP_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN		\
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <zephyr/otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(const void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is the kobj.name member of the containing struct
	 * dyn_obj, so walk the list of allocated objects until a
	 * matching entry is found.
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

 end:
	k_spin_unlock(&lists_lock, key);

	return node;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			/* Clear the bit. We already know the array index,
			 * and the bit to be cleared.
			 */
			_thread_idx_map[i] &= ~(BIT(idx - 1));

			/* Clear permission from all objects */
			k_object_wordlist_foreach(clear_perms_cb,
						   (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* Prevent leaking permissions when the index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	/* Figure out which bit to set in _thread_idx_map[] and set it. */
	int base = tidx / NUM_BITS(_thread_idx_map[0]);
	int offset = tidx % NUM_BITS(_thread_idx_map[0]);

	_thread_idx_map[base] |= BIT(offset);
}

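/* For dynamically allocated thread stacks (K_OBJ_THREAD_STACK_ELEMENT),
 * dyn->data is a single buffer sized by STACK_ELEMENT_DATA_SIZE(). With
 * CONFIG_GEN_PRIV_STACKS on ARM MPU, ARC MPU or RISC-V PMP it holds the
 * privileged stack first, then the user stack rounded up to the stack
 * object alignment, with a struct z_stack_data stored at the very end;
 * kobj.name points at the user stack portion handed back to the caller.
 */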
static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						     adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) || defined(CONFIG_RISCV_PMP)
		dyn->kobj.name = (void *)ROUND_UP(
			  ((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			  Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU || CONFIG_RISCV_PMP */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}

static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:			/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:			/* Lives in user memory */
	case K_OBJ_NET_SOCKET:			/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, arch_current_thread());

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}

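/* Illustrative usage (not part of this file): a thread that has a
 * resource pool assigned can allocate kernel objects at run time and
 * later release them, e.g.:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *		...
 *		k_object_free(sem);
 *	}
 */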
void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}

struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find(obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

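/* Called whenever a thread's permission bit on an object is cleared.
 * For dynamically allocated objects, once no thread holds permission
 * any more, perform type-specific cleanup and return the memory to the
 * heap.
 */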
static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif /* CONFIG_PIPES */
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
				  ((struct k_thread *)ko->name != ctx->parent)) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(arch_current_thread());
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(arch_current_thread());
	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		arch_current_thread(), index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Not handled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, arch_current_thread());
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
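
/* Illustrative usage (not part of this file): a supervisor thread
 * typically grants a user thread access to the specific objects it
 * will need before starting it, for example:
 *
 *	k_object_access_grant(&my_sem, &user_thread);
 *	k_object_access_grant(&my_queue, &user_thread);
 *	k_thread_start(&user_thread);
 *
 * Here my_sem, my_queue and user_thread are hypothetical statically
 * declared objects.
 */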

int k_object_validate(struct k_object *ko, enum k_objects otype,
		       enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		((otype != K_OBJ_ANY) && (ko->type != otype)))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. For _OBJ_INIT_ANY we don't care. */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) {
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY: nothing to check */
	}

	return 0;
}

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

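/* Reset an object's permission bitmap and mark it initialized again,
 * granting permission to the calling thread; intended for reusing a
 * kernel object's memory for a fresh instance.
 */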
void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, arch_current_thread());
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
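
/* Illustrative pattern (not part of this file): a syscall verification
 * handler, here a hypothetical z_vrfy_my_syscall() taking a hypothetical
 * struct my_args *uargs, copies user-supplied data into kernel memory
 * before using it, e.g.:
 *
 *	struct my_args kargs;
 *
 *	if (k_usermode_from_copy(&kargs, uargs, sizeof(kargs)) != 0) {
 *		return -EFAULT;
 *	}
 *	return z_impl_my_syscall(&kargs);
 */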
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
			K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(void)
{
	struct z_app_region *region, *end;


	end = (struct z_app_region *)&__app_shmem_regions_end[0];
	region = (struct z_app_region *)&__app_shmem_regions_start[0];

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait for
		 * paging mechanism to be initialized before we can zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), clear
		 * only the regions that fall within the pinned app shared
		 * memory area and skip the rest. During post-kernel init
		 * (z_sys_post_kernel == true), skip regions within the pinned
		 * area, as they have already been cleared and may be in use,
		 * and clear the rest.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <zephyr/syscall_dispatch.c>