/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel initialization module
 *
 * This module contains routines that are used to initialize the kernel.
 */

#include <zephyr.h>
#include <offsets_short.h>
#include <kernel.h>
#include <sys/printk.h>
#include <debug/stack.h>
#include <random/rand32.h>
#include <linker/sections.h>
#include <toolchain.h>
#include <kernel_structs.h>
#include <device.h>
#include <init.h>
#include <linker/linker-defs.h>
#include <ksched.h>
#include <string.h>
#include <sys/dlist.h>
#include <kernel_internal.h>
#include <drivers/entropy.h>
#include <logging/log_ctrl.h>
#include <tracing/tracing.h>
#include <stdbool.h>
#include <debug/gcov.h>
#include <kswap.h>
#include <timing/timing.h>
#include <logging/log.h>
LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);

/* the only struct z_kernel instance */
struct z_kernel _kernel;

/* init/main and idle threads */
K_THREAD_PINNED_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
__pinned_bss
struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];

static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
					  CONFIG_MP_NUM_CPUS,
					  CONFIG_IDLE_STACK_SIZE);
#endif /* CONFIG_MULTITHREADING */

/*
 * storage space for the interrupt stack
 *
 * Note: This area is used as the system stack during kernel initialization,
 * since the kernel hasn't yet set up its own stack areas. The dual purposing
 * of this area is safe since interrupts are disabled until the kernel context
 * switches to the init thread.
 */
K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,
				   CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

extern void idle(void *unused1, void *unused2, void *unused3);


/* LCOV_EXCL_START
 *
 * This code is called so early in the boot process that code coverage
 * doesn't work properly. In addition, not all arches call this code;
 * some, such as x86, do the same work in optimized assembly instead.
 */

/**
 *
 * @brief Clear BSS
 *
 * This routine clears the BSS region, so all bytes are 0.
 *
 * @return N/A
 */
__boot_func
void z_bss_zero(void)
{
	(void)memset(__bss_start, 0, __bss_end - __bss_start);
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
	(void)memset(&__ccm_bss_start, 0,
		     ((uint32_t) &__ccm_bss_end - (uint32_t) &__ccm_bss_start));
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
	(void)memset(&__dtcm_bss_start, 0,
		     ((uint32_t) &__dtcm_bss_end - (uint32_t) &__dtcm_bss_start));
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void bss_zeroing_relocation(void);

	bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
	(void)memset(&__gcov_bss_start, 0,
		     ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
#endif
}

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/**
 * @brief Clear BSS within the boot region
 *
 * This routine clears the BSS within the boot region.
 * This is separate from z_bss_zero() as the boot region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
__boot_func
void z_bss_zero_boot(void)
{
	(void)memset(&lnkr_boot_bss_start, 0,
		     (uintptr_t)&lnkr_boot_bss_end
		     - (uintptr_t)&lnkr_boot_bss_start);
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/**
 * @brief Clear BSS within the pinned region
 *
 * This routine clears the BSS within the pinned region.
 * This is separate from z_bss_zero() as the pinned region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
__boot_func
#else
__pinned_func
#endif
void z_bss_zero_pinned(void)
{
	(void)memset(&lnkr_pinned_bss_start, 0,
		     (uintptr_t)&lnkr_pinned_bss_end
		     - (uintptr_t)&lnkr_pinned_bss_start);
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

#ifdef CONFIG_STACK_CANARIES
extern volatile uintptr_t __stack_chk_guard;
#endif /* CONFIG_STACK_CANARIES */

/* LCOV_EXCL_STOP */

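/* Flipped to true just before the POST_KERNEL init level runs;
 * k_is_pre_kernel() reads this flag to distinguish early boot from
 * normal operation.
 */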
__pinned_bss
bool z_sys_post_kernel;

extern void boot_banner(void);

/**
 *
 * @brief Mainline for kernel's background thread
 *
 * This routine completes kernel initialization by invoking the remaining
 * init functions, then invokes the application's main() routine.
 *
 * @return N/A
 */
__boot_func
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_MMU
	/* Invoked here such that backing store or eviction algorithms may
	 * initialize kernel objects, and that all POST_KERNEL and later tasks
	 * may perform memory management tasks (except for z_phys_map() which
	 * is allowed at any time)
	 */
	z_mem_manage_init();
#endif /* CONFIG_MMU */
	z_sys_post_kernel = true;

	z_sys_init_run_level(_SYS_INIT_LEVEL_POST_KERNEL);
#if CONFIG_STACK_POINTER_RANDOM
	z_stack_adjust_initialized = 1;
#endif
	boot_banner();

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_ARCH_POSIX)
	void z_cpp_init_static(void);
	z_cpp_init_static();
#endif

	/* Final init level before app starts */
	z_sys_init_run_level(_SYS_INIT_LEVEL_APPLICATION);

	z_init_static_threads();

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
#endif

#ifdef CONFIG_SMP
	z_smp_init();
	z_sys_init_run_level(_SYS_INIT_LEVEL_SMP);
#endif

#ifdef CONFIG_MMU
	z_mem_manage_boot_finish();
#endif /* CONFIG_MMU */

	extern void main(void);

	main();

	/* Mark nonessential since main() has no more work to do */
	z_main_thread.base.user_options &= ~K_ESSENTIAL;

#ifdef CONFIG_COVERAGE_DUMP
	/* Dump coverage data once main() has exited. */
	gcov_coverage_dump();
#endif
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */

#if defined(CONFIG_MULTITHREADING)
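/* Create and start the idle thread for CPU "i". The kernel's idle loop,
 * idle(), receives the per-CPU struct as its first argument.
 */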
__boot_func
static void init_idle_thread(int i)
{
	struct k_thread *thread = &z_idle_threads[i];
	k_thread_stack_t *stack = z_idle_stacks[i];

#ifdef CONFIG_THREAD_NAME
	char tname[8];

	snprintk(tname, 8, "idle %02d", i);
#else
	char *tname = NULL;
#endif /* CONFIG_THREAD_NAME */

	z_setup_new_thread(thread, stack,
			   CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
			   tname);
	z_mark_thread_as_started(thread);

#ifdef CONFIG_SMP
	thread->base.is_idle = 1U;
#endif
}

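/* Re-run idle thread setup for CPU "i", e.g. when a CPU is brought back
 * up after having been taken down (the callers live outside this file,
 * so the exact use case is an assumption here).
 */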
void z_reinit_idle_thread(int i)
{
	init_idle_thread(i);
}

/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return initial stack pointer for the main thread
 */
__boot_func
static char *prepare_multithreading(void)
{
	char *stack_ptr;

	/* _kernel.ready_q is all zeroes */
	z_sched_init();

#ifndef CONFIG_SMP
	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   from working as intended
	 */
	_kernel.ready_q.cache = &z_main_thread;
#endif
	stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
				       CONFIG_MAIN_STACK_SIZE, bg_thread_main,
				       NULL, NULL, NULL,
				       CONFIG_MAIN_THREAD_PRIORITY,
				       K_ESSENTIAL, "main");
	z_mark_thread_as_started(&z_main_thread);
	z_ready_thread(&z_main_thread);

	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		init_idle_thread(i);
		_kernel.cpus[i].idle_thread = &z_idle_threads[i];
		_kernel.cpus[i].id = i;
		_kernel.cpus[i].irq_stack =
			(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]) +
			 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]));
	}

	return stack_ptr;
}

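/* Hand control from the early boot context over to the main thread; this
 * never returns. Architectures may supply their own swap-to-main routine;
 * otherwise a plain z_swap_unlocked() from the dummy thread does the job.
 */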
__boot_func
static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	arch_switch_to_main_thread(&z_main_thread, stack_ptr, bg_thread_main);
#else
	ARG_UNUSED(stack_ptr);
	/*
	 * Context switch to main task (entry function is bg_thread_main()):
	 * the current fake thread is not on a wait queue or ready queue, so
	 * it will never be rescheduled in.
	 */
	z_swap_unlocked();
#endif
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */

#if defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR)
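/**
 * @brief Fill a buffer with random bytes during early boot
 *
 * Prefers the chosen entropy driver, trying its ISR-safe API first, and
 * falls back to sys_rand32_get() when no driver is present or the driver
 * reports an error.
 */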
__boot_func
void z_early_boot_rand_get(uint8_t *buf, size_t length)
{
	int n = sizeof(uint32_t);
#ifdef CONFIG_ENTROPY_HAS_DRIVER
	const struct device *entropy = device_get_binding(DT_CHOSEN_ZEPHYR_ENTROPY_LABEL);
	int rc;

	if (entropy == NULL) {
		goto sys_rand_fallback;
	}

	/* Try to see if driver provides an ISR-specific API */
	rc = entropy_get_entropy_isr(entropy, buf, length, ENTROPY_BUSYWAIT);
	if (rc == -ENOTSUP) {
		/* Driver does not provide an ISR-specific API, assume it can
		 * be called from ISR context
		 */
		rc = entropy_get_entropy(entropy, buf, length);
	}

	if (rc >= 0) {
		return;
	}

	/* Fall through to fallback */

sys_rand_fallback:
#endif

	/* FIXME: this assumes sys_rand32_get() won't use any synchronization
	 * primitive, like semaphores or mutexes. It's too early in the boot
	 * process to use any of them. Ideally, only the path where entropy
	 * devices are available should be built; this is only a fallback for
	 * those devices without a HWRNG entropy driver.
	 */

	while (length > 0U) {
		uint32_t rndbits;
		uint8_t *p_rndbits = (uint8_t *)&rndbits;

		rndbits = sys_rand32_get();

		if (length < sizeof(uint32_t)) {
			n = length;
		}

		for (int i = 0; i < n; i++) {
			*buf = *p_rndbits;
			buf++;
			p_rndbits++;
		}

		length -= n;
	}
}
#endif /* defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR) */

/**
 *
 * @brief Initialize kernel
 *
 * This routine is invoked when the system is ready to run C code. The
 * processor must be running in 32-bit mode, and the BSS must have been
 * cleared/zeroed.
 *
 * @return Does not return
 */
__boot_func
FUNC_NORETURN void z_cstart(void)
{
	/* gcov hook needed to get the coverage report. */
	gcov_static_init();

	LOG_CORE_INIT();

	/* perform any architecture-specific initialization */
	arch_kernel_init();

#if defined(CONFIG_MULTITHREADING)
	/* Note: The z_ready_thread() call in prepare_multithreading() requires
	 * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
	 */
	struct k_thread dummy_thread;

	z_dummy_thread_init(&dummy_thread);
#endif
	/* do any necessary initialization of static devices */
	z_device_state_init();

	/* perform basic hardware initialization */
	z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
	z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
	uintptr_t stack_guard;

	z_early_boot_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
	__stack_chk_guard = stack_guard;
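	/* Shifting left by 8 zeroes the canary's least significant byte, so
	 * the canary contains a NUL terminator; the likely intent (our
	 * reading, not stated in the original) is to stop string operations
	 * from reading or writing past the canary.
	 */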
	__stack_chk_guard <<= 8;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
	timing_init();
	timing_start();
#endif

#ifdef CONFIG_MULTITHREADING
	switch_to_main_thread(prepare_multithreading());
#else
#ifdef ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING
	/* Custom ARCH-specific routine to switch to main()
	 * in the case of no multi-threading.
	 */
	ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING(bg_thread_main,
					      NULL, NULL, NULL);
#else
	bg_thread_main(NULL, NULL, NULL);

	/* LCOV_EXCL_START
	 * We've already dumped coverage data at this point.
	 */
	irq_lock();
	while (true) {
	}
	/* LCOV_EXCL_STOP */
#endif
#endif /* CONFIG_MULTITHREADING */

	/*
	 * Compiler can't tell that the above routines won't return and issues
	 * a warning unless we explicitly tell it that control never gets this
	 * far.
	 */

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}