1 /*
2 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief Private kernel definitions
10 *
11 * This file contains private kernel function/macro definitions and various
12 * other definitions for the RISCV processor architecture.
13 */
14
15 #ifndef ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_
16 #define ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_
17
18 #include <kernel_arch_data.h>
19 #include <pmp.h>
20
21 #include <zephyr/platform/hooks.h>
22
23 #ifdef __cplusplus
24 extern "C" {
25 #endif
26
27 #ifndef _ASMLANGUAGE
28
/**
 * @brief Early architecture-specific kernel init, run on the boot hart.
 *
 * Ordering here matters: mscratch must point at the boot CPU's _cpu struct
 * before any trap/IRQ can occur, per-CPU hart data is populated before the
 * secondary-CPU mapping loop uses it, and PMP setup runs last.
 */
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Zero tp so no stale thread pointer is dereferenced before TLS is
	 * set up for the first thread.
	 */
	__asm__ volatile ("li tp, 0");
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	/* mscratch holds the current CPU's _cpu struct; the trap entry code
	 * relies on it to find per-CPU state.
	 */
	csr_write(mscratch, &_kernel.cpus[0]);
#endif
#ifdef CONFIG_SMP
	/* Record which physical hart the boot CPU is running on and mark it
	 * online before secondary CPUs are brought up.
	 */
	_kernel.cpus[0].arch.hartid = csr_read(mhartid);
	_kernel.cpus[0].arch.online = true;
#endif
#if ((CONFIG_MP_MAX_NUM_CPUS) > 1)
	/* Hart IDs of all enabled CPU nodes, taken from the devicetree
	 * cpus node register addresses.
	 */
	unsigned int cpu_node_list[] = {
		DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))
	};
	unsigned int cpu_num, hart_x;

	/* Assign a hart ID to each secondary logical CPU, walking the DT
	 * list and skipping over the entry belonging to the boot hart
	 * (which already owns logical CPU 0).
	 */
	for (cpu_num = 1, hart_x = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		if (cpu_node_list[hart_x] == _kernel.cpus[0].arch.hartid) {
			hart_x++;
		}
		_kernel.cpus[cpu_num].arch.hartid = cpu_node_list[hart_x];
		hart_x++;
	}
#endif
#ifdef CONFIG_RISCV_PMP
	z_riscv_pmp_init();
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
62
63 static ALWAYS_INLINE void
arch_switch(void * switch_to,void ** switched_from)64 arch_switch(void *switch_to, void **switched_from)
65 {
66 extern void z_riscv_switch(struct k_thread *new, struct k_thread *old);
67 struct k_thread *new = switch_to;
68 struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
69 switch_handle);
70 #ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
71 arch_syscall_invoke2((uintptr_t)new, (uintptr_t)old, RV_ECALL_SCHEDULE);
72 #else
73 z_riscv_switch(new, old);
74 #endif
75 }
76
/**
 * @brief Fatal error handler; thin wrapper around z_riscv_fatal_error_csf().
 *
 * @param reason Fatal error reason code.
 * @param esf    Exception stack frame at the time of the error.
 */
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
				       const struct arch_esf *esf);

/**
 * @brief Fatal error handler taking the callee-saved register set as well.
 *
 * @param reason Fatal error reason code.
 * @param esf    Exception stack frame at the time of the error.
 * @param csf    Callee-saved registers of the faulting context.
 */
FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
					   const _callee_saved_t *csf);
83
arch_is_in_isr(void)84 static inline bool arch_is_in_isr(void)
85 {
86 #ifdef CONFIG_SMP
87 unsigned int key = arch_irq_lock();
88 bool ret = arch_curr_cpu()->nested != 0U;
89
90 arch_irq_unlock(key);
91 return ret;
92 #else
93 return _kernel.cpus[0].nested != 0U;
94 #endif
95 }
96
/**
 * @brief Drop to user mode and run the thread entry point; never returns.
 *
 * @param user_entry  Thread entry function to run in user mode.
 * @param p1          First entry point argument.
 * @param p2          Second entry point argument.
 * @param p3          Third entry point argument.
 * @param stack_end   End (top) of the user stack region.
 * @param stack_start Start (base) of the user stack region.
 */
extern FUNC_NORETURN void z_riscv_userspace_enter(k_thread_entry_t user_entry,
						  void *p1, void *p2, void *p3,
						  uint32_t stack_end,
						  uint32_t stack_start);

#ifdef CONFIG_IRQ_OFFLOAD
/* Run the pending IRQ offload routine; used by the irq_offload() test API. */
int z_irq_do_offload(void);
#endif

#ifdef CONFIG_FPU_SHARING
/* Flush the FPU context owned by this CPU back to its owner thread. */
void arch_flush_local_fpu(void);
/* Ask another CPU (via IPI) to flush its FPU context. */
void arch_flush_fpu_ipi(unsigned int cpu);
#endif

#ifndef CONFIG_MULTITHREADING
/* Run main() directly on the boot CPU when the scheduler is disabled. */
extern FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(
	k_thread_entry_t main_func, void *p1, void *p2, void *p3);

#define ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING \
	z_riscv_switch_to_main_no_multithreading

#endif /* !CONFIG_MULTITHREADING */
119
120 #endif /* _ASMLANGUAGE */
121
122 #ifdef __cplusplus
123 }
124 #endif
125
126 #endif /* ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_ */
127