/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * Copyright (c) 2020 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

/* this file is only meant to be included by kernel_structs.h */

#ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_

#ifndef _ASMLANGUAGE
#include <kernel_internal.h>
#include <string.h>
#include <zephyr/cache.h>
#include <zsr.h>

#ifdef __cplusplus
extern "C" {
#endif

extern void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf);

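/* Per-CPU interrupt stacks; the DECLARE macro only declares them here,
 * the storage itself is defined by the kernel proper.
 */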
K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);

static ALWAYS_INLINE void z_xtensa_kernel_init(void)
{
	_cpu_t *cpu0 = &_kernel.cpus[0];

#ifdef CONFIG_KERNEL_COHERENCE
	/* Make sure we don't have live data for unexpected cached
	 * regions due to boot firmware
	 */
	sys_cache_data_flush_and_invd_all();

	/* Our cache top stash location might have junk in it from a
	 * pre-boot environment. Must be zero or valid!
	 */
	XTENSA_WSR(ZSR_FLUSH_STR, 0);
#endif

	cpu0->nested = 0;

	/* The asm2 scheme keeps the kernel pointer in a scratch SR
	 * (see zsr.h for generation specifics) for easy access. That
	 * saves 4 bytes of immediate value to store the address when
	 * compared to the legacy scheme. But in SMP this record is a
	 * per-CPU thing and having it stored in an SR already is a big
	 * win.
	 */
	XTENSA_WSR(ZSR_CPU_STR, cpu0);
}

static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifndef CONFIG_XTENSA_MMU
	/* When CONFIG_XTENSA_MMU is enabled, this is already called in
	 * z_xtensa_mmu_init() before z_cstart(), so there is no need
	 * to call it again here.
	 */
	z_xtensa_kernel_init();
#endif

#ifdef CONFIG_INIT_STACKS
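	/* Fill the interrupt stack with a known poison pattern so that
	 * peak stack usage can be measured later; this is what
	 * CONFIG_INIT_STACKS exists for.
	 */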
	memset(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
	       K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));
#endif
}

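/* Low level context switch primitive, implemented in the Xtensa
 * assembly code.
 */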
void xtensa_switch(void *switch_to, void **switched_from);

static inline void arch_switch(void *switch_to, void **switched_from)
{
	xtensa_switch(switch_to, switched_from);
}

#ifdef CONFIG_KERNEL_COHERENCE
static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread,
					     void *old_switch_handle,
					     struct k_thread *new_thread)
{
	int32_t curr_cpu = _current_cpu->id;

	size_t ostack = old_thread->stack_info.start;
	size_t osz = old_thread->stack_info.size;
	size_t osp = (size_t) old_switch_handle;

	size_t nstack = new_thread->stack_info.start;
	size_t nsz = new_thread->stack_info.size;
	size_t nsp = (size_t) new_thread->switch_handle;

	int zero = 0;

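	/* Clear the flush-bound stash register up front; it is only
	 * set again (at the bottom of this function) for the direct
	 * context switch case, where the assembly code finishes
	 * flushing the live stack region.
	 */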
	__asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(zero));

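	/* Spill any still-live register windows out to their stack
	 * frames before doing cache work on the stacks below. The
	 * call0 clobbers a0 (the return address), so a0 is saved and
	 * restored around it.
	 */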
	if (old_switch_handle != NULL) {
		int32_t a0save;

		__asm__ volatile("mov %0, a0;"
				 "call0 xtensa_spill_reg_windows;"
				 "mov a0, %0"
				 : "=r"(a0save));
	}

	/* This option guarantees that a live thread will never be
	 * migrated to a different CPU, so we can safely return here
	 * without invalidating or flushing the thread stacks' cache
	 * lines.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		return;
	}

	/* The "live" area (the region between the switch handle,
	 * which is the stack pointer, and the top of the stack
	 * memory) of the inbound stack needs to be invalidated if we
	 * last ran on another cpu: it may contain data that was
	 * modified there, and our cache may be stale.
	 *
	 * The corresponding "dead area" of the inbound stack can be
	 * ignored. We may have cached data in that region, but by
	 * definition any unused stack memory will always be written
	 * before being read (well, unless the code has an
	 * uninitialized data error) so our stale cache will be
	 * automatically overwritten as needed.
	 */
	if (curr_cpu != new_thread->arch.last_cpu) {
		sys_cache_data_invd_range((void *)nsp, (nstack + nsz) - nsp);
	}
	old_thread->arch.last_cpu = curr_cpu;

	/* Dummy threads appear at system initialization, but don't
	 * have stack_info data and will never be saved. Ignore.
	 */
	if (old_thread->base.thread_state & _THREAD_DUMMY) {
		return;
	}

	/* For the outbound thread, we obviously want to flush any data
	 * in the live area (for the benefit of whichever CPU runs
	 * this thread next). But we ALSO have to invalidate the dead
	 * region of the stack. Those lines may have DIRTY data in
	 * our own cache, and we cannot be allowed to write them back
	 * later on top of the stack's legitimate owner!
	 *
	 * This work comes in two flavors. In interrupts, the
	 * outgoing context has already been saved for us, so we can
	 * do the flush right here. In direct context switches, we
	 * are still using the stack, so we do the invalidate of the
	 * bottom here (and flush the line containing SP to handle
	 * the overlap). The remaining flush of the live region
	 * happens in the assembly code once the context is pushed, up
	 * to the stack top stashed in a special register.
	 */
	if (old_switch_handle != NULL) {
		sys_cache_data_flush_range((void *)osp, (ostack + osz) - osp);
		sys_cache_data_invd_range((void *)ostack, osp - ostack);
	} else {
		/* When in a switch, our current stack is the outbound
		 * stack. Flush the single line containing the stack
		 * bottom (which is live data) before invalidating
		 * everything below that. Remember that the 16 bytes
		 * below our SP are the calling function's spill area
		 * and may be live too.
		 */
		__asm__ volatile("mov %0, a1" : "=r"(osp));
		osp -= 16;
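		/* The cache range operations work on whole cache
		 * lines, so a length of 1 is enough to flush the line
		 * containing the adjusted SP.
		 */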
		sys_cache_data_flush_range((void *)osp, 1);
		sys_cache_data_invd_range((void *)ostack, osp - ostack);

		uint32_t end = ostack + osz;

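		/* Stash the stack top in the flush-bound register so
		 * the context switch assembly can flush the remaining
		 * live region once the outgoing context is pushed.
		 */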
		__asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(end));
	}
}
#endif

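/* True when the current CPU is servicing an interrupt, i.e. its nested
 * interrupt count is nonzero.
 */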
static inline bool arch_is_in_isr(void)
{
	return arch_curr_cpu()->nested != 0U;
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_ */