/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * Copyright (c) 2020 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

/* this file is only meant to be included by kernel_structs.h */

#ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_

#ifndef _ASMLANGUAGE
#include <kernel_internal.h>
#include <string.h>
#include <zephyr/cache.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/zsr.h>

#ifdef __cplusplus
extern "C" {
#endif

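/* Per-CPU interrupt (ISR) stacks: one for each of up to
 * CONFIG_MP_MAX_NUM_CPUS CPUs, each CONFIG_ISR_STACK_SIZE bytes.
 */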
K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);

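/* Arch hook invoked during kernel initialization.  On Xtensa it only
 * needs to run the optional SoC per-core init hook, if enabled.
 */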
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}

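/* Low-level context switch.  On Xtensa the "switch handle" is the saved
 * stack pointer of a suspended context: xtensa_switch() resumes the
 * context identified by switch_to and stores the outgoing context's
 * handle through switched_from.
 */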
void xtensa_switch(void *switch_to, void **switched_from);

static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from)
{
	xtensa_switch(switch_to, switched_from);
}

#ifdef CONFIG_KERNEL_COHERENCE
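/* With CONFIG_KERNEL_COHERENCE, thread stacks live in cached memory that
 * is not coherent between CPUs, so on every context switch the inbound
 * and outbound stacks must be explicitly invalidated/flushed.  The
 * comments inside spell out exactly which regions are touched and why.
 */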
static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread,
					     void *old_switch_handle,
					     struct k_thread *new_thread)
{
	int32_t curr_cpu = _current_cpu->id;

	size_t ostack = old_thread->stack_info.start;
	size_t osz    = old_thread->stack_info.size;
	size_t osp    = (size_t) old_switch_handle;

	size_t nstack = new_thread->stack_info.start;
	size_t nsz    = new_thread->stack_info.size;
	size_t nsp    = (size_t) new_thread->switch_handle;

	int zero = 0;

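	/* Clear the ZSR_FLUSH scratch register up front.  The direct
	 * switch path below re-arms it with the stack top so the
	 * context switch assembly can finish flushing the live region;
	 * it must not carry a stale value from a previous switch.
	 */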
	__asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(zero));

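	/* Spill live register windows onto the outbound stack so its
	 * in-memory image is complete before any cache maintenance is
	 * done on it.  a0 is saved and restored by hand because call0
	 * clobbers it with the return address.
	 */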
	if (old_switch_handle != NULL) {
		int32_t a0save;

		__asm__ volatile("mov %0, a0;"
				 "call0 xtensa_spill_reg_windows;"
				 "mov a0, %0"
				 : "=r"(a0save));
	}

	/* With CONFIG_SCHED_CPU_MASK_PIN_ONLY, a live thread can never
	 * migrate to a different CPU, so we can safely return without
	 * invalidating or flushing the thread stacks.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		return;
	}

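	/* Rough sketch of the regions handled below.  The stack grows
	 * downward, so everything between the saved stack pointer (the
	 * switch handle) and the top of the stack buffer is "live",
	 * and everything below the stack pointer is "dead":
	 *
	 *  stack_info.start          sp (handle)        start + size
	 *       |<------ dead area ------>|<---- live area ---->|
	 *         (invalidate only)          (flush/invalidate)
	 */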
	/* The "live" area (the region between the switch handle,
	 * which is the stack pointer, and the top of the stack
	 * memory) of the inbound stack needs to be invalidated if we
	 * last ran on another cpu: it may contain data that was
	 * modified there, and our cache may be stale.
	 *
	 * The corresponding "dead area" of the inbound stack can be
	 * ignored.  We may have cached data in that region, but by
	 * definition any unused stack memory will always be written
	 * before being read (well, unless the code has an
	 * uninitialized data error) so our stale cache will be
	 * automatically overwritten as needed.
	 */
	if (curr_cpu != new_thread->arch.last_cpu) {
		sys_cache_data_invd_range((void *)nsp, (nstack + nsz) - nsp);
	}
	old_thread->arch.last_cpu = curr_cpu;

	/* Dummy threads appear at system initialization, but don't
	 * have stack_info data and will never be saved.  Ignore.
	 */
	if (old_thread->base.thread_state & _THREAD_DUMMY) {
		return;
	}

	/* For the outbound thread, we obviously want to flush any data
	 * in the live area (for the benefit of whichever CPU runs
	 * this thread next).  But we ALSO have to invalidate the dead
	 * region of the stack.  Those lines may have DIRTY data in
	 * our own cache, and we cannot be allowed to write them back
	 * later on top of the stack's legitimate owner!
	 *
	 * This work comes in two flavors.  In interrupts, the
	 * outgoing context has already been saved for us, so we can
	 * do the flush right here.  In direct context switches, we
	 * are still using the stack, so we do the invalidate of the
	 * bottom here (and flush the line containing SP to handle
	 * the overlap).  The remaining flush of the live region
	 * happens in the assembly code once the context is pushed, up
	 * to the stack top stashed in a special register.
	 */
	if (old_switch_handle != NULL) {
		sys_cache_data_flush_range((void *)osp, (ostack + osz) - osp);
		sys_cache_data_invd_range((void *)ostack, osp - ostack);
	} else {
		/* When in a switch, our current stack is the outbound
		 * stack.  Flush the single line containing the stack
		 * bottom (which is live data) before invalidating
		 * everything below that.  Remember that the 16 bytes
		 * below our SP are the calling function's spill area
		 * and may be live too.
		 */
		__asm__ volatile("mov %0, a1" : "=r"(osp));
		osp -= 16;
		sys_cache_data_flush_range((void *)osp, 1);
		sys_cache_data_invd_range((void *)ostack, osp - ostack);

		uint32_t end = ostack + osz;

		__asm__ volatile("wsr %0, " ZSR_FLUSH_STR :: "r"(end));
	}
}
#endif

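/* True when the current CPU is servicing an interrupt, i.e. its nested
 * interrupt count is non-zero.
 */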
static inline bool arch_is_in_isr(void)
{
	return arch_curr_cpu()->nested != 0U;
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_ */