/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Xtensa specific kernel interface header
 * This header contains the Xtensa specific kernel interface. It is included
 * by the generic kernel interface header (include/zephyr/arch/cpu.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_

#include <zephyr/irq.h>

#include <zephyr/devicetree.h>
#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__)
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/arch/xtensa/syscall.h>
#include <zephyr/arch/xtensa/thread.h>
#include <zephyr/arch/xtensa/irq.h>
#include <xtensa/config/core.h>
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/xtensa/gdbstub.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/arch/xtensa/thread_stack.h>
#include <zephyr/sys/slist.h>

#include <zephyr/drivers/timer/system_timer.h>

#ifdef CONFIG_XTENSA_MMU
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif

#ifdef CONFIG_XTENSA_MPU
#include <zephyr/arch/xtensa/mpu.h>
#endif

/**
 * @defgroup xtensa_apis Xtensa APIs
 * @ingroup arch-interface
 * @{
 * @}
 *
 * @defgroup xtensa_internal_apis Xtensa Internal APIs
 * @ingroup xtensa_apis
 * @{
 * @}
 */

#include <zephyr/arch/xtensa/exception.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Architecture-specific memory domain state. */
struct arch_mem_domain {
#ifdef CONFIG_XTENSA_MMU
	/* Page tables for this memory domain */
	uint32_t *ptables;
	/* Address space identifier (ASID) used by the MMU */
	uint8_t asid;
	/* Whether the page tables have been modified */
	bool dirty;
#endif
#ifdef CONFIG_XTENSA_MPU
	/* MPU memory map for this memory domain */
	struct xtensa_mpu_map mpu_map;
#endif
	/* Linked list node for tracking this domain */
	sys_snode_t node;
};

/**
 * @brief Generate hardware exception.
 *
 * This generates a hardware exception which is used by ARCH_EXCEPT().
 *
 * @param reason_p Reason for exception.
 */
void xtensa_arch_except(int reason_p);

/**
 * @brief Generate kernel oops.
 *
 * This generates a kernel oops which is used by arch_syscall_oops().
 *
 * @param reason_p Reason for exception.
 * @param ssf Stack pointer.
 */
void xtensa_arch_kernel_oops(int reason_p, void *ssf);

#ifdef CONFIG_USERSPACE

#define ARCH_EXCEPT(reason_p) do { \
	if (k_is_user_context()) { \
		arch_syscall_invoke1(reason_p, \
			K_SYSCALL_XTENSA_USER_FAULT); \
	} else { \
		xtensa_arch_except(reason_p); \
	} \
	CODE_UNREACHABLE; \
} while (false)

#else

#define ARCH_EXCEPT(reason_p) do { \
	xtensa_arch_except(reason_p); \
	CODE_UNREACHABLE; \
} while (false)

#endif
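
/*
 * Illustrative only: ARCH_EXCEPT() is the architecture hook behind the
 * kernel's generic fatal-error macros (e.g. k_panic()/k_oops()), so kernel
 * code reaches it roughly as in the sketch below. K_ERR_KERNEL_PANIC is one
 * of the generic fatal-error reason codes.
 *
 *     ARCH_EXCEPT(K_ERR_KERNEL_PANIC);  // raises the exception, never returns
 */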

__syscall void xtensa_user_fault(unsigned int reason);

#include <zephyr/syscalls/arch.h>

/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);

#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
	{ \
		Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
	}
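
/*
 * Illustrative only: code does not invoke ARCH_IRQ_CONNECT() directly but
 * goes through the generic IRQ_CONNECT() macro from <zephyr/irq.h>, which
 * expands to it. The IRQ line and handler names below are hypothetical.
 *
 *     void my_isr(const void *arg);
 *
 *     IRQ_CONNECT(MY_IRQ_LINE, 0, my_isr, NULL, 0);
 *     irq_enable(MY_IRQ_LINE);
 */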

/** Implementation of @ref arch_k_cycle_get_32. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

/** Implementation of @ref arch_k_cycle_get_64. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}

/** Implementation of @ref arch_nop. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

/**
 * @brief Lock VECBASE if supported by hardware.
 *
 * Bit 0 of VECBASE acts as a lock bit on hardware supporting
 * this feature. When this bit is set, VECBASE cannot be changed
 * until it is cleared by hardware reset. When the hardware does not
 * support this bit, it is hardwired to 0.
 */
static ALWAYS_INLINE void xtensa_vecbase_lock(void)
{
	int vecbase;

	__asm__ volatile("rsr.vecbase %0" : "=r" (vecbase));
	__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
}

#if defined(CONFIG_XTENSA_RPO_CACHE) || defined(__DOXYGEN__)
#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_mem_coherent. */
static inline bool arch_mem_coherent(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}
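
/*
 * Worked example (illustrative): Xtensa divides the 4GB address space into
 * eight 512MB regions selected by address bits 31:29. Assuming
 * CONFIG_XTENSA_UNCACHED_REGION=2, a pointer such as (void *)0x40001000
 * gives 0x40001000 >> 29 == 2, so arch_mem_coherent() reports it as
 * coherent (uncached) memory.
 */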
#endif

/* Utility to generate an unrolled and optimal[1] code sequence to set
 * the RPO TLB registers (contra the HAL cacheattr macros, which
 * generate larger code and can't be called from C), based on the
 * KERNEL_COHERENCE configuration in use. Selects RPO attribute "2"
 * for regions (including MMIO registers in region zero) which want to
 * bypass L1, "4" for the cached region which wants writeback, and
 * "15" (invalid) elsewhere.
 *
 * Note that on cores that have the "translation" option set, we need
 * to put an identity mapping in the high bits. Also per spec
 * changing the current code region (by definition cached) requires
 * that WITLB be followed by an ISYNC and that both instructions live
 * in the same cache line (two 3-byte instructions fit in an 8-byte
 * aligned region, so that's guaranteed not to cross a cache line
 * boundary).
 *
 * [1] With the sole exception of gcc's infuriating insistence on
 * emitting a precomputed literal for addr + addrincr instead of
 * computing it with a single ADD instruction from values it already
 * has in registers. Explicitly assigning the variables to registers
 * via an attribute works, but then emits needless MOV instructions
 * instead. I tell myself it's just 32 bytes of .text, but... Sigh.
 */
#define _REGION_ATTR(r) \
	((r) == 0 ? 2 : \
	 ((r) == CONFIG_XTENSA_CACHED_REGION ? 4 : \
	  ((r) == CONFIG_XTENSA_UNCACHED_REGION ? 2 : 15)))

#define _SET_ONE_TLB(region) do { \
	uint32_t attr = _REGION_ATTR(region); \
	if (XCHAL_HAVE_XLT_CACHEATTR) { \
		attr |= addr; /* RPO with translation */ \
	} \
	if (region != CONFIG_XTENSA_CACHED_REGION) { \
		__asm__ volatile("wdtlb %0, %1; witlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
	} else { \
		__asm__ volatile("wdtlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
		__asm__ volatile("j 1f; .align 8; 1:"); \
		__asm__ volatile("witlb %0, %1; isync" \
				 :: "r"(attr), "r"(addr)); \
	} \
	addr += addrincr; \
} while (0)

/**
 * @brief Set up RPO TLB registers.
 */
#define ARCH_XTENSA_SET_RPO_TLB() \
	do { \
		register uint32_t addr = 0, addrincr = 0x20000000; \
		FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
	} while (0)
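
/*
 * Minimal usage sketch (the hook name is hypothetical): the macro is
 * intended to run once per CPU, early in SoC initialization, before the
 * kernel relies on the coherence mapping it establishes.
 *
 *     void soc_early_init(void)   // hypothetical SoC init hook
 *     {
 *         ARCH_XTENSA_SET_RPO_TLB();
 *     }
 */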
#endif /* CONFIG_XTENSA_RPO_CACHE */

#if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
/**
 * @brief Perform additional steps after MMU initialization.
 *
 * This performs additional steps related to memory management
 * after the main MMU initialization code. This needs to be defined
 * in the SoC layer. The default is to do nothing.
 *
 * @param is_core0 True if this is called while executing on
 *                 CPU core #0.
 */
void arch_xtensa_mmu_post_init(bool is_core0);
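
/*
 * A minimal sketch of an SoC-layer implementation (illustrative): an SoC
 * that needs no extra post-MMU work can provide an empty body.
 *
 *     void arch_xtensa_mmu_post_init(bool is_core0)
 *     {
 *         ARG_UNUSED(is_core0);
 *     }
 */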
#endif

#ifdef __cplusplus
}
#endif

#endif /* !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) */

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_ */