/*
 * Copyright (c) 2017, Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>
#include <xtensa_asm2_context.h>
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/toolchain.h>
#include <zephyr/logging/log.h>
#include <zephyr/offsets.h>
#include <zephyr/zsr.h>
#include <zephyr/arch/common/exc_handle.h>

#ifdef CONFIG_XTENSA_GEN_HANDLERS
#include <xtensa_handlers.h>
#else
#include <_soc_inthandlers.h>
#endif

#include <kernel_internal.h>
#include <xtensa_internal.h>
#include <xtensa_stack.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

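/* Labels defined in the architecture's assembly code marking the program
 * counter of the intentionally placed illegal instructions used by
 * ARCH_EXCEPT() and xtensa_arch_kernel_oops(). The exception handler below
 * compares the faulting PC against these to tell such deliberate traps apart
 * from genuine illegal instruction exceptions.
 */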
extern char xtensa_arch_except_epc[];
extern char xtensa_arch_kernel_oops_epc[];

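/* Check whether the memory region [addr, addr + sz) falls outside the stack
 * expected for the current context: the interrupt stack when servicing a
 * nested interrupt (or during early boot), the privileged stack when a user
 * thread is executing in kernel mode, or the thread stack otherwise. Pass
 * UINT32_MAX as ps when the stashed PS value is unavailable or untrusted;
 * with userspace enabled this widens the check to the whole stack object.
 */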
bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
{
	uintptr_t start, end;
	struct k_thread *thread = arch_current_thread();
	bool was_in_isr, invalid;

	/* Without userspace, there is no privileged stack so the thread stack
	 * is the whole stack (minus reserved area). So there is no need to
	 * check for PS == UINT32_MAX for special treatment.
	 */
	ARG_UNUSED(ps);

	/* Since both level 1 interrupts and exceptions go through
	 * the same interrupt vector, both of them increment the nested
	 * counter in the CPU struct. The architecture vector handler
	 * moves execution to the interrupt stack when nested goes from
	 * zero to one; any further nested interrupts/exceptions keep
	 * running on the interrupt stack. Therefore, only when
	 * nested > 1 was the code running on the interrupt stack, and
	 * only then should we check bounds against the interrupt stack.
	 */
	was_in_isr = arch_curr_cpu()->nested > 1;

	if ((thread == NULL) || was_in_isr) {
		/* We were servicing an interrupt or in an early boot environment
		 * and are supposed to be on the interrupt stack.
		 */
		int cpu_id;

#ifdef CONFIG_SMP
		cpu_id = arch_curr_cpu()->id;
#else
		cpu_id = 0;
#endif

		start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
		end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
	} else if (ps == UINT32_MAX) {
		/* Since the stashed PS is inside the struct pointed to by
		 * frame->ptr_to_bsa, we need to verify that both frame and
		 * frame->ptr_to_bsa are valid pointers within the thread stack.
		 * Also, without PS we have no idea whether we were in kernel mode
		 * (using the privileged stack) or user mode (normal thread stack),
		 * so we need to check against the whole stack area.
		 *
		 * For the same reason we cannot account for the reserved area,
		 * since we don't know whether ARCH_KERNEL_STACK_RESERVED or
		 * ARCH_THREAD_STACK_RESERVED applies.
		 */
		start = (uintptr_t)thread->stack_obj;
		end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
	} else if (((ps & PS_RING_MASK) == 0U) &&
		   ((thread->base.user_options & K_USER) == K_USER)) {
		/* Check if this is a user thread, and that it was running in
		 * kernel mode. If so, we must have been doing a syscall, so
		 * check with privileged stack bounds.
		 */
		start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
		end = thread->stack_info.start;
#endif
	} else {
		start = thread->stack_info.start;
		end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
	}

	invalid = (addr <= start) || ((addr + sz) >= end);

	return invalid;
}

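/* Validate a saved interrupt stack frame pointer: both the frame pointer
 * itself and the base save area (BSA) pointer stored within it must lie
 * inside the expected stack bounds before any of their contents can be
 * trusted.
 */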
bool xtensa_is_frame_pointer_valid(_xtensa_irq_stack_frame_raw_t *frame)
{
	_xtensa_irq_bsa_t *bsa;

	/* Check if the pointer to the frame is within stack bounds. If not, there is no
	 * need to test if the BSA (base save area) pointer is also valid as it is
	 * possibly invalid.
	 */
	if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), UINT32_MAX)) {
		return false;
	}

	/* Need to test if the BSA area is also within stack bounds. The information
	 * contained within the BSA is only valid if within stack bounds.
	 */
	bsa = frame->ptr_to_bsa;
	if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
		return false;
	}

#ifdef CONFIG_USERSPACE
	/* With userspace, the privileged stack and the normal thread stack live
	 * within one stack object, so we need to further test whether the frame
	 * pointer resides in the correct stack based on kernel/user mode.
	 */
	if (xtensa_is_outside_stack_bounds((uintptr_t)frame, sizeof(*frame), bsa->ps)) {
		return false;
	}
#endif

	return true;
}

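/* Dump the interrupted context's address registers, plus the loop registers,
 * SAR and THREADPTR where the core has them, from the saved stack frame.
 * Returns silently if the frame pointer fails the sanity checks above.
 */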
void xtensa_dump_stack(const void *stack)
{
	_xtensa_irq_stack_frame_raw_t *frame = (void *)stack;
	_xtensa_irq_bsa_t *bsa;
	uintptr_t num_high_regs;
	int reg_blks_remaining;

	/* Don't dump the stack if the stack pointer is invalid, as any frame
	 * elements obtained by dereferencing the frame pointer are probably
	 * also invalid; worse, dereferencing them may cause another access
	 * violation.
	 */
	if (!xtensa_is_frame_pointer_valid(frame)) {
		return;
	}

	bsa = frame->ptr_to_bsa;

	/* Calculate the number of high registers. */
	num_high_regs = (uint8_t *)bsa - (uint8_t *)frame + sizeof(void *);
	num_high_regs /= sizeof(uintptr_t);

	/* High registers are always saved in blocks of four. */
	reg_blks_remaining = (int)num_high_regs / 4;

	LOG_ERR(" **  A0 %p  SP %p  A2 %p  A3 %p",
		(void *)bsa->a0,
		(void *)((char *)bsa + sizeof(*bsa)),
		(void *)bsa->a2, (void *)bsa->a3);

	if (reg_blks_remaining > 0) {
		reg_blks_remaining--;

		LOG_ERR(" **  A4 %p  A5 %p  A6 %p  A7 %p",
			(void *)frame->blks[reg_blks_remaining].r0,
			(void *)frame->blks[reg_blks_remaining].r1,
			(void *)frame->blks[reg_blks_remaining].r2,
			(void *)frame->blks[reg_blks_remaining].r3);
	}

	if (reg_blks_remaining > 0) {
		reg_blks_remaining--;

		LOG_ERR(" **  A8 %p  A9 %p A10 %p A11 %p",
			(void *)frame->blks[reg_blks_remaining].r0,
			(void *)frame->blks[reg_blks_remaining].r1,
			(void *)frame->blks[reg_blks_remaining].r2,
			(void *)frame->blks[reg_blks_remaining].r3);
	}

	if (reg_blks_remaining > 0) {
		reg_blks_remaining--;

		LOG_ERR(" ** A12 %p A13 %p A14 %p A15 %p",
			(void *)frame->blks[reg_blks_remaining].r0,
			(void *)frame->blks[reg_blks_remaining].r1,
			(void *)frame->blks[reg_blks_remaining].r2,
			(void *)frame->blks[reg_blks_remaining].r3);
	}

#if XCHAL_HAVE_LOOPS
	LOG_ERR(" ** LBEG %p LEND %p LCOUNT %p",
		(void *)bsa->lbeg,
		(void *)bsa->lend,
		(void *)bsa->lcount);
#endif

	LOG_ERR(" ** SAR %p", (void *)bsa->sar);

#if XCHAL_HAVE_THREADPTR
	LOG_ERR(" **  THREADPTR %p", (void *)bsa->threadptr);
#endif
}

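/* Extract a num_bits wide bit field starting at bit position 'offset' from
 * val. For example, get_bits(6, 2, ps) returns the two-bit RING field of a
 * stashed PS value, as used by print_fatal_exception() below.
 */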
static inline unsigned int get_bits(int offset, int num_bits, unsigned int val)
{
	int mask;

	mask = BIT(num_bits) - 1;
	val = val >> offset;
	return val & mask;
}

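/* Print a description of a fatal exception to the log. print_stack points at
 * the saved stack frame, whose first word is the pointer to the base save
 * area (BSA) holding the interrupted PC, PS and other saved register state.
 */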
static void print_fatal_exception(void *print_stack, int cause,
				  bool is_dblexc, uint32_t depc)
{
	void *pc;
	uint32_t ps, vaddr;
	_xtensa_irq_bsa_t *bsa = (void *)*(int **)print_stack;

	__asm__ volatile("rsr.excvaddr %0" : "=r"(vaddr));

	if (is_dblexc) {
		LOG_ERR(" ** FATAL EXCEPTION (DOUBLE)");
	} else {
		LOG_ERR(" ** FATAL EXCEPTION");
	}

	LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)",
		arch_curr_cpu()->id, cause,
		xtensa_exccause(cause));

	/* Don't print the saved register state if the BSA area is invalid, as
	 * any elements obtained by dereferencing the pointer are probably also
	 * invalid; worse, dereferencing them may cause another access violation.
	 */
	if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
		LOG_ERR(" ** VADDR %p Invalid SP %p", (void *)vaddr, print_stack);
		return;
	}

	ps = bsa->ps;
	pc = (void *)bsa->pc;

	LOG_ERR(" **  PC %p VADDR %p", pc, (void *)vaddr);

	if (is_dblexc) {
		LOG_ERR(" **  DEPC %p", (void *)depc);
	}

	LOG_ERR(" **  PS %p", (void *)bsa->ps);
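	/* PS fields decoded below: INTLEVEL bits [3:0], EXCM bit 4, UM bit 5,
	 * RING bits [7:6], OWB bits [11:8], CALLINC bits [17:16], WOE bit 18.
	 */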
	LOG_ERR(" **    (INTLEVEL:%d EXCM: %d UM:%d RING:%d WOE:%d OWB:%d CALLINC:%d)",
		get_bits(0, 4, ps), get_bits(4, 1, ps),
		get_bits(5, 1, ps), get_bits(6, 2, ps),
		get_bits(18, 1, ps),
		get_bits(8, 4, ps), get_bits(16, 2, ps));
}

static ALWAYS_INLINE void usage_stop(void)
{
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
#endif
}

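/* Choose what to return to: when leaving interrupt context (nested <= 1),
 * ask the scheduler for the next thread's switch handle; otherwise resume
 * the interrupted (nested) context. Without multithreading there is nothing
 * to switch to, so the interrupted context is always resumed.
 */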
static inline void *return_to(void *interrupted)
{
#ifdef CONFIG_MULTITHREADING
	return _current_cpu->nested <= 1 ?
		z_get_next_switch_handle(interrupted) : interrupted;
#else
	return interrupted;
#endif /* CONFIG_MULTITHREADING */
}

/* The wrapper code lives here instead of in the python script that
 * generates _xtensa_handle_one_int*().  Seems cleaner, still kind of
 * ugly.
 *
 * This may be unused depending on the number of interrupt levels
 * supported by the SoC.
 */
#define DEF_INT_C_HANDLER(l)				\
__unused void *xtensa_int##l##_c(void *interrupted_stack)	\
{							   \
	uint32_t irqs, intenable, m;			   \
	usage_stop();					   \
	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
	__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
	irqs &= intenable;					\
	while ((m = _xtensa_handle_one_int##l(irqs))) {		\
		irqs ^= m;					\
		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
	}							\
	return return_to(interrupted_stack);		\
}
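
/* For illustration, DEF_INT_C_HANDLER(2) expands to roughly:
 *
 *   __unused void *xtensa_int2_c(void *interrupted_stack)
 *   {
 *           uint32_t irqs, intenable, m;
 *
 *           usage_stop();
 *           // irqs = INTERRUPT & INTENABLE: pending *and* enabled sources
 *           while ((m = _xtensa_handle_one_int2(irqs))) {
 *                   irqs ^= m;
 *                   // write m to INTCLEAR to acknowledge the serviced bits
 *           }
 *           return return_to(interrupted_stack);
 *   }
 */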

#if XCHAL_HAVE_NMI
#define MAX_INTR_LEVEL XCHAL_NMILEVEL
#elif XCHAL_HAVE_INTERRUPTS
#define MAX_INTR_LEVEL XCHAL_NUM_INTLEVELS
#else
#error Xtensa core with no interrupt support is used
#define MAX_INTR_LEVEL 0
#endif

#if MAX_INTR_LEVEL >= 2
DEF_INT_C_HANDLER(2)
#endif

#if MAX_INTR_LEVEL >= 3
DEF_INT_C_HANDLER(3)
#endif

#if MAX_INTR_LEVEL >= 4
DEF_INT_C_HANDLER(4)
#endif

#if MAX_INTR_LEVEL >= 5
DEF_INT_C_HANDLER(5)
#endif

#if MAX_INTR_LEVEL >= 6
DEF_INT_C_HANDLER(6)
#endif

#if MAX_INTR_LEVEL >= 7
DEF_INT_C_HANDLER(7)
#endif

static inline DEF_INT_C_HANDLER(1)

/* C handler for level 1 exceptions/interrupts.  Hooked from the
 * DEF_EXCINT 1 vector declaration in assembly code.  This one looks
 * different because exceptions and interrupts land at the same
 * vector; other interrupt levels have their own vectors.
 */
void *xtensa_excint1_c(void *esf)
{
	int cause, reason;
	int *interrupted_stack = &((struct arch_esf *)esf)->dummy;
	_xtensa_irq_bsa_t *bsa = (void *)*(int **)interrupted_stack;
	bool is_fatal_error = false;
	bool is_dblexc = false;
	uint32_t ps;
	void *pc, *print_stack = (void *)interrupted_stack;
	uint32_t depc = 0;

#ifdef CONFIG_XTENSA_MMU
	depc = XTENSA_RSR(ZSR_DEPC_SAVE_STR);
	cause = XTENSA_RSR(ZSR_EXCCAUSE_SAVE_STR);

	is_dblexc = (depc != 0U);
#else /* CONFIG_XTENSA_MMU */
	__asm__ volatile("rsr.exccause %0" : "=r"(cause));
#endif /* CONFIG_XTENSA_MMU */

	switch (cause) {
	case EXCCAUSE_LEVEL1_INTERRUPT:
#ifdef CONFIG_XTENSA_MMU
		if (!is_dblexc) {
			return xtensa_int1_c(interrupted_stack);
		}
#else
		return xtensa_int1_c(interrupted_stack);
#endif /* CONFIG_XTENSA_MMU */
		break;
#ifndef CONFIG_USERSPACE
	/* Syscalls are handled earlier in assembly if MMU is enabled.
	 * So we don't need this here.
	 */
	case EXCCAUSE_SYSCALL:
		/* Just report it to the console for now */
		LOG_ERR(" ** SYSCALL PS %p PC %p",
			(void *)bsa->ps, (void *)bsa->pc);
		xtensa_dump_stack(interrupted_stack);

		/* Xtensa exceptions don't automatically advance PC, so we
		 * have to skip past the 3-byte SYSCALL instruction manually
		 * or else it will just loop forever.
		 */
		bsa->pc += 3;
		break;
#endif /* !CONFIG_USERSPACE */
	default:
		reason = K_ERR_CPU_EXCEPTION;

		/* If the BSA area is invalid, we cannot trust anything coming out of it. */
		if (xtensa_is_outside_stack_bounds((uintptr_t)bsa, sizeof(*bsa), UINT32_MAX)) {
			goto skip_checks;
		}

		ps = bsa->ps;
		pc = (void *)bsa->pc;

		/* Default for exception */
		is_fatal_error = true;

		/* We need to distinguish between an illegal instruction raised
		 * by xtensa_arch_except(), e.g. for k_panic(), and any other
		 * illegal instruction. For exceptions caused by
		 * xtensa_arch_except() calls, we also need to pass the reason_p
		 * to xtensa_fatal_error(). Since the ARCH_EXCEPT frame is in the
		 * BSA, the first arg reason_p is stored at the A2 offset.
		 * We assign EXCCAUSE the unused, reserved code 63; this may be
		 * problematic if the app or new boards also decide to repurpose
		 * this code.
		 *
		 * Another intentional illegal instruction comes from
		 * xtensa_arch_kernel_oops(). A kernel OOPS has to be explicitly
		 * raised, so we can simply set the reason and continue.
		 */
		if (cause == EXCCAUSE_ILLEGAL) {
			if (pc == (void *)&xtensa_arch_except_epc) {
				cause = 63;
				__asm__ volatile("wsr.exccause %0" : : "r"(cause));
				reason = bsa->a2;
			} else if (pc == (void *)&xtensa_arch_kernel_oops_epc) {
				cause = 64; /* kernel oops */
				reason = K_ERR_KERNEL_OOPS;

				/* A3 contains the second argument to
				 * xtensa_arch_kernel_oops(reason, ssf)
				 * where ssf is the stack frame causing
				 * the kernel oops.
				 */
				print_stack = (void *)bsa->a3;
			}
		}

skip_checks:
		if (reason != K_ERR_KERNEL_OOPS) {
			print_fatal_exception(print_stack, cause, is_dblexc, depc);
		}

		/* FIXME: legacy xtensa port reported "HW" exception
		 * for all unhandled exceptions, which seems incorrect
		 * as these are software errors.  Should clean this
		 * up.
		 */
		xtensa_fatal_error(reason, (void *)print_stack);
		break;
	}

#ifdef CONFIG_XTENSA_MMU
	switch (cause) {
	case EXCCAUSE_LEVEL1_INTERRUPT:
#ifndef CONFIG_USERSPACE
	case EXCCAUSE_SYSCALL:
#endif /* !CONFIG_USERSPACE */
		is_fatal_error = false;
		break;
	default:
		is_fatal_error = true;
		break;
	}
#endif /* CONFIG_XTENSA_MMU */

	if (is_dblexc || is_fatal_error) {
		uint32_t ignore;

		/* We are going to manipulate _current_cpu->nested manually.
		 * Since the error is fatal to the thread but may be recoverable
		 * for the system, code execution must not return to the current
		 * thread, as it is being terminated (via the xtensa_fatal_error()
		 * call above). So we need to prevent further interrupts from
		 * coming in, which would affect the nested value as we are going
		 * outside of the normal interrupt handling procedure.
		 *
		 * Setting nested to 1 has two effects:
		 * 1. It forces return_to() to choose a new thread.
		 *    Since the current thread is being terminated, it will
		 *    not be chosen again.
		 * 2. When context switches to the newly chosen thread,
		 *    nested must be zero for normal code execution,
		 *    as that is not in interrupt context at all.
		 *    After returning from this function, the rest of the
		 *    interrupt handling code will decrement nested,
		 *    resulting in it being zero before switching to another
		 *    thread.
		 */
		__asm__ volatile("rsil %0, %1"
				: "=r" (ignore) : "i"(XCHAL_EXCM_LEVEL));

		_current_cpu->nested = 1;
	}

#if defined(CONFIG_XTENSA_MMU)
	if (is_dblexc) {
		XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0);
	}
#endif /* CONFIG_XTENSA_MMU */

	return return_to(interrupted_stack);
}

#if defined(CONFIG_GDBSTUB)
void *xtensa_debugint_c(int *interrupted_stack)
{
	extern void z_gdb_isr(struct arch_esf *esf);

	z_gdb_isr((void *)interrupted_stack);

	return return_to(interrupted_stack);
}
#endif