1 /*
2  * Copyright (c) 2019 Nordic Semiconductor ASA.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/ztest.h>
8 #include <zephyr/arch/cpu.h>
9 #include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
10 #include <zephyr/sys/barrier.h>
11 
/* Incremented by arm_isr_handler() on each dynamic-ISR invocation; its value
 * selects which fatal error path the ISR triggers next.
 */
static volatile int test_flag;
/* Fault reason the fatal error handler expects next; -1 = no crash expected. */
static volatile int expected_reason = -1;

/* Used to validate ESF collection during a fault */
static volatile int run_esf_validation;
static volatile int esf_validation_rv;
static volatile uint32_t expected_msp;
static K_THREAD_STACK_DEFINE(esf_collection_stack, 2048);
static struct k_thread esf_collection_thread;
#define MAIN_PRIORITY 7
#define PRIORITY 5
23 
24 /**
25  * Validates that pEsf matches state from set_regs_with_known_pattern()
26  */
check_esf_matches_expectations(const z_arch_esf_t * pEsf)27 static int check_esf_matches_expectations(const z_arch_esf_t *pEsf)
28 {
29 	const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */
30 	const bool caller_regs_match_expected =
31 		(pEsf->basic.r0 == 0) &&
32 		(pEsf->basic.r1 == 1) &&
33 		(pEsf->basic.r2 == 2) &&
34 		(pEsf->basic.r3 == 3) &&
35 		(pEsf->basic.lr == 15) &&
36 		(*(uint16_t *)pEsf->basic.pc == expected_fault_instruction);
37 	if (!caller_regs_match_expected) {
38 		printk("__basic_sf member of ESF is incorrect\n");
39 		return -1;
40 	}
41 
42 #if defined(CONFIG_EXTRA_EXCEPTION_INFO)
43 	const struct _callee_saved *callee_regs = pEsf->extra_info.callee;
44 	const bool callee_regs_match_expected =
45 		(callee_regs->v1 /* r4 */ == 4) &&
46 		(callee_regs->v2 /* r5 */ == 5) &&
47 		(callee_regs->v3 /* r6 */ == 6) &&
48 		(callee_regs->v4 /* r7 */ == 7) &&
49 		(callee_regs->v5 /* r8 */ == 8) &&
50 		(callee_regs->v6 /* r9 */ == 9) &&
51 		(callee_regs->v7 /* r10 */ == 10) &&
52 		(callee_regs->v8 /* r11 */ == 11);
53 	if (!callee_regs_match_expected) {
54 		printk("_callee_saved_t member of ESF is incorrect\n");
55 		return -1;
56 	}
57 
58 	/* we expect the EXC_RETURN value to have:
59 	 *  - PREFIX: bits [31:24] = 0xFF
60 	 *  - Mode, bit [3] = 1 since exception occurred from thread mode
61 	 *  - SPSEL, bit [2] = 1 since frame should reside on PSP
62 	 */
63 	const uint32_t exc_bits_set_mask = 0xff00000C;
64 
65 	if ((pEsf->extra_info.exc_return & exc_bits_set_mask) !=
66 		exc_bits_set_mask) {
67 		printk("Incorrect EXC_RETURN of 0x%08x",
68 			pEsf->extra_info.exc_return);
69 		return -1;
70 	}
71 
72 	/* the psp should match the contents of the esf copy up
73 	 * to the xpsr. (the xpsr value in the copy used for pEsf
74 	 * is overwritten in fault.c)
75 	 */
76 	if (memcmp((void *)callee_regs->psp, pEsf,
77 		offsetof(struct __esf, basic.xpsr)) != 0) {
78 		printk("psp does not match __basic_sf provided\n");
79 		return -1;
80 	}
81 
82 	if (pEsf->extra_info.msp != expected_msp) {
83 		printk("MSP is 0x%08x but should be 0x%08x",
84 			pEsf->extra_info.msp, expected_msp);
85 		return -1;
86 	}
87 #endif /* CONFIG_EXTRA_EXCEPTION_INFO */
88 	return 0;
89 }
90 
/**
 * Test override of the kernel fatal error handler.
 *
 * Halts the system unless a crash was expected (expected_reason armed by the
 * test) and the reported reason matches it. When run_esf_validation is set,
 * additionally verifies the captured ESF against the register pattern loaded
 * by set_regs_with_known_pattern(), reporting the result via
 * esf_validation_rv.
 *
 * @param reason fatal error reason code reported by the kernel
 * @param pEsf   exception stack frame captured at fault time
 */
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
	TC_PRINT("Caught system error -- reason %d\n", reason);

	if (expected_reason == -1) {
		printk("Was not expecting a crash\n");
		k_fatal_halt(reason);
	}

	if (reason != expected_reason) {
		printk("Wrong crash type got %d expected %d\n", reason,
			expected_reason);
		k_fatal_halt(reason);
	}

	if (run_esf_validation) {
		if (check_esf_matches_expectations(pEsf) == 0) {
			esf_validation_rv = TC_PASS;
		}
		run_esf_validation = 0;
	}

	/* Crash consumed; re-arm "no crash expected" so tests can verify
	 * the fault actually occurred by observing the reset value.
	 */
	expected_reason = -1;
}
115 
/**
 * Set ARM registers with a known pattern:
 *  r0-r12 are set to 0...12, respectively
 *  r13 (sp) is left untouched
 *  r14 (lr) is set to 15 (since a fault takes place, we never use the value)
 *  r15 (pc) will point to the faulting instruction (udf #90)
 *
 * Note: Routine was written to be ARMV6M compatible
 *
 * In k_sys_fatal_error_handler above we will check that the ESF provided
 * as a parameter matches these expectations.
 */
void set_regs_with_known_pattern(void)
{
	__asm__ volatile(
		"mov r1, #1\n"
		"mov r2, #2\n"
		"mov r3, #3\n"
		"mov r4, #4\n"
		"mov r5, #5\n"
		"mov r6, #6\n"
		"mov r7, #7\n"
		/* ARMv6-M `mov` with immediate cannot target high registers,
		 * so stage each value (8..12) in r0 and copy it across.
		 */
		"mov r0, #8\n"
		"mov r8, r0\n"
		"add r0, r0, #1\n"
		"mov r9, r0\n"
		"add r0, r0, #1\n"
		"mov r10, r0\n"
		"add r0, r0, #1\n"
		"mov r11, r0\n"
		"add r0, r0, #1\n"
		"mov r12, r0\n"
		/* lr = 12 + 3 = 15 */
		"add r0, r0, #3\n"
		"mov lr, r0\n"
		"mov r0, #0\n"
		/* Trigger the fault; the stacked pc will point here. */
		"udf #90\n"
	);
}
154 
/**
 * @brief Verify ESF collection during a fault.
 *
 * Spawns a cooperative thread that loads a known register pattern and
 * executes an undefined instruction; the fatal error handler validates
 * the resulting ESF and reports back through esf_validation_rv.
 */
ZTEST(arm_interrupt, test_arm_esf_collection)
{
	int test_validation_rv;

	/* if the check in the fault handler succeeds,
	 * this will be set to TC_PASS
	 */
	esf_validation_rv = TC_FAIL;

	/* since the fault is from a task, the interrupt stack (msp)
	 * should match whatever the current value is
	 */
	expected_msp = __get_MSP();

	run_esf_validation = 1;
	expected_reason = K_ERR_CPU_EXCEPTION;

	/* Run test thread and main thread at same priority to guarantee the
	 * crashy thread we create below runs to completion before we get
	 * to the end of this function
	 */
	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));

	TC_PRINT("Testing ESF Reporting\n");
	k_thread_create(&esf_collection_thread, esf_collection_stack,
			K_THREAD_STACK_SIZEOF(esf_collection_stack),
			(k_thread_entry_t)set_regs_with_known_pattern,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);

	/* By the time we run again, the crashy thread has faulted and been
	 * aborted, so the handler has already recorded its verdict.
	 */
	test_validation_rv = esf_validation_rv;

	zassert_not_equal(test_validation_rv, TC_FAIL,
		"ESF fault collection failed");
}
190 
/**
 * ISR connected dynamically to the spare NVIC line by test_arm_interrupt.
 *
 * Each invocation increments test_flag and intentionally triggers a
 * different fatal error path (oops, panic, assert) so the expected_reason
 * handshake with k_sys_fatal_error_handler() is exercised from
 * interrupt context.
 *
 * @param args unused
 */
void arm_isr_handler(const void *args)
{
	ARG_UNUSED(args);

#if defined(CONFIG_CPU_CORTEX_M) && defined(CONFIG_FPU) && \
	defined(CONFIG_FPU_SHARING)
	/* Clear Floating Point Status and Control Register (FPSCR),
	 * to prevent from having the interrupt line set to pending again,
	 * in case FPU IRQ is selected by the test as "Available IRQ line"
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "Tail predication not applied" as its reset value
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#endif

	test_flag++;

	if (test_flag == 1) {
		/* Intentional Kernel oops */
		expected_reason = K_ERR_KERNEL_OOPS;
		k_oops();
	} else if (test_flag == 2) {
		/* Intentional Kernel panic */
		expected_reason = K_ERR_KERNEL_PANIC;
		k_panic();
	} else if (test_flag == 3) {
		/* Intentional ASSERT */
		expected_reason = K_ERR_KERNEL_PANIC;
		__ASSERT(0, "Intentional assert\n");
	} else if (test_flag == 4) {
#if defined(CONFIG_HW_STACK_PROTECTION)
		/*
		 * Verify that the Stack Overflow has been reported by the core
		 * and the expected reason variable is reset.
		 */
		int reason = expected_reason;

		zassert_equal(reason, -1,
			"expected_reason has not been reset (%d)\n", reason);
#endif
	}
}
239 
/**
 * @brief Exercise interrupt-context fatal errors on a spare NVIC line.
 *
 * Finds an implemented but unused NVIC IRQ line, then verifies in turn:
 *  - pending the line with no ISR installed faults with K_ERR_SPURIOUS_IRQ
 *  - a dynamically-connected ISR runs and raises oops/panic/assert faults
 *  - with CONFIG_HW_STACK_PROTECTION: an exception-entry stacking error is
 *    reported as K_ERR_STACK_CHK_FAIL
 */
ZTEST(arm_interrupt, test_arm_interrupt)
{
	/* Determine an NVIC IRQ line that is not currently in use. */
	int i;
	int init_flag, post_flag, reason;

	init_flag = test_flag;

	zassert_false(init_flag, "Test flag not initialized to zero\n");

	for (i = CONFIG_NUM_IRQS - 1; i >= 0; i--) {
		if (NVIC_GetEnableIRQ(i) == 0) {
			/*
			 * Interrupts configured statically with IRQ_CONNECT(.)
			 * are automatically enabled. NVIC_GetEnableIRQ()
			 * returning false, here, implies that the IRQ line is
			 * either not implemented or it is not enabled, thus,
			 * currently not in use by Zephyr.
			 */

			/* Set the NVIC line to pending. */
			NVIC_SetPendingIRQ(i);

			if (NVIC_GetPendingIRQ(i)) {
				/* If the NVIC line is pending, it is
				 * guaranteed that it is implemented; clear the
				 * line.
				 */
				NVIC_ClearPendingIRQ(i);

				if (!NVIC_GetPendingIRQ(i)) {
					/*
					 * If the NVIC line can be successfully
					 * un-pended, it is guaranteed that it
					 * can be used for software interrupt
					 * triggering.
					 */
					break;
				}
			}
		}
	}

	zassert_true(i >= 0,
		"No available IRQ line to use in the test\n");

	TC_PRINT("Available IRQ line: %u\n", i);

	/* Verify that triggering an interrupt in an IRQ line,
	 * on which an ISR has not yet been installed, leads
	 * to a fault of type K_ERR_SPURIOUS_IRQ.
	 */
	expected_reason = K_ERR_SPURIOUS_IRQ;
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);
	/* Barriers ensure the spurious interrupt is taken before we
	 * inspect expected_reason below.
	 */
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* Verify that the spurious ISR has led to the fault and the
	 * expected reason variable is reset.
	 */
	reason = expected_reason;
	zassert_equal(reason, -1,
		"expected_reason has not been reset (%d)\n", reason);
	NVIC_DisableIRQ(i);

	arch_irq_connect_dynamic(i, 0 /* highest priority */,
		arm_isr_handler,
		NULL,
		0);

	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);

	for (int j = 1; j <= 3; j++) {

		/* Set the dynamic IRQ to pending state. */
		NVIC_SetPendingIRQ(i);

		/*
		 * Instruction barriers to make sure the NVIC IRQ is
		 * set to pending state before 'test_flag' is checked.
		 */
		barrier_dsync_fence_full();
		barrier_isync_fence_full();

		/* Returning here implies the thread was not aborted. */

		/* Confirm test flag is set by the ISR handler. */
		post_flag = test_flag;
		zassert_true(post_flag == j, "Test flag not set by ISR\n");
	}

#if defined(CONFIG_HW_STACK_PROTECTION)
	/*
	 * Simulate a stacking error that is caused explicitly by the
	 * exception entry context stacking, to verify that the CPU can
	 * correctly report stacking errors that are not also Data
	 * access violation errors.
	 */
	expected_reason = K_ERR_STACK_CHK_FAIL;

	__disable_irq();

	/* Trigger an interrupt to cause the stacking error */
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);

	/* Manually set PSP almost at the bottom of the stack. An exception
	 * entry will make PSP descend below the limit and into the MPU guard
	 * section (or beyond the address pointed by PSPLIM in ARMv8-M MCUs).
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && \
	defined(CONFIG_MPU_STACK_GUARD)
#define FPU_STACK_EXTRA_SIZE 0x48
	/* If an FP context is present, we should not set the PSP
	 * too close to the end of the stack, because stacking of
	 * the ESF might corrupt kernel memory, making it not
	 * possible to continue the test execution.
	 */
	uint32_t fp_extra_size =
		(__get_CONTROL() & CONTROL_FPCA_Msk) ?
			FPU_STACK_EXTRA_SIZE : 0;
	__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
#else
	__set_PSP(_current->stack_info.start + 0x10);
#endif

	__enable_irq();
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* No stack variable access below this point.
	 * The IRQ will handle the verification.
	 */
#endif /* CONFIG_HW_STACK_PROTECTION */
}
379 
380 #if defined(CONFIG_USERSPACE)
381 #include <zephyr/syscall_handler.h>
382 #include "test_syscalls.h"
383 
/**
 * Syscall implementation used by test_arm_user_interrupt.
 *
 * On Baseline cores it only confirms PRIMASK is clear. On Mainline cores,
 * the first invocation locks IRQs in supervisor mode and deliberately leaves
 * them locked on return; every invocation then verifies that BASEPRI reflects
 * the locked state.
 */
void z_impl_test_arm_user_interrupt_syscall(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Confirm IRQs are not locked */
	zassert_false(__get_PRIMASK(), "PRIMASK is set\n");
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

	/* Idiom fix: use boolean literals for a bool flag rather than 1/0 */
	static bool first_call = true;

	if (first_call) {

		/* First time the syscall is invoked */
		first_call = false;

		/* Lock IRQs in supervisor mode */
		unsigned int key = irq_lock();

		/* Verify that IRQs were not already locked */
		zassert_false(key, "IRQs locked in system call\n");
	}

	/* Confirm IRQs are still locked */
	zassert_true(__get_BASEPRI(), "BASEPRI not set\n");
#endif
}
409 
/* Syscall verification handler: no arguments to validate, so simply
 * forward to the implementation.
 */
static inline void z_vrfy_test_arm_user_interrupt_syscall(void)
{
	z_impl_test_arm_user_interrupt_syscall();
}
414 #include <syscalls/test_arm_user_interrupt_syscall_mrsh.c>
415 
/**
 * @brief Verify IRQ locking behavior as seen from user mode.
 *
 * From a user-mode thread, irq_lock()/irq_unlock() must neither reveal nor
 * modify the supervisor IRQ lock state; the syscall above manipulates the
 * lock from supervisor mode between the user-mode checks.
 */
ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	/* Test thread executing in user mode */
	zassert_true(arch_is_user_context(),
		"Test thread not running in user mode\n");

	/* Attempt to lock IRQs in user mode */
	irq_lock();
	/* Attempt to lock again should return non-zero value of previous
	 * locking attempt, if that were to be successful.
	 */
	int lock = irq_lock();

	zassert_false(lock, "IRQs shown locked in user mode\n");

	/* Generate a system call to manage the IRQ locking */
	test_arm_user_interrupt_syscall();

	/* Attempt to unlock IRQs in user mode */
	irq_unlock(0);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The first system call has left the IRQs locked.
	 * Generate a second system call to inspect the IRQ locking.
	 *
	 * In Cortex-M Baseline system calls cannot be invoked
	 * with interrupts locked, so we skip this part of the
	 * test.
	 */
	test_arm_user_interrupt_syscall();

	/* Verify that thread is not able to infer that IRQs are locked. */
	zassert_false(irq_lock(), "IRQs are shown to be locked\n");
#endif
}
451 #else
/* CONFIG_USERSPACE disabled: user-mode IRQ locking cannot be exercised. */
ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	TC_PRINT("Skipped\n");
}
456 #endif /* CONFIG_USERSPACE */
457 
458 #if defined(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION)
459 #pragma GCC push_options
460 #pragma GCC optimize("O0")
461 /* Avoid compiler optimizing null pointer de-referencing. */
/**
 * @brief Verify that dereferencing a NULL pointer raises a CPU exception
 * when CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION is enabled.
 */
ZTEST(arm_interrupt, test_arm_null_pointer_exception)
{
	int reason;

	struct test_struct {
		uint32_t val[2];
	};

	struct test_struct *test_struct_null_pointer = 0x0;

	expected_reason = K_ERR_CPU_EXCEPTION;

	/* Intentional NULL dereference; the fatal error handler consumes
	 * the resulting exception and resets expected_reason.
	 */
	printk("Reading a null pointer value: 0x%0x\n",
		test_struct_null_pointer->val[1]);

	reason = expected_reason;
	zassert_equal(reason, -1,
		"expected_reason has not been reset (%d)\n", reason);
}
481 #pragma GCC pop_options
482 #else
/* Null-pointer exception feature disabled: nothing to verify. */
ZTEST(arm_interrupt, test_arm_null_pointer_exception)
{
	TC_PRINT("Skipped\n");
}
487 
488 #endif /* CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION */
489 
490 /**
491  * @}
492  */
493