/*
 * Copyright (c) 2019 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/sys/barrier.h>

static volatile int test_flag;
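/* -1 indicates that no fault is currently expected
 * (see k_sys_fatal_error_handler below).
 */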
static volatile int expected_reason = -1;

/* Used to validate ESF collection during a fault */
static volatile int run_esf_validation;
static volatile int esf_validation_rv;
static volatile uint32_t expected_msp;
static K_THREAD_STACK_DEFINE(esf_collection_stack, 2048);
static struct k_thread esf_collection_thread;
#define MAIN_PRIORITY 7
#define PRIORITY 5

/**
 * Validates that pEsf matches state from set_regs_with_known_pattern()
 */
static int check_esf_matches_expectations(const struct arch_esf *pEsf)
{
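	/* Thumb UDF (T1 encoding) is 0xde00 | imm8, so "udf #90" assembles
	 * to 0xde5a.
	 */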
	const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */
	const bool caller_regs_match_expected =
		(pEsf->basic.r0 == 0) &&
		(pEsf->basic.r1 == 1) &&
		(pEsf->basic.r2 == 2) &&
		(pEsf->basic.r3 == 3) &&
		(pEsf->basic.lr == 15) &&
		(*(uint16_t *)pEsf->basic.pc == expected_fault_instruction);
	if (!caller_regs_match_expected) {
		printk("__basic_sf member of ESF is incorrect\n");
		return -1;
	}

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	const struct _callee_saved *callee_regs = pEsf->extra_info.callee;
	const bool callee_regs_match_expected =
		(callee_regs->v1 /* r4 */ == 4) &&
		(callee_regs->v2 /* r5 */ == 5) &&
		(callee_regs->v3 /* r6 */ == 6) &&
		(callee_regs->v4 /* r7 */ == 7) &&
		(callee_regs->v5 /* r8 */ == 8) &&
		(callee_regs->v6 /* r9 */ == 9) &&
		(callee_regs->v7 /* r10 */ == 10) &&
		(callee_regs->v8 /* r11 */ == 11);
	if (!callee_regs_match_expected) {
		printk("_callee_saved_t member of ESF is incorrect\n");
		return -1;
	}

	/* We expect the EXC_RETURN value to have:
	 * - PREFIX: bits [31:24] = 0xFF
	 * - Mode, bit [3] = 1, since the exception occurred in thread mode
	 * - SPSEL, bit [2] = 1, since the frame should reside on the PSP
	 */
	const uint32_t exc_bits_set_mask = 0xff00000C;

	if ((pEsf->extra_info.exc_return & exc_bits_set_mask) !=
	    exc_bits_set_mask) {
		printk("Incorrect EXC_RETURN of 0x%08x\n",
		       pEsf->extra_info.exc_return);
		return -1;
	}

	/* The PSP should match the contents of the ESF copy up
	 * to the xPSR. (The xPSR value in the copy used for pEsf
	 * is overwritten in fault.c.)
	 */
	if (memcmp((void *)callee_regs->psp, pEsf,
		   offsetof(struct arch_esf, basic.xpsr)) != 0) {
		printk("psp does not match __basic_sf provided\n");
		return -1;
	}

	if (pEsf->extra_info.msp != expected_msp) {
		printk("MSP is 0x%08x but should be 0x%08x\n",
		       pEsf->extra_info.msp, expected_msp);
		return -1;
	}
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	return 0;
}

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	TC_PRINT("Caught system error -- reason %d\n", reason);

	if (expected_reason == -1) {
		printk("Was not expecting a crash\n");
		k_fatal_halt(reason);
	}

	if (reason != expected_reason) {
		printk("Wrong crash type got %d expected %d\n", reason,
		       expected_reason);
		k_fatal_halt(reason);
	}

	if (run_esf_validation) {
		if (check_esf_matches_expectations(pEsf) == 0) {
			esf_validation_rv = TC_PASS;
		}
		run_esf_validation = 0;
	}

	expected_reason = -1;
}

/**
 * Set ARM registers with a known pattern:
 * r0-r12 are set to 0...12, respectively
 * r13 (sp) is left untouched
 * r14 (lr) is set to 15 (since a fault takes place, we never use the value)
 * r15 (pc) will point to the faulting instruction (udf #90)
 *
 * Note: Routine was written to be ARMv6-M compatible
 *
 * In k_sys_fatal_error_handler above we will check that the ESF provided
 * as a parameter matches these expectations.
 */
void set_regs_with_known_pattern(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	__asm__ volatile(
		"mov r1, #1\n"
		"mov r2, #2\n"
		"mov r3, #3\n"
		"mov r4, #4\n"
		"mov r5, #5\n"
		"mov r6, #6\n"
		"mov r7, #7\n"
142 "mov r0, #8\n"
143 "mov r8, r0\n"
144 "add r0, r0, #1\n"
145 "mov r9, r0\n"
146 "add r0, r0, #1\n"
147 "mov r10, r0\n"
148 "add r0, r0, #1\n"
149 "mov r11, r0\n"
150 "add r0, r0, #1\n"
151 "mov r12, r0\n"
152 "add r0, r0, #3\n"
153 "mov lr, r0\n"
154 "mov r0, #0\n"
155 "udf #90\n"
156 );
157 }
158
ZTEST(arm_interrupt, test_arm_esf_collection)
{
	int test_validation_rv;

	/* if the check in the fault handler succeeds,
	 * this will be set to TC_PASS
	 */
	esf_validation_rv = TC_FAIL;

	/* since the fault is from a task, the interrupt stack (msp)
	 * should match whatever the current value is
	 */
	expected_msp = __get_MSP();

	run_esf_validation = 1;
	expected_reason = K_ERR_CPU_EXCEPTION;

	/* Create the crashy thread below at a cooperative priority, higher
	 * than this (preemptive) main thread, to guarantee it runs to
	 * completion before we get to the end of this function.
	 */
	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));

	TC_PRINT("Testing ESF Reporting\n");
	k_thread_create(&esf_collection_thread, esf_collection_stack,
			K_THREAD_STACK_SIZEOF(esf_collection_stack),
			set_regs_with_known_pattern,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);

	test_validation_rv = esf_validation_rv;

	zassert_not_equal(test_validation_rv, TC_FAIL,
			  "ESF fault collection failed");
}

void arm_isr_handler(const void *args)
{
	ARG_UNUSED(args);

#if defined(CONFIG_CPU_CORTEX_M) && defined(CONFIG_FPU) && \
	defined(CONFIG_FPU_SHARING)
	/* Clear the Floating Point Status and Control Register (FPSCR),
	 * to prevent the interrupt line from being set to pending again,
	 * in case the FPU IRQ is selected by the test as the "Available
	 * IRQ line".
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "Tail predication not applied", as that is its reset
	 * value.
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#endif

	test_flag++;

	if (test_flag == 1) {
		/* Intentional Kernel oops */
		expected_reason = K_ERR_KERNEL_OOPS;
		k_oops();
	} else if (test_flag == 2) {
		/* Intentional Kernel panic */
		expected_reason = K_ERR_KERNEL_PANIC;
		k_panic();
	} else if (test_flag == 3) {
		/* Intentional ASSERT */
		expected_reason = K_ERR_KERNEL_PANIC;
		__ASSERT(0, "Intentional assert\n");
	} else if (test_flag == 4) {
#if defined(CONFIG_HW_STACK_PROTECTION)
		/*
		 * Verify that the Stack Overflow has been reported by the core
		 * and the expected reason variable is reset.
		 */
		int reason = expected_reason;

		zassert_equal(reason, -1,
			      "expected_reason has not been reset (%d)\n", reason);
#endif
	}
}

ZTEST(arm_interrupt, test_arm_interrupt)
{
	/* Determine an NVIC IRQ line that is not currently in use. */
	int i;
	int init_flag, post_flag, reason;

	init_flag = test_flag;

	zassert_false(init_flag, "Test flag not initialized to zero\n");

	for (i = CONFIG_NUM_IRQS - 1; i >= 0; i--) {
		if (NVIC_GetEnableIRQ(i) == 0) {
			/*
			 * Interrupts configured statically with IRQ_CONNECT()
			 * are automatically enabled. NVIC_GetEnableIRQ()
			 * returning false, here, implies that the IRQ line is
			 * either not implemented or it is not enabled, thus,
			 * currently not in use by Zephyr.
			 */

			/* Set the NVIC line to pending. */
			NVIC_SetPendingIRQ(i);

			if (NVIC_GetPendingIRQ(i)) {
				/* If the NVIC line is pending, it is
				 * guaranteed that it is implemented; clear the
				 * line.
				 */
				NVIC_ClearPendingIRQ(i);

				if (!NVIC_GetPendingIRQ(i)) {
					/*
					 * If the NVIC line can be successfully
					 * un-pended, it is guaranteed that it
					 * can be used for software interrupt
					 * triggering.
					 */
					break;
				}
			}
		}
	}

	zassert_true(i >= 0,
		     "No available IRQ line to use in the test\n");

290 TC_PRINT("Available IRQ line: %u\n", i);

	/* Verify that triggering an interrupt on an IRQ line,
	 * on which an ISR has not yet been installed, leads
	 * to a fault of type K_ERR_SPURIOUS_IRQ.
	 */
	expected_reason = K_ERR_SPURIOUS_IRQ;
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);
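	/* Barriers ensure the pended IRQ is taken, and the spurious-IRQ
	 * fault raised, before execution proceeds to the checks below.
	 */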
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* Verify that the spurious ISR has led to the fault and the
	 * expected reason variable is reset.
	 */
	reason = expected_reason;
	zassert_equal(reason, -1,
		      "expected_reason has not been reset (%d)\n", reason);
	NVIC_DisableIRQ(i);

	arch_irq_connect_dynamic(i, 0 /* highest priority */,
				 arm_isr_handler,
				 NULL,
				 0);

	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);

	for (int j = 1; j <= 3; j++) {

		/* Set the dynamic IRQ to pending state. */
		NVIC_SetPendingIRQ(i);

		/*
		 * Data and instruction barriers to make sure the pended
		 * NVIC IRQ is taken and serviced before 'test_flag' is
		 * checked.
		 */
		barrier_dsync_fence_full();
		barrier_isync_fence_full();

		/* Returning here implies the thread was not aborted. */

		/* Confirm test flag is set by the ISR handler. */
		post_flag = test_flag;
		zassert_true(post_flag == j, "Test flag not set by ISR\n");
	}

#if defined(CONFIG_HW_STACK_PROTECTION)
	/*
	 * Simulate a stacking error that is caused explicitly by the
	 * exception entry context stacking, to verify that the CPU can
	 * correctly report stacking errors that are not also Data
	 * access violation errors.
	 */
	expected_reason = K_ERR_STACK_CHK_FAIL;

	__disable_irq();

	/* Trigger an interrupt to cause the stacking error */
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);

	/* Manually set PSP almost at the bottom of the stack. An exception
	 * entry will make PSP descend below the limit and into the MPU guard
	 * section (or beyond the address pointed by PSPLIM in ARMv8-M MCUs).
	 */
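	/* Note: a basic exception frame occupies 0x20 bytes, so stacking from
	 * stack_info.start + 0x10 descends 0x10 bytes below the stack base.
	 */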
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && \
	defined(CONFIG_MPU_STACK_GUARD)
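/* Extra frame space stacked on exception entry with an active FP context:
 * S0-S15, FPSCR and a reserved word (18 words = 0x48 bytes).
 */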
#define FPU_STACK_EXTRA_SIZE 0x48
	/* If an FP context is present, we should not set the PSP
	 * too close to the end of the stack, because stacking of
	 * the ESF might corrupt kernel memory, making it not
	 * possible to continue the test execution.
	 */
	uint32_t fp_extra_size =
		(__get_CONTROL() & CONTROL_FPCA_Msk) ?
		FPU_STACK_EXTRA_SIZE : 0;
	__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
#else
	__set_PSP(_current->stack_info.start + 0x10);
#endif

	__enable_irq();
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* No stack variable access below this point.
	 * The IRQ will handle the verification.
	 */
#endif /* CONFIG_HW_STACK_PROTECTION */
}

#if defined(CONFIG_USERSPACE)
#include <zephyr/internal/syscall_handler.h>
#include "test_syscalls.h"

void z_impl_test_arm_user_interrupt_syscall(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Confirm IRQs are not locked */
	zassert_false(__get_PRIMASK(), "PRIMASK is set\n");
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

	static bool first_call = true;

	if (first_call) {

		/* First time the syscall is invoked */
		first_call = false;

		/* Lock IRQs in supervisor mode */
		unsigned int key = irq_lock();

		/* Verify that IRQs were not already locked */
		zassert_false(key, "IRQs locked in system call\n");
	}

	/* Confirm IRQs are still locked */
	zassert_true(__get_BASEPRI(), "BASEPRI not set\n");
#endif
}

static inline void z_vrfy_test_arm_user_interrupt_syscall(void)
{
	z_impl_test_arm_user_interrupt_syscall();
}
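/* Pull in the generated marshalling stub for this system call. */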
#include <zephyr/syscalls/test_arm_user_interrupt_syscall_mrsh.c>

ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	/* Test thread executing in user mode */
	zassert_true(arch_is_user_context(),
		     "Test thread not running in user mode\n");

	/* Attempt to lock IRQs in user mode */
	irq_lock();
	/* A second locking attempt would return the non-zero key from the
	 * previous attempt, had that attempt actually locked IRQs.
	 */
	int lock = irq_lock();

	zassert_false(lock, "IRQs shown locked in user mode\n");

	/* Generate a system call to manage the IRQ locking */
	test_arm_user_interrupt_syscall();

	/* Attempt to unlock IRQs in user mode */
	irq_unlock(0);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The first system call has left the IRQs locked.
	 * Generate a second system call to inspect the IRQ locking.
	 *
	 * In Cortex-M Baseline, system calls cannot be invoked
	 * with interrupts locked, so we skip this part of the
	 * test.
	 */
	test_arm_user_interrupt_syscall();

	/* Verify that the thread is not able to infer that IRQs are locked. */
	zassert_false(irq_lock(), "IRQs are shown to be locked\n");
#endif
}
#else
ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	TC_PRINT("Skipped\n");
}
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION)
#pragma GCC push_options
#pragma GCC optimize("O0")
/* Avoid the compiler optimizing away the null pointer dereference. */
ZTEST(arm_interrupt, test_arm_null_pointer_exception)
{
	int reason;

	struct test_struct {
		uint32_t val[2];
	};

	struct test_struct *test_struct_null_pointer = 0x0;

	expected_reason = K_ERR_CPU_EXCEPTION;

478 printk("Reading a null pointer value: 0x%0x\n",
479 test_struct_null_pointer->val[1]);

	reason = expected_reason;
	zassert_equal(reason, -1,
		      "expected_reason has not been reset (%d)\n", reason);
}
#pragma GCC pop_options
#else
ZTEST(arm_interrupt, test_arm_null_pointer_exception)
{
	TC_PRINT("Skipped\n");
}

#endif /* CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION */

497