/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2017-2019 Nordic Semiconductor ASA.
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Thread context switching for ARM Cortex-M
 *
 * This module implements the routines necessary for thread context switching
 * on ARM Cortex-M CPUs.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include <zephyr/kernel.h>

#include <zephyr/arch/arm/cortex_m/cpu.h>

_ASM_FILE_PROLOGUE

GTEXT(z_arm_svc)
GTEXT(z_arm_pendsv)
GTEXT(z_do_kernel_oops)
#if defined(CONFIG_USERSPACE)
GTEXT(z_arm_do_syscall)
#endif

GDATA(_kernel)

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
GDATA(z_arm_tls_ptr)
#endif

/**
 *
 * @brief PendSV exception handler, handling context switches
 *
 * The PendSV exception is the only execution context in the system that can
 * perform context switching. When an execution context finds out it has to
 * switch contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when z_arm_pendsv() runs, we *know* we
 * have to swap *something*.
 *
 * For Cortex-M, z_arm_pendsv() is invoked with no arguments.
 */

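/* Note on the saved state (illustrative; the authoritative layout is the
 * struct _callee_saved definition in the arch headers):
 *
 * - On exception entry, the hardware has already pushed the caller-saved
 *   registers r0-r3, r12, lr, pc and xPSR onto the outgoing thread's
 *   process stack (PSP), so they need no attention here.
 * - This handler saves only what the hardware does not: the callee-saved
 *   registers r4-r11 plus the PSP itself, stored into the thread object at
 *   _thread_offset_to_callee_saved, roughly:
 *
 *       struct _callee_saved {
 *           uint32_t v1;   (r4)
 *           ...
 *           uint32_t v8;   (r11)
 *           uint32_t psp;  (saved process stack pointer)
 *       };
 */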
SECTION_FUNC(TEXT, z_arm_pendsv)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_out
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

    /* load _kernel into r1 and current k_thread into r2 */
    ldr r1, =_kernel
    ldr r2, [r1, #_kernel_offset_to_current]

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
    strb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

    /* addr of callee-saved regs in thread in r0 */
    ldr r0, =_thread_offset_to_callee_saved
    add r0, r2

    /* save callee-saved + psp in thread */
    mrs ip, PSP

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* Store current r4-r7 */
    stmea r0!, {r4-r7}
    /* copy r8-r11 and ip (holding PSP) into r3-r7 */
    mov r3, r8
    mov r4, r9
    mov r5, r10
    mov r6, r11
    mov r7, ip
    /* store r8-r11 and PSP */
    stmea r0!, {r3-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    stmia r0, {r4-r11, ip}
#ifdef CONFIG_FPU_SHARING
    /* Assess whether switched-out thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    bne out_fp_endif

    /* FP context active: set FP state and store callee-saved registers.
     * Note: if Lazy FP stacking is enabled, storing the callee-saved
     * registers will automatically trigger FP state preservation in
     * the thread's stack. This will also clear the FPCCR.LSPACT flag.
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vstmia r0, {s16-s31}

out_fp_endif:
    /* At this point FPCCR.LSPACT is guaranteed to be cleared,
     * regardless of whether the thread has an active FP context.
     */
#endif /* CONFIG_FPU_SHARING */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    /* Protect the kernel state while we play with the thread lists */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    cpsid i
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
    msr BASEPRI_MAX, r0
    isb /* Make the effect of disabling interrupts be realized immediately */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
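
    /* Background on the interrupt lock above: ARMv6-M/ARMv8-M Baseline has
     * no BASEPRI register, so the only way to lock out interrupts is
     * PRIMASK (cpsid i), which masks all configurable interrupts. Mainline
     * cores instead raise BASEPRI to _EXC_IRQ_DEFAULT_PRIO, masking regular
     * IRQs while leaving higher-priority exceptions (e.g. zero-latency
     * interrupts, if enabled) unmasked. The BASEPRI_MAX alias only updates
     * BASEPRI when the write would raise the masking level.
     */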

    /*
     * Prepare to clear PendSV with interrupts unlocked, but
     * don't clear it yet. PendSV must not be cleared until
     * the new thread is context-switched in since all decisions
     * to pend PendSV have been taken with the current kernel
     * state and this is what we're handling currently.
     */
    ldr r7, =_SCS_ICSR
    ldr r6, =_SCS_ICSR_UNPENDSV

    /* _kernel is still in r1 */

    /* fetch the thread to run from the ready queue cache */
    ldr r2, [r1, #_kernel_offset_to_ready_q_cache]

    str r2, [r1, #_kernel_offset_to_current]

    /*
     * Clear PendSV so that if another interrupt comes in and decides,
     * based on the new kernel state (with the new thread switched in),
     * that it needs to reschedule, its newly pended PendSV will take
     * effect, while any previously pended PendSV does not: those were
     * based on the previous kernel state, which has now been handled.
     */

    /* _SCS_ICSR is still in r7 and _SCS_ICSR_UNPENDSV in r6 */
    str r6, [r7, #0]
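    /* The value written above is the ICSR PENDSVCLR bit; the set/clear bits
     * in this register are write-one-to-act, so writing only that bit
     * removes the pending state of PendSV without disturbing any other
     * pending exception.
     */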

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
    /* Grab the TLS pointer */
    ldr r4, =_thread_offset_to_tls
    adds r4, r2, r4
    ldr r0, [r4]

    /* For Cortex-M, store the TLS pointer in a global variable, since the
     * architecture lacks a process/thread ID register that the toolchain
     * could use to locate thread data.
     */
    ldr r4, =z_arm_tls_ptr
    str r0, [r4]
#endif
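    /* z_arm_tls_ptr is what the TLS access helper for this architecture
     * (e.g. __aeabi_read_tp in the arch support code) is expected to
     * return, so updating it here is what redirects __thread variable
     * accesses to the incoming thread's TLS area.
     */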

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Restore EXC_RETURN value. */
    ldrsb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

    /* Restore previous interrupt disable state (irq_lock key)
     * (We clear the arch.basepri field after restoring state)
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && (_thread_offset_to_basepri > 124)
    /* Doing it this way since the offset to thread->arch.basepri can in
     * some configurations be larger than the maximum of 124 for ldr/str
     * immediate offsets.
     */
    ldr r4, =_thread_offset_to_basepri
    adds r4, r2, r4

    ldr r0, [r4]
    movs.n r3, #0
    str r3, [r4]
#else
    ldr r0, [r2, #_thread_offset_to_basepri]
    movs r3, #0
    str r3, [r2, #_thread_offset_to_basepri]
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* BASEPRI not available, previous interrupt disable state
     * maps to PRIMASK.
     *
     * Only enable interrupts if value is 0, meaning interrupts
     * were enabled before irq_lock was called.
     */
    cmp r0, #0
    bne _thread_irq_disabled
    cpsie i
_thread_irq_disabled:

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,r3}
    mov lr, r3
#endif

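    /* The thread's 'mode' word caches the thread's user/privileged setting:
     * bit 0 of that word records the thread's CONTROL.nPRIV value. The
     * CONFIG_USERSPACE sequences below clear the nPRIV bit in the live
     * CONTROL value and OR in the thread's cached mode word, so the
     * incoming thread resumes with the privilege level it last ran with.
     */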
#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r3, =_thread_offset_to_mode
    adds r3, r2, r3
    ldr r0, [r3]
    mrs r3, CONTROL
    movs.n r1, #1
    bics r3, r1
    orrs r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#endif

    ldr r4, =_thread_offset_to_callee_saved
    adds r0, r2, r4

    /* restore r4-r11 and PSP (held in ip) for the new thread */
    /* first restore r8-r11 and ip, located after r4-r7 (4*4 bytes) */
    adds r0, #16
    ldmia r0!, {r3-r7}
    /* move to correct registers */
    mov r8, r3
    mov r9, r4
    mov r10, r5
    mov r11, r6
    mov ip, r7
    /* restore r4-r7, go back 9*4 bytes to the start of the stored block */
    subs r0, #36
    ldmia r0!, {r4-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* restore BASEPRI for the incoming thread */
    msr BASEPRI, r0

#ifdef CONFIG_FPU_SHARING
    /* Assess whether switched-in thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    beq in_fp_active
    /* FP context inactive for swapped-in thread:
     * - reset FPSCR to 0
     * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
     *   from pendSV)
     */
    movs.n r3, #0
    vmsr fpscr, r3
    b in_fp_endif

in_fp_active:
    /* FP context active:
     * - clear EXC_RETURN.F_Type
     * - FPSCR and caller-saved registers will be restored automatically
     * - restore callee-saved FP registers
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vldmia r0, {s16-s31}
in_fp_endif:
    /* Clear CONTROL.FPCA that may have been set by FP instructions */
    mrs r3, CONTROL
    bic r3, #_CONTROL_FPCA_Msk
    msr CONTROL, r3
    isb
#endif

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2 /* arch_current_thread() thread */
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,lr}
#endif

#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r0, [r2, #_thread_offset_to_mode]
    mrs r3, CONTROL
    bic r3, #1
    orr r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#endif

    /* load callee-saved + psp from thread */
    add r0, r2, #_thread_offset_to_callee_saved
    ldmia r0, {r4-r11, ip}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    msr PSP, ip

#ifdef CONFIG_BUILTIN_STACK_GUARD
    /* r2 contains k_thread */
    add r0, r2, #0
    push {r2, lr}
    bl configure_builtin_stack_guard
    pop {r2, lr}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

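    /* Returning with bx lr in Handler mode, with an EXC_RETURN value in lr,
     * triggers the exception return sequence: the hardware switches back to
     * the process stack (PSP now points at the incoming thread's frame) and
     * unstacks r0-r3, r12, lr, pc and xPSR (plus the caller-saved FP state
     * if the frame is an extended FP frame), resuming the new thread.
     */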
    /*
     * Cortex-M: return from PendSV exception
     */
    bx lr

/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used on the following occasions:
 * - IRQ offloading
 * - Kernel run-time exceptions
 * - System Calls (User mode)
 *
 */
SECTION_FUNC(TEXT, z_arm_svc)
    /* Use the EXC_RETURN state to find out whether the stack frame is on
     * the MSP or the PSP.
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r0, #_EXC_RETURN_SPSEL_Msk
    mov r1, lr
    tst r1, r0
    beq _stack_frame_msp
    mrs r0, PSP
    bne _stack_frame_endif
_stack_frame_msp:
    mrs r0, MSP
_stack_frame_endif:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode? */
    ite eq  /* if zero (equal), came from handler mode */
        mrseq r0, MSP   /* handler mode, stack frame is on MSP */
        mrsne r0, PSP   /* thread mode, stack frame is on PSP */
#endif

    /* Figure out what SVC call number was invoked */

    ldr r1, [r0, #24]   /* grab the stacked PC (return address) */
    /* SVC is a two-byte instruction; point back at it and read the
     * SVC number (the lower byte of the SVC instruction).
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    subs r1, r1, #2
    ldrb r1, [r1]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldrb r1, [r1, #-2]
#endif
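    /* Why [PC - 2] works: the stacked PC is the return address, i.e. the
     * address of the instruction following the SVC. The Thumb SVC encoding
     * is 0xDF<imm8>, stored little-endian, so the 8-bit immediate is the
     * byte at the lower address and a byte load from (stacked PC - 2)
     * yields the SVC number directly.
     */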

    /*
     * grab service call number:
     * 0: Unused
     * 1: irq_offload (if configured)
     * 2: kernel panic or oops (software generated fatal exception)
     * 3: System call (if user mode supported)
     */
#if defined(CONFIG_USERSPACE)
    mrs r2, CONTROL

    cmp r1, #3
    beq _do_syscall

    /*
     * Check that we are privileged before invoking other SVCs;
     * oops if we are unprivileged.
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #0x1
    tst r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst r2, #0x1
#endif
    bne _oops

#endif /* CONFIG_USERSPACE */

    cmp r1, #2
    beq _oops

#if defined(CONFIG_IRQ_OFFLOAD)
    push {r0, lr}
    bl z_irq_do_offload  /* call C routine which executes the offload */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r3}
    mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    pop {r0, lr}
#endif

    /* exception return is done in z_arm_int_exit() */
    ldr r0, =z_arm_int_exit
    bx r0

#endif

_oops:
    push {r0, lr}
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* Build _callee_saved_t. To match the struct
     * definition we push the psp & then r11-r4
     */
    mrs r1, PSP
    push {r1, r2}
    push {r4-r11}
    mov  r1, sp /* pointer to _callee_saved_t */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
    mov r2, lr /* EXC_RETURN */
    bl z_do_kernel_oops
    /* return from SVC exception is done here */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* We do not need to restore any register state here
     * because we did not use any callee-saved registers
     * in this routine. Therefore, we can just reset
     * the MSP to its value prior to entering the function
     */
    add sp, #40
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
    pop {r0, pc}

#if defined(CONFIG_USERSPACE)
    /*
     * System call will set up a jump to the z_arm_do_syscall() function
     * when the SVC returns via the bx lr.
     *
     * There is some trickery involved here because we have to preserve
     * the original PC value so that we can return back to the caller of
     * the SVC.
     *
     * On SVC exception, the stack looks like the following:
     * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
     *
     * Registers look like:
     * r0 - arg1
     * r1 - arg2
     * r2 - arg3
     * r3 - arg4
     * r4 - arg5
     * r5 - arg6
     * r6 - call_id
     * r8 - saved link register
     */
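    /* For reference, the register convention above is what the user-mode
     * syscall invocation stubs are expected to set up before issuing the
     * system-call SVC; conceptually (a simplified sketch, not the literal
     * stub code):
     *
     *     r0..r5 = arg1..arg6
     *     r6     = call_id
     *     svc #3
     *
     * The stacked PC is copied into r8 below so that z_arm_do_syscall()
     * can eventually return to the instruction following that svc.
     */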
_do_syscall:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #24
    ldr r1, [r0, r3]   /* grab the stacked PC (return address) */
    mov r8, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r8, [r0, #24]   /* grab the stacked PC (return address) */
#endif
    ldr r1, =z_arm_do_syscall
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    str r1, [r0, r3]   /* overwrite the stacked PC to point to z_arm_do_syscall */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    str r1, [r0, #24]   /* overwrite the stacked PC to point to z_arm_do_syscall */
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    ldr r3, =K_SYSCALL_LIMIT
    cmp r6, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* validate syscall limit */
    ldr ip, =K_SYSCALL_LIMIT
    cmp r6, ip
#endif
    /* The supplied syscall_id must be lower than the limit
     * (Requires unsigned integer comparison)
     */
    blo valid_syscall_id

    /* bad syscall id.  Set arg1 to bad id and set call_id to SYSCALL_BAD */
    str r6, [r0]
    ldr r6, =K_SYSCALL_BAD

    /* Bad syscalls are treated as valid syscalls with ID K_SYSCALL_BAD. */

valid_syscall_id:
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    mov ip, r2
    ldr r1, =_thread_offset_to_mode
    ldr r3, [r0, r1]
    movs r2, #1
    bics r3, r2
    /* Store (privileged) mode in thread's mode state variable */
    str r3, [r0, r1]
    mov r2, ip
    dsb
    /* set mode to privileged, r2 still contains value from CONTROL */
    movs r3, #1
    bics r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r1, [r0, #_thread_offset_to_mode]
    bic r1, #1
    /* Store (privileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
    dsb
    /* set mode to privileged, r2 still contains value from CONTROL */
    bic r2, #1
#endif
    msr CONTROL, r2

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Thread is now in privileged mode; after returning from the SVCall it
     * will use the default (user) stack before switching to the privileged
     * stack to execute the system call. We need to protect the user stack
     * against stack overflows until this stack transition.
     */
    ldr r1, [r0, #_thread_offset_to_stack_info_start]    /* stack_info.start */
    msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */

    /* return from SVC to the modified LR - z_arm_do_syscall */
    bx lr
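    /* On this exception return, the hardware unstacks the (modified) frame:
     * the thread resumes in Thread mode, now privileged, at
     * z_arm_do_syscall() rather than at the original return address. The
     * original return address is preserved in r8 (see above), so the
     * syscall dispatcher can branch back to the caller once the system
     * call has been carried out on the privileged stack.
     */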
#endif /* CONFIG_USERSPACE */