/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2017-2019 Nordic Semiconductor ASA.
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Thread context switching for ARM Cortex-M
 *
 * This module implements the routines necessary for thread context switching
 * on ARM Cortex-M CPUs.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include <zephyr/kernel.h>

#include <zephyr/arch/arm/cortex_m/cpu.h>

_ASM_FILE_PROLOGUE

GTEXT(z_arm_svc)
GTEXT(z_arm_pendsv)
GTEXT(z_do_kernel_oops)
#if defined(CONFIG_USERSPACE)
GTEXT(z_arm_do_syscall)
#endif

GDATA(_kernel)

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
GDATA(z_arm_tls_ptr)
#endif

/**
 *
 * @brief PendSV exception handler, handling context switches
 *
 * The PendSV exception is the only execution context in the system that can
 * perform context switching. When an execution context finds out it has to
 * switch contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when z_arm_pendsv() runs, we *know* we
 * have to swap *something*.
 *
 * For Cortex-M, z_arm_pendsv() is invoked with no arguments.
 */

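/*
 * For orientation, the switch performed below corresponds roughly to the
 * following C-like pseudo-code. This is only an illustrative sketch; the
 * helper names (save_callee_saved, lock_interrupts, clear_pendsv,
 * restore_callee_saved) are hypothetical and the field references are
 * shown for illustration only.
 *
 *   struct k_thread *old_thread = _kernel.cpus[0].current;
 *   save_callee_saved(old_thread);       r4-r11 + PSP (s16-s31 with FPU sharing)
 *   lock_interrupts();                   PRIMASK or BASEPRI, see below
 *   struct k_thread *new_thread = _kernel.ready_q.cache;
 *   _kernel.cpus[0].current = new_thread;
 *   clear_pendsv();
 *   restore_callee_saved(new_thread);    r4-r11 + PSP (s16-s31 with FPU sharing)
 *   return;                              exception return via EXC_RETURN in LR
 */
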
SECTION_FUNC(TEXT, z_arm_pendsv)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_out
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

    /* load _kernel into r1 and current k_thread into r2 */
    ldr r1, =_kernel
    ldr r2, [r1, #_kernel_offset_to_current]

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
    strb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

    /* addr of callee-saved regs in thread in r0 */
    ldr r0, =_thread_offset_to_callee_saved
    add r0, r2

    /* save callee-saved + psp in thread */
    mrs ip, PSP

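    /* Layout note: the callee-saved area addressed by r0 is expected to hold
     * nine words, r4-r11 followed by the saved PSP, matching the stmia/ldmia
     * sequences below (see struct _callee_saved in the ARM arch headers for
     * the authoritative definition).
     */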
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* Store current r4-r7 */
    stmea r0!, {r4-r7}
    /* copy r8-r12 into r3-r7 */
    mov r3, r8
    mov r4, r9
    mov r5, r10
    mov r6, r11
    mov r7, ip
    /* store r8-r12 */
    stmea r0!, {r3-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    stmia r0, {r4-r11, ip}
#ifdef CONFIG_FPU_SHARING
    /* Assess whether switched-out thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    bne .L_out_fp_endif

    /* FP context active: set FP state and store callee-saved registers.
     * Note: if Lazy FP stacking is enabled, storing the callee-saved
     * registers will automatically trigger FP state preservation in
     * the thread's stack. This will also clear the FPCCR.LSPACT flag.
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vstmia r0, {s16-s31}

.L_out_fp_endif:
    /* At this point FPCCR.LSPACT is guaranteed to be cleared,
     * regardless of whether the thread has an active FP context.
     */
#endif /* CONFIG_FPU_SHARING */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    /* Protect the kernel state while we play with the thread lists */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    cpsid i
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
    msr BASEPRI_MAX, r0
    isb /* Make the effect of disabling interrupts be realized immediately */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
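    /* Baseline cores have no BASEPRI register, so PRIMASK (cpsid i) masks all
     * interrupts here; Mainline cores raise the execution priority via
     * BASEPRI_MAX instead, which leaves interrupts of higher priority than
     * _EXC_IRQ_DEFAULT_PRIO (e.g. zero-latency IRQs) unmasked.
     */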

    /*
     * Prepare to clear PendSV with interrupts unlocked, but
     * don't clear it yet. PendSV must not be cleared until
     * the new thread has been context-switched in, since all decisions
     * to pend PendSV so far were taken with the current kernel
     * state, and that is what we are handling right now.
     */
    ldr r7, =_SCS_ICSR
    ldr r6, =_SCS_ICSR_UNPENDSV

    /* _kernel is still in r1 */

    /* fetch the thread to run from the ready queue cache */
    ldr r2, [r1, #_kernel_offset_to_ready_q_cache]

    str r2, [r1, #_kernel_offset_to_current]

    /*
     * Clear PendSV so that, if another interrupt comes in and decides
     * (based on the new kernel state, with the new thread switched in)
     * that it needs to reschedule, its newly pended PendSV is taken,
     * while PendSV requests pended before this point are not, since
     * they were based on the previous kernel state, which has now
     * been handled.
     */

    /* _SCS_ICSR is still in r7 and _SCS_ICSR_UNPENDSV in r6 */
    str r6, [r7, #0]
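    /* (Roughly equivalent CMSIS-style C: SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;) */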

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
    /* Grab the TLS pointer */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    ldr r4, =_thread_offset_to_tls
    adds r4, r2, r4
    ldr r0, [r4]
#else
    ldr r0, [r2, #_thread_offset_to_tls]
#endif

    /* For Cortex-M, store the TLS pointer in a global variable, since the
     * core lacks a process/thread ID register that the toolchain could use
     * to access thread data.
     */
    ldr r4, =z_arm_tls_ptr
    str r0, [r4]
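    /* The toolchain's TLS accesses on Cortex-M are expected to resolve
     * through a helper (typically __aeabi_read_tp) that returns the value
     * stored in z_arm_tls_ptr above.
     */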
#endif

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Restore EXC_RETURN value. */
    ldrsb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

    /* Restore previous interrupt disable state (irq_lock key)
     * (We clear the arch.basepri field after restoring state)
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && (_thread_offset_to_basepri > 124)
    /* Doing it this way since the offset to thread->arch.basepri can in
     * some configurations be larger than the maximum of 124 for ldr/str
     * immediate offsets.
     */
    ldr r4, =_thread_offset_to_basepri
    adds r4, r2, r4

    ldr r0, [r4]
    movs.n r3, #0
    str r3, [r4]
#else
    ldr r0, [r2, #_thread_offset_to_basepri]
    movs r3, #0
    str r3, [r2, #_thread_offset_to_basepri]
#endif
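    /* In effect: r0 = thread->arch.basepri; thread->arch.basepri = 0;
     * (illustrative field access; the real offsets come from the generated
     * offset headers).
     */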

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* BASEPRI not available, previous interrupt disable state
     * maps to PRIMASK.
     *
     * Only enable interrupts if value is 0, meaning interrupts
     * were enabled before irq_lock was called.
     */
    cmp r0, #0
    bne .L_thread_irq_disabled
    cpsie i
.L_thread_irq_disabled:

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,r3}
    mov lr, r3
#endif

#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r3, =_thread_offset_to_mode
    adds r3, r2, r3
    ldr r0, [r3]
    mrs r3, CONTROL
    movs.n r1, #1
    bics r3, r1
    orrs r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#endif

    ldr r4, =_thread_offset_to_callee_saved
    adds r0, r2, r4

    /* restore r4-r12 for new thread */
    /* first restore r8-r12 located after r4-r7 (4*4bytes) */
    adds r0, #16
    ldmia r0!, {r3-r7}
    /* move to correct registers */
    mov r8, r3
    mov r9, r4
    mov r10, r5
    mov r11, r6
    mov ip, r7
    /* restore r4-r7, go back 9*4 bytes to the start of the stored block */
    subs r0, #36
    ldmia r0!, {r4-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* restore BASEPRI for the incoming thread */
    msr BASEPRI, r0

#ifdef CONFIG_FPU_SHARING
    /* Assess whether switched-in thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    beq .L_in_fp_active
    /* FP context inactive for swapped-in thread:
     * - reset FPSCR to 0
     * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
     *   from PendSV)
     */
    movs.n r3, #0
    vmsr fpscr, r3
    b .L_in_fp_endif

.L_in_fp_active:
    /* FP context active:
     * - clear EXC_RETURN.F_Type
     * - FPSCR and caller-saved registers will be restored automatically
     * - restore callee-saved FP registers
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vldmia r0, {s16-s31}
.L_in_fp_endif:
    /* Clear CONTROL.FPCA that may have been set by FP instructions */
    mrs r3, CONTROL
    bic r3, #_CONTROL_FPCA_Msk
    msr CONTROL, r3
    isb
#endif

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2 /* _current thread */
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,lr}
#endif

#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r0, [r2, #_thread_offset_to_mode]
    mrs r3, CONTROL
    bic r3, #1
    orr r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

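    /* The CONTROL update above is roughly, in CMSIS-style C:
     *   __set_CONTROL((__get_CONTROL() & ~CONTROL_nPRIV_Msk) | thread->arch.mode);
     * where bit 0 of the thread's mode word (CONTROL.nPRIV) selects privileged
     * vs. unprivileged execution in Thread mode.
     */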
#endif

    /* load callee-saved + psp from thread */
    add r0, r2, #_thread_offset_to_callee_saved
    ldmia r0, {r4-r11, ip}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    msr PSP, ip

#ifdef CONFIG_BUILTIN_STACK_GUARD
    /* r2 contains k_thread */
    add r0, r2, #0
    push {r2, lr}
    bl configure_builtin_stack_guard
    pop {r2, lr}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

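    /* On the exception return below, the hardware restores the incoming
     * thread's basic frame (r0-r3, r12, lr, pc, xPSR) from the PSP set above,
     * plus the FP caller-saved state if the frame is an extended (FP) one.
     */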
    /*
     * Cortex-M: return from PendSV exception
     */
    bx lr

/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used on the following occasions:
 * - IRQ offloading
 * - Kernel run-time exceptions
 * - System Calls (User mode)
 *
 */
SECTION_FUNC(TEXT, z_arm_svc)
    /* Use EXC_RETURN state to find out if stack frame is on the
     * MSP or PSP
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r0, #_EXC_RETURN_SPSEL_Msk
    mov r1, lr
    tst r1, r0
    beq .L_stack_frame_msp
    mrs r0, PSP
    bne .L_stack_frame_endif
.L_stack_frame_msp:
    mrs r0, MSP
.L_stack_frame_endif:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode ? */
    ite eq  /* if zero (equal), came from handler mode */
        mrseq r0, MSP   /* handler mode, stack frame is on MSP */
        mrsne r0, PSP   /* thread mode, stack frame is on PSP */
#endif

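    /* The hardware-stacked exception frame at r0 is laid out as:
     *   [#0]  r0    [#4]  r1    [#8]  r2    [#12] r3
     *   [#16] r12   [#20] lr    [#24] pc    [#28] xPSR
     * which is why the return address is read from offset #24 below.
     */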
    /* Figure out what SVC call number was invoked */

    ldr r1, [r0, #24]   /* grab address of PC from stack frame */
    /* SVC is a two-byte instruction, point to it and read the
     * SVC number (lower byte of the SVC instruction)
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    subs r1, r1, #2
    ldrb r1, [r1]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldrb r1, [r1, #-2]
#endif

    /*
     * grab service call number:
     * 0: Unused
     * 1: irq_offload (if configured)
     * 2: kernel panic or oops (software generated fatal exception)
     * 3: System call (if user mode supported)
     */
#if defined(CONFIG_USERSPACE)
    mrs r2, CONTROL

    cmp r1, #3
    beq .L_do_syscall

    /*
     * check that we are privileged before invoking other SVCs;
     * oops if we are unprivileged
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #0x1
    tst r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst r2, #0x1
#endif
    bne .L_oops

#endif /* CONFIG_USERSPACE */

    cmp r1, #2
    beq .L_oops

#if defined(CONFIG_IRQ_OFFLOAD)
    push {r0, lr}
    bl z_irq_do_offload  /* call C routine which executes the offload */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r3}
    mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    pop {r0, lr}
#endif

    /* exception return is done in z_arm_int_exit() */
    ldr r0, =z_arm_int_exit
    bx r0

#endif

.L_oops:
    push {r0, lr}
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* Build _callee_saved_t. To match the struct
     * definition we push the psp & then r11-r4
     */
    mrs r1, PSP
    push {r1, r2}
    push {r4-r11}
    mov  r1, sp /* pointer to _callee_saved_t */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
    mov r2, lr /* EXC_RETURN */
    bl z_do_kernel_oops
    /* return from SVC exception is done below */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* We do not need to restore any register state here
     * because we did not use any callee-saved registers
     * in this routine. Therefore, we can just reset
     * the MSP to its value prior to entering the function
     */
    add sp, #40
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
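    /* Popping the EXC_RETURN value pushed at the start of .L_oops into PC
     * performs the actual return from the SVC exception.
     */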
    pop {r0, pc}

#if defined(CONFIG_USERSPACE)
    /*
     * The system call path sets up a jump to z_arm_do_syscall(), taken when
     * the SVC handler returns via bx lr.
     *
     * There is some trickery involved here because we have to preserve
     * the original PC value so that we can return back to the caller of
     * the SVC.
     *
     * On SVC exception, the stack looks like the following:
     * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
     *
     * Registers look like:
     * r0 - arg1
     * r1 - arg2
     * r2 - arg3
     * r3 - arg4
     * r4 - arg5
     * r5 - arg6
     * r6 - call_id
     * r8 - saved link register
     */
.L_do_syscall:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #24
    ldr r1, [r0, r3]   /* grab address of PC from stack frame */
    mov r8, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r8, [r0, #24]   /* grab address of PC from stack frame */
#endif
    ldr r1, =z_arm_do_syscall
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    str r1, [r0, r3]   /* overwrite the PC to point to z_arm_do_syscall */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    str r1, [r0, #24]   /* overwrite the PC to point to z_arm_do_syscall */
#endif
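    /* From this point, the exception return at the end of this handler will
     * resume execution in z_arm_do_syscall() in Thread mode, with the
     * original return address preserved in r8 as described above.
     */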

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    ldr r3, =K_SYSCALL_LIMIT
    cmp r6, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* validate syscall limit */
    ldr ip, =K_SYSCALL_LIMIT
    cmp r6, ip
#endif
    /* The supplied syscall_id must be lower than the limit
     * (Requires unsigned integer comparison)
     */
    blo .L_valid_syscall_id

    /* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
    str r6, [r0]
    ldr r6, =K_SYSCALL_BAD

    /* Bad syscalls are treated as valid syscalls with ID K_SYSCALL_BAD. */

.L_valid_syscall_id:
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    mov ip, r2
    ldr r1, =_thread_offset_to_mode
    ldr r3, [r0, r1]
    movs r2, #1
    bics r3, r2
    /* Store (privileged) mode in thread's mode state variable */
    str r3, [r0, r1]
    mov r2, ip
    dsb
    /* set mode to privileged, r2 still contains value from CONTROL */
    movs r3, #1
    bics r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r1, [r0, #_thread_offset_to_mode]
    bic r1, #1
    /* Store (privileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
    dsb
    /* set mode to privileged, r2 still contains value from CONTROL */
    bic r2, #1
#endif
    msr CONTROL, r2

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Thread is now in privileged mode; after returning from SVCall it
     * will use the default (user) stack before switching to the privileged
     * stack to execute the system call. We need to protect the user stack
     * against stack overflows until this stack transition.
     */
    ldr r1, [r0, #_thread_offset_to_stack_info_start]    /* stack_info.start */
    msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */

    /* return from SVC to the modified LR - z_arm_do_syscall */
    bx lr
#endif /* CONFIG_USERSPACE */
