/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2017-2019 Nordic Semiconductor ASA.
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Thread context switching for ARM Cortex-M and Cortex-R
 *
 * This module implements the routines necessary for thread context switching
 * on ARM Cortex-M and Cortex-R CPUs.
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <syscall.h>

#if defined(CONFIG_CPU_CORTEX_M)
#include <arch/arm/aarch32/cortex_m/cpu.h>
#endif
_ASM_FILE_PROLOGUE

GTEXT(z_arm_svc)
GTEXT(z_arm_pendsv)
GTEXT(z_do_kernel_oops)
#if defined(CONFIG_USERSPACE)
GTEXT(z_arm_do_syscall)
#endif

GDATA(_kernel)

#if defined(CONFIG_THREAD_LOCAL_STORAGE) && defined(CONFIG_CPU_CORTEX_M)
GDATA(z_arm_tls_ptr)
#endif

/**
 *
 * @brief PendSV exception handler, handling context switches
 *
 * The PendSV exception is the only execution context in the system that can
 * perform context switching. When an execution context finds out it has to
 * switch contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when z_arm_pendsv() runs, we *know* we
 * have to swap *something*.
 *
 * For Cortex-M, z_arm_pendsv() is invoked with no arguments.
 *
 * For Cortex-R, the PendSV exception is not supported by the architecture and
 * this function is called directly, either by z_arm_{exc,int}_exit in the case
 * of preemption, or by z_arm_svc in the case of cooperative switching.
 */

SECTION_FUNC(TEXT, z_arm_pendsv)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_out
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

    /* load _kernel into r1 and current k_thread into r2 */
    ldr r1, =_kernel
    ldr r2, [r1, #_kernel_offset_to_current]

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
    strb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif

    /* addr of callee-saved regs in thread in r0 */
    ldr r0, =_thread_offset_to_callee_saved
    add r0, r2

    /* save callee-saved + psp in thread */
#if defined(CONFIG_CPU_CORTEX_M)
    mrs ip, PSP
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* Store current r4-r7 */
    stmea r0!, {r4-r7}
    /* copy r8-r12 into r3-r7 */
    mov r3, r8
    mov r4, r9
    mov r5, r10
    mov r6, r11
    mov r7, ip
    /* store r8-r12 */
    stmea r0!, {r3-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
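    /* v1-v8 alias r4-r11; ip holds the PSP captured above */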
    stmia r0, {v1-v8, ip}
#ifdef CONFIG_FPU_SHARING
    /* Assess whether switched-out thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    bne out_fp_endif

    /* FP context active: set FP state and store callee-saved registers.
     * Note: if Lazy FP stacking is enabled, storing the callee-saved
     * registers will automatically trigger FP state preservation in
     * the thread's stack. This will also clear the FPCCR.LSPACT flag.
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vstmia r0, {s16-s31}

out_fp_endif:
    /* At this point FPCCR.LSPACT is guaranteed to be cleared,
     * regardless of whether the thread has an active FP context.
     */
#endif /* CONFIG_FPU_SHARING */
#elif defined(CONFIG_ARMV7_R)
    /* Store rest of process context */
    cps #MODE_SYS
    stm r0, {r4-r11, sp}
    cps #MODE_SVC
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    /* Protect the kernel state while we play with the thread lists */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    cpsid i
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
    msr BASEPRI_MAX, r0
    isb /* Make the effect of disabling interrupts be realized immediately */
#elif defined(CONFIG_ARMV7_R)
    /*
     * Interrupts are still disabled from arch_swap, so an empty clause
     * here avoids the preprocessor error below.
     */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

    /*
     * Prepare to clear PendSV with interrupts unlocked, but
     * don't clear it yet. PendSV must not be cleared until
     * the new thread is context-switched in since all decisions
     * to pend PendSV have been taken with the current kernel
     * state and this is what we're handling currently.
     */
#if defined(CONFIG_CPU_CORTEX_M)
    ldr v4, =_SCS_ICSR
    ldr v3, =_SCS_ICSR_UNPENDSV
#endif

    /* _kernel is still in r1 */

    /* fetch the thread to run from the ready queue cache */
    ldr r2, [r1, #_kernel_offset_to_ready_q_cache]

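    /* r2 now holds the incoming thread; make it the current thread */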
    str r2, [r1, #_kernel_offset_to_current]

    /*
     * Clear PendSV so that if another interrupt comes in and decides,
     * with the new kernel state based on the new thread being
     * context-switched in, that it needs to reschedule, it will take
     * effect; but any previously pended PendSV will not, since it was
     * based on the previous kernel state, which has already been handled.
     */

    /* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */
#if defined(CONFIG_CPU_CORTEX_M)
    str v3, [v4, #0]
#endif

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
    /* Grab the TLS pointer */
    ldr r4, =_thread_offset_to_tls
    adds r4, r2, r4
    ldr r0, [r4]

#if defined(CONFIG_CPU_CORTEX_R)
    /* Store TLS pointer in the "Process ID" register.
     * This register is used as a base pointer to all
     * thread variables, with offsets added by the toolchain.
     */
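    /* c13/c0/3 is TPIDRURO, the user read-only thread ID register. */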
    mcr 15, 0, r0, cr13, cr0, 3
#endif

#if defined(CONFIG_CPU_CORTEX_M)
    /* For Cortex-M, store the TLS pointer in a global variable,
     * as it lacks a process ID or thread ID register that the
     * toolchain could use to access thread data.
     */
    ldr r4, =z_arm_tls_ptr
    str r0, [r4]
#endif

#endif

#if defined(CONFIG_ARM_STORE_EXC_RETURN)
    /* Restore EXC_RETURN value. */
    ldrsb lr, [r2, #_thread_offset_to_mode_exc_return]
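    /* The signed byte load reconstructs the full 0xFFFFFFXX EXC_RETURN value. */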
#endif

    /* Restore previous interrupt disable state (irq_lock key)
     * (We clear the arch.basepri field after restoring state)
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && (_thread_offset_to_basepri > 124)
    /* Doing it this way since the offset to thread->arch.basepri can in
     * some configurations be larger than the maximum of 124 for ldr/str
     * immediate offsets.
     */
    ldr r4, =_thread_offset_to_basepri
    adds r4, r2, r4

    ldr r0, [r4]
    movs.n r3, #0
    str r3, [r4]
#else
    ldr r0, [r2, #_thread_offset_to_basepri]
    movs r3, #0
    str r3, [r2, #_thread_offset_to_basepri]
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* BASEPRI not available, previous interrupt disable state
     * maps to PRIMASK.
     *
     * Only enable interrupts if value is 0, meaning interrupts
     * were enabled before irq_lock was called.
     */
    cmp r0, #0
    bne _thread_irq_disabled
    cpsie i
_thread_irq_disabled:

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,r3}
    mov lr, r3
#endif

#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r3, =_thread_offset_to_mode
    adds r3, r2, r3
    ldr r0, [r3]
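    /* r0 now holds the thread's mode word; merge its nPRIV bit (bit 0) into CONTROL */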
    mrs r3, CONTROL
    movs.n r1, #1
    bics r3, r1
    orrs r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#endif

    ldr r4, =_thread_offset_to_callee_saved
    adds r0, r2, r4

    /* restore r4-r12 for new thread */
    /* first restore r8-r12 located after r4-r7 (4*4 bytes) */
    adds r0, #16
    ldmia r0!, {r3-r7}
    /* move to correct registers */
    mov r8, r3
    mov r9, r4
    mov r10, r5
    mov r11, r6
    mov ip, r7
    /* restore r4-r7, go back 9*4 bytes to the start of the stored block */
    subs r0, #36
    ldmia r0!, {r4-r7}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* restore BASEPRI for the incoming thread */
    msr BASEPRI, r0

#ifdef CONFIG_FPU_SHARING
    /* Assess whether the switched-in thread had been using the FP registers. */
    tst lr, #_EXC_RETURN_FTYPE_Msk
    beq in_fp_active
    /* FP context inactive for swapped-in thread:
     * - reset FPSCR to 0
     * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
     *   from PendSV)
     */
    movs.n r3, #0
    vmsr fpscr, r3
    b in_fp_endif

in_fp_active:
    /* FP context active:
     * - clear EXC_RETURN.F_Type
     * - FPSCR and caller-saved registers will be restored automatically
     * - restore callee-saved FP registers
     */
    add r0, r2, #_thread_offset_to_preempt_float
    vldmia r0, {s16-s31}
in_fp_endif:
    /* Clear CONTROL.FPCA that may have been set by FP instructions */
    mrs r3, CONTROL
    bic r3, #_CONTROL_FPCA_Msk
    msr CONTROL, r3
    isb
#endif

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
    /* Re-program dynamic memory map */
    push {r2,lr}
    mov r0, r2 /* _current thread */
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2,lr}
#endif

#ifdef CONFIG_USERSPACE
    /* restore mode */
    ldr r0, [r2, #_thread_offset_to_mode]
    mrs r3, CONTROL
    bic r3, #1
    orr r3, r0
    msr CONTROL, r3

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#endif

    /* load callee-saved + psp from thread */
    add r0, r2, #_thread_offset_to_callee_saved
    ldmia r0, {v1-v8, ip}
#elif defined(CONFIG_ARMV7_R)
_thread_irq_disabled:
    /* load _kernel into r1 and current k_thread into r2 */
    ldr r1, =_kernel
    ldr r2, [r1, #_kernel_offset_to_current]

    /* addr of callee-saved regs in thread in r0 */
    ldr r0, =_thread_offset_to_callee_saved
    add r0, r2

    /* restore r4-r11 and sp for incoming thread */
    cps #MODE_SYS
    ldm r0, {r4-r11, sp}
    cps #MODE_SVC

#if defined (CONFIG_ARM_MPU)
    /* r2 contains k_thread */
    mov r0, r2
    /* Re-program dynamic memory map */
    push {r2, lr}
    bl z_arm_configure_dynamic_mpu_regions
    pop {r2, lr}
#endif
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

#if defined(CONFIG_CPU_CORTEX_M)
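    /* Switch to the incoming thread's process stack */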
    msr PSP, ip
#endif

#ifdef CONFIG_BUILTIN_STACK_GUARD
    /* r2 contains k_thread */
    add r0, r2, #0
    push {r2, lr}
    bl configure_builtin_stack_guard
    pop {r2, lr}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
    /* Register the context switch */
    push {r0, lr}
    bl z_thread_mark_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov lr, r1
#else
    pop {r0, lr}
#endif
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

    /*
     * Cortex-M: return from PendSV exception
     * Cortex-R: return to the caller (z_arm_{exc,int}_exit, or z_arm_svc)
     */
    bx lr

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) || \
  defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used on the following occasions:
 * - IRQ offloading
 * - Kernel run-time exceptions
 * - System Calls (User mode)
 *
 * @return N/A
 */
SECTION_FUNC(TEXT, z_arm_svc)
  /* Use EXC_RETURN state to find out if stack frame is on the
   * MSP or PSP
   */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
  movs r0, #_EXC_RETURN_SPSEL_Msk
  mov r1, lr
  tst r1, r0
  beq _stack_frame_msp
  mrs r0, PSP
  bne _stack_frame_endif
_stack_frame_msp:
  mrs r0, MSP
_stack_frame_endif:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode? */
    ite eq  /* if zero (equal), came from handler mode */
        mrseq r0, MSP   /* handler mode, stack frame is on MSP */
        mrsne r0, PSP   /* thread mode, stack frame is on PSP */
#endif


    /* Figure out what SVC call number was invoked */

    ldr r1, [r0, #24]   /* grab address of PC from stack frame */
    /* SVC is a two-byte instruction; point to it and read the
     * SVC number (lower byte of the SVC instruction)
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    subs r1, r1, #2
    ldrb r1, [r1]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldrb r1, [r1, #-2]
#endif

   /*
    * grab service call number:
    * 0: Unused
    * 1: irq_offload (if configured)
    * 2: kernel panic or oops (software generated fatal exception)
    * 3: System call (if user mode supported)
    */
#if defined(CONFIG_USERSPACE)
    mrs r2, CONTROL
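    /* CONTROL is used for the privilege check below; _do_syscall also expects it in r2 */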

    cmp r1, #3
    beq _do_syscall

    /*
     * Check that we are privileged before invoking other SVCs;
     * oops if we are unprivileged.
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #0x1
    tst r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    tst r2, #0x1
#endif
    bne _oops

#endif /* CONFIG_USERSPACE */

    cmp r1, #2
    beq _oops

#if defined(CONFIG_IRQ_OFFLOAD)
    push {r0, lr}
    bl z_irq_do_offload  /* call C routine which executes the offload */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r3}
    mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    pop {r0, lr}
#endif

    /* exception return is done in z_arm_int_exit() */
    b z_arm_int_exit
#endif

_oops:
    push {r0, lr}
    bl z_do_kernel_oops
    /* return from SVC exception is done here */
    pop {r0, pc}

#if defined(CONFIG_USERSPACE)
    /*
     * A system call will set up a jump to the z_arm_do_syscall() function
     * when the SVC returns via the bx lr.
     *
     * There is some trickery involved here because we have to preserve
     * the original PC value so that we can return back to the caller of
     * the SVC.
     *
     * On SVC exception, the stack looks like the following:
     * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
     *
     * Registers look like:
     * r0 - arg1
     * r1 - arg2
     * r2 - arg3
     * r3 - arg4
     * r4 - arg5
     * r5 - arg6
     * r6 - call_id
     * r8 - saved link register
     */
_do_syscall:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    movs r3, #24
    ldr r1, [r0, r3]   /* grab address of PC from stack frame */
    mov r8, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r8, [r0, #24]   /* grab address of PC from stack frame */
#endif
    ldr r1, =z_arm_do_syscall
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    str r1, [r0, r3]   /* overwrite the PC to point to z_arm_do_syscall */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    str r1, [r0, #24]   /* overwrite the PC to point to z_arm_do_syscall */
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    ldr r3, =K_SYSCALL_LIMIT
    cmp r6, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* validate syscall limit */
    ldr ip, =K_SYSCALL_LIMIT
    cmp r6, ip
#endif
    /* The supplied syscall_id must be lower than the limit
     * (Requires unsigned integer comparison)
     */
    blo valid_syscall_id

    /* bad syscall id.  Set arg1 to bad id and set call_id to SYSCALL_BAD */
    str r6, [r0]
    ldr r6, =K_SYSCALL_BAD

    /* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */

valid_syscall_id:
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    mov ip, r2
    ldr r1, =_thread_offset_to_mode
    ldr r3, [r0, r1]
    movs r2, #1
    bics r3, r2
    /* Store (privileged) mode in thread's mode state variable */
    str r3, [r0, r1]
    mov r2, ip
    dsb
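    /* The dsb orders the mode word store ahead of the CONTROL update below */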
    /* set mode to privileged; r2 still contains the value from CONTROL */
    movs r3, #1
    bics r2, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r1, [r0, #_thread_offset_to_mode]
    bic r1, #1
    /* Store (privileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
    dsb
    /* set mode to privileged; r2 still contains the value from CONTROL */
    bic r2, #1
#endif
    msr CONTROL, r2

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Thread is now in privileged mode; after returning from the SVCall it
     * will use the default (user) stack before switching to the privileged
     * stack to execute the system call. We need to protect the user stack
     * against stack overflows until this stack transition.
     */
    ldr r1, [r0, #_thread_offset_to_stack_info_start]    /* stack_info.start */
    msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */

    /* return from SVC to the modified LR - z_arm_do_syscall */
    bx lr
#endif /* CONFIG_USERSPACE */

#elif defined(CONFIG_ARMV7_R)

/**
 *
 * @brief Service call handler
 *
 * The service call (svc) is used on the following occasions:
 * - Cooperative context switching
 * - IRQ offloading
 * - Kernel run-time exceptions
 *
 * @return N/A
 */
SECTION_FUNC(TEXT, z_arm_svc)
#if defined(CONFIG_USERSPACE)
    /* Determine if the incoming thread was in user context */
    push {r0}
    mrs r0, spsr
    and r0, #MODE_MASK
    cmp r0, #MODE_USR
    bne svc_system_thread

    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]

    /* Save away the user stack pointer */
    cps #MODE_SYS
    str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */

    /* Switch to the privileged stack */
    ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */
    cps #MODE_SVC

svc_system_thread:
    pop {r0}
#endif

    /*
     * Switch to system mode to store r0-r3 to the process stack pointer.
     * Save r12 and the lr as we could be swapping in another process and
     * returning to a different location.
     */
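    /* srsdb: store LR_svc and SPSR_svc onto the System mode stack */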
    srsdb #MODE_SYS!
    cps #MODE_SYS
    push {r0-r3, r12, lr}
    cps #MODE_SVC

    /*
     * Store lr_svc to the SVC mode stack. This value will be restored prior to
     * exiting the SVC call in z_arm_int_exit.
     */
    push {lr}

    /* Align stack at double-word boundary */
    and r3, sp, #4
    sub sp, sp, r3
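    /* Save r2 and the alignment adjustment (r3) so the adjustment can be undone on exit */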
    push {r2, r3}

    /* Increment interrupt nesting count */
    ldr r2, =_kernel
    ldr r0, [r2, #_kernel_offset_to_nested]
    add r0, r0, #1
    str r0, [r2, #_kernel_offset_to_nested]

    /* Get SVC number */
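    /* The SPSR T bit (bit 5) tells whether the SVC was issued from Thumb or
     * ARM state, which determines the instruction size and where its
     * immediate field sits relative to lr.
     */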
    mrs r0, spsr
    tst r0, #0x20

    ldreq r1, [lr, #-4]
    biceq r1, #0xff000000
    beq demux

    ldr r1, [lr, #-2]
    and r1, #0xff

   /*
    * grab service call number:
    * 0: context switch
    * 1: irq_offload (if configured)
    * 2: kernel panic or oops (software generated fatal exception)
    * 3: system calls for memory protection
    */
demux:

#if defined(CONFIG_USERSPACE)
    cmp r1, #_SVC_CALL_SYSTEM_CALL
    beq _do_syscall
#endif

    cmp r1, #_SVC_CALL_CONTEXT_SWITCH
    beq _context_switch

    cmp r1, #_SVC_CALL_RUNTIME_EXCEPT
    beq _oops

#if CONFIG_IRQ_OFFLOAD
    blx z_irq_do_offload  /* call C routine which executes the offload */

    /* exception return is done in z_arm_int_exit() */
    b z_arm_int_exit
#endif

_context_switch:
    /* handler mode exit, to PendSV */
    bl z_arm_pendsv

    b z_arm_int_exit

_oops:
    /*
     * Pass the exception frame to z_do_kernel_oops.  r0 contains the
     * exception reason.
     */
    cps #MODE_SYS
    mov r0, sp
    cps #MODE_SVC
    bl z_do_kernel_oops
    b z_arm_int_exit

#if defined(CONFIG_USERSPACE)
    /*
     * A system call will set up a jump to the z_arm_do_syscall() function
     * when the SVC returns via the bx lr.
     *
     * There is some trickery involved here because we have to preserve
     * the original PC value so that we can return back to the caller of
     * the SVC.
     *
     * On SVC exception, the USER/SYSTEM stack looks like the following:
     *
     * sp+0:   r0
     * sp+4:   r1
     * sp+8:   r2
     * sp+12:  r3
     * sp+16:  r12
     * sp+20:  LR_svc (address of the opcode just following the SVC opcode)
     *
     * Registers look like:
     * r0 - arg1
     * r1 - arg2
     * r2 - arg3
     * r3 - arg4
     * r4 - arg5
     * r5 - arg6
     * r6 - call_id
     * r8 - saved link register
     */
_do_syscall:
    /* validate syscall limit, only set priv mode if valid */
    ldr ip, =K_SYSCALL_LIMIT
    cmp r6, ip
    blo valid_syscall_id

    /* bad syscall id.  Set arg1 to bad id and set call_id to SYSCALL_BAD */
    cps #MODE_SYS
    str r6, [sp]
    cps #MODE_SVC
    ldr r6, =K_SYSCALL_BAD

valid_syscall_id:
    push {r0, r1}
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r1, [r0, #_thread_offset_to_mode]
    bic r1, #1
    /* Store (privileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
    dsb

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb

    /*
     * restore r0-r3 from the supervisor stack before changing to system mode.
     * r0,r1 saved just after valid_syscall_id
     * r2,r3 saved just after z_arm_svc
     */
    pop {r0-r3}

    add sp, sp, r3 /* undo the stack pointer alignment to a double-word boundary */

    /* Switch to system mode */
    cps #MODE_SYS

    /*
     * Restore the nesting level.  The thread that is doing the system call may
     * be put to sleep, as in the case of waiting in k_msgq_get() with
     * K_FOREVER, so we don't want the nesting level to be elevated during
     * that complete time.
     */
    ldr r2, =_kernel
    ldr r1, [r2, #_kernel_offset_to_nested]
    sub r1, r1, #1
    str r1, [r2, #_kernel_offset_to_nested]

    /*
     * restore r0-r3 from the stack since we've used them above during demux
     */
    ldr r0, [sp, #0]
    ldr r1, [sp, #4]
    ldr r2, [sp, #8]
    ldr r3, [sp, #12]

    /*
     * grab the return address from the USER/SYSTEM stack frame
     * (just past the SVC opcode)
     */
    ldr r8, [sp, #20]

    /*
     * User stack left with:
     *
     * sp:    r0
     * sp+4:  r1
     * sp+8:  r2
     * sp+12: r3
     * sp+16: r12
     * sp+20: LR_svc (address of the opcode just following the SVC opcode)
     */

    /* branch to z_arm_do_syscall.  We will not return here. */
    b z_arm_do_syscall
#endif

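/**
 *
 * @brief Cooperative context switch helper for Cortex-R
 *
 * Issues the context-switch service call (see the SVC number table above),
 * which z_arm_svc demuxes to z_arm_pendsv.
 */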
GTEXT(z_arm_cortex_r_svc)
SECTION_FUNC(TEXT, z_arm_cortex_r_svc)
    svc #_SVC_CALL_CONTEXT_SWITCH
    bx lr

#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */