/*
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Thread context switching for ARM Cortex-A and Cortex-R (AArch32)
 *
 * This module implements the routines necessary for thread context switching
 * on ARM Cortex-A and Cortex-R CPUs.
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <offsets_short.h>
#include <zephyr/kernel.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GTEXT(z_arm_svc)
GTEXT(z_arm_context_switch)
GTEXT(z_do_kernel_oops)
GTEXT(z_arm_do_syscall)

/*
 * Routine to handle context switches
 *
 * This function is called directly either by _isr_wrapper() in the case of
 * preemption, or by arch_switch() in the case of cooperative switching.
 *
 * void z_arm_context_switch(struct k_thread *new, struct k_thread *old);
 */
SECTION_FUNC(TEXT, z_arm_context_switch)

	/* Save the old thread's callee-saved registers, sp and lr */
	ldr r2, =_thread_offset_to_callee_saved
	add r2, r1, r2

	stm r2, {r4-r11, sp, lr}

	/* Save the current thread's exception depth */
	get_cpu r2
	ldrb r3, [r2, #_cpu_offset_to_exc_depth]
	strb r3, [r1, #_thread_offset_to_exception_depth]

	/* Retrieve the next thread's exception depth */
	ldrb r3, [r0, #_thread_offset_to_exception_depth]
	strb r3, [r2, #_cpu_offset_to_exc_depth]

	/* Save the old thread into its switch handle, which is required by
	 * z_sched_switch_spin().
	 *
	 * Note that this step must be done after all relevant state has
	 * been saved.
	 */
	dsb
	str r1, [r1, #___thread_t_switch_handle_OFFSET]

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* Grab the new thread's TLS pointer */
	ldr r3, [r0, #_thread_offset_to_tls]

	/* Store the TLS pointer in the "Process ID" register.
	 * This register is used as a base pointer to all
	 * thread variables, with offsets added by the toolchain.
	 */
	mcr p15, 0, r3, c13, c0, 2
#endif

	/* Restore the new thread's callee-saved registers, sp and lr */
	ldr r2, =_thread_offset_to_callee_saved
	add r2, r0, r2
	ldm r2, {r4-r11, sp, lr}

#if defined(CONFIG_ARM_MPU)
	/* Re-program the dynamic memory map */
	push {r0, lr}
	bl z_arm_configure_dynamic_mpu_regions
	pop {r0, lr}
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	push {lr}
	bl z_thread_mark_switched_in
	pop {lr}
#endif

	bx lr

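/*
 * For illustration only (not part of this file): a cooperative switch
 * reaches z_arm_context_switch() through arch_switch(). A minimal sketch,
 * assuming Zephyr's switch-handle convention; the actual wrapper lives in
 * the arch headers and may differ in detail:
 *
 *	static ALWAYS_INLINE void arch_switch(void *switch_to,
 *					      void **switched_from)
 *	{
 *		struct k_thread *new = switch_to;
 *		struct k_thread *old = CONTAINER_OF(switched_from,
 *						    struct k_thread,
 *						    switch_handle);
 *
 *		z_arm_context_switch(new, old);
 *	}
 */
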
/**
 * @brief Service call handler
 *
 * The service call (svc) is used on the following occasions:
 * - Cooperative context switching
 * - IRQ offloading
 * - Kernel run-time exceptions
 */
SECTION_FUNC(TEXT, z_arm_svc)
	z_arm_cortex_ar_enter_exc

	/* Get the SVC number */
	cps #MODE_SVC
	mrs r0, spsr
	tst r0, #0x20

	/* ARM state: the SVC immediate is the low 24 bits of the 32-bit
	 * instruction preceding the return address.
	 */
	ldreq r1, [lr, #-4]
	biceq r1, #0xff000000
	beq demux

	/* Thumb state: the SVC immediate is the low byte of the 16-bit
	 * instruction preceding the return address.
	 */
	ldrh r1, [lr, #-2]
	and r1, #0xff

	/*
	 * Grab the service call number:
	 * TODO 0: context switch
	 * 1: irq_offload (if configured)
	 * 2: kernel panic or oops (software-generated fatal exception)
	 * TODO 3: system calls for memory protection
	 */
demux:
	cps #MODE_SYS

	cmp r1, #_SVC_CALL_RUNTIME_EXCEPT
	beq _oops

#ifdef CONFIG_IRQ_OFFLOAD
	cmp r1, #_SVC_CALL_IRQ_OFFLOAD
	beq offload
	b inv
offload:
	get_cpu r2
	ldr r3, [r2, #___cpu_t_nested_OFFSET]
	add r3, r3, #1
	str r3, [r2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to the IRQ stack and save the current sp
	 * on it.
	 */
	cmp r3, #1
	bhi 1f
	mov r0, sp
	cps #MODE_IRQ
	push {r0}

1:
	blx z_irq_do_offload
	b z_arm_cortex_ar_irq_done
#endif
	b inv

_oops:
	/* Pass the exception frame to z_do_kernel_oops */
	mov r0, sp
	/* Zero callee_regs and exc_return (only used on Cortex-M) */
	mov r1, #0
	mov r2, #0
	bl z_do_kernel_oops

inv:
	mov r0, #0 /* K_ERR_CPU_EXCEPTION */
	mov r1, sp
	bl z_arm_fatal_error

	/* Return here only in case of a recoverable error */
	b z_arm_cortex_ar_exit_exc
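
/*
 * For illustration only (not part of this file): the kernel run-time
 * exception path above is entered when C code raises a fatal error via
 * an svc instruction. A minimal sketch of the raising side, assuming the
 * reason code travels in r0 and the service call number is encoded as the
 * svc immediate (as the demux above expects), in the style of Zephyr's
 * ARCH_EXCEPT() macro:
 *
 *	register uint32_t r0 __asm__("r0") = reason;
 *
 *	__asm__ volatile("svc %[id]\n"
 *			 :
 *			 : "r" (r0), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT)
 *			 : "memory");
 */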