/*
 * Copyright (c) 2018 Linaro, Limited
 * Copyright (c) 2023 Arm Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>

#include <errno.h>

/* The 'key' actually represents the BASEPRI register
 * prior to disabling interrupts via the BASEPRI mechanism.
 *
 * arch_swap() itself does not do much.
 *
 * It simply stores the intlock key (the BASEPRI value) parameter into
 * current->basepri, and then triggers a PendSV exception, which does
 * the heavy lifting of context switching.
 *
 * This is the only place we have to save BASEPRI since the other paths to
 * z_arm_pendsv all come from handling an interrupt, which means we know the
 * interrupts were not locked: in that case the BASEPRI value is 0.
 *
 * Given that arch_swap() is called to effect a cooperative context switch,
 * only the caller-saved integer registers need to be saved for the
 * outgoing thread. This is all performed by the hardware, which stores
 * them in the exception stack frame it creates when taking the
 * z_arm_pendsv exception.
 *
 * On ARMv6-M, the intlock key is represented by the PRIMASK register,
 * as BASEPRI is not available.
 */
int arch_swap(unsigned int key)
{
	/* store off key and return value */
	_current->arch.basepri = key;
	_current->arch.swap_return_value = -EAGAIN;

	/* set pending bit to make sure we will take a PendSV exception */
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;

	/* clear mask or enable all irqs to take a pendsv */
	irq_unlock(0);

	/* Context switch is performed here. Returning implies the
	 * thread has been context-switched-in again.
	 */
	return _current->arch.swap_return_value;
}
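
/* A minimal usage sketch (for illustration only; the real caller is the
 * kernel's swap logic, not reproduced here): interrupts are locked
 * first, and the resulting key is handed to arch_swap(), which returns
 * only once this thread has been context-switched back in.
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	// ...decide to yield the CPU to the next ready thread...
 *	int ret = arch_swap(key);	// returns -EAGAIN unless another
 *					// context sets a different value
 */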

uintptr_t z_arm_pendsv_c(uintptr_t exc_ret)
{
	/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
	IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
		   (_kernel.cpus[0].current->arch.mode_exc_return = (uint8_t)exc_ret;));
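	/* Note (added for clarity): the low byte of EXC_RETURN encodes,
	 * among other things, which stack frame type was saved (e.g. the
	 * FType bit distinguishing a standard frame from an extended,
	 * FP-capable one) and which stack pointer to unstack from, so it
	 * must be preserved per thread and restored on exception return.
	 */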

	/* Protect the kernel state while we play with the thread lists */
	uint32_t basepri = arch_irq_lock();

	/* fetch the thread to run from the ready queue cache */
	struct k_thread *current = _kernel.cpus[0].current = _kernel.ready_q.cache;
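	/* Note (added for clarity): _kernel.ready_q.cache is kept by the
	 * scheduler pointing at the thread that should run next, so the
	 * context switch itself only has to consume it here.
	 */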

	/*
	 * Clear PendSV so that if another interrupt comes in and decides,
	 * based on the new kernel state now that the new thread is being
	 * context-switched in, that it needs to reschedule, it will take;
	 * but a previously pended PendSV does not take, since it was based
	 * on the previous kernel state, which has been handled here.
	 */
	SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
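	/* Note (added for clarity): this is deliberately a plain write, not
	 * a read-modify-write. The set/clear bits in ICSR take effect when
	 * written as one and ignore writes of zero, so assigning only the
	 * PENDSVCLR mask avoids writing back pending-status bits that a |=
	 * sequence would read as one and thereby re-pend.
	 */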

	/* For Cortex-M, store the TLS pointer in a global variable, as this
	 * architecture lacks a process ID or thread ID register that the
	 * toolchain could otherwise use to reach per-thread data.
	 */
	IF_ENABLED(CONFIG_THREAD_LOCAL_STORAGE,
		   (extern uintptr_t z_arm_tls_ptr; z_arm_tls_ptr = current->tls));
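	/* A sketch of the consumer side, for orientation only. Assuming the
	 * usual ARM EABI arrangement, the toolchain funnels every __thread
	 * access through a helper, canonically __aeabi_read_tp, which here
	 * would simply hand back the pointer published above:
	 *
	 *	void *__aeabi_read_tp(void)
	 *	{
	 *		extern uintptr_t z_arm_tls_ptr;
	 *
	 *		return (void *)z_arm_tls_ptr;
	 *	}
	 */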

	IF_ENABLED(CONFIG_ARM_STORE_EXC_RETURN,
		   (exc_ret = (exc_ret & 0xFFFFFF00) | current->arch.mode_exc_return));

	/* Restore the incoming thread's interrupt disable state (its
	 * irq_lock key). The arch.basepri field is cleared as soon as it
	 * has been read, before the state is restored.
	 */
	basepri = current->arch.basepri;
	current->arch.basepri = 0;

	arch_irq_unlock(basepri);

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	/* Re-program dynamic memory map */
	z_arm_configure_dynamic_mpu_regions(current);
#endif

	/* restore mode */
	IF_ENABLED(CONFIG_USERSPACE, ({
		CONTROL_Type ctrl = {.w = __get_CONTROL()};
		/* Clear nPRIV first, so that the privilege level of the
		 * incoming thread comes solely from its 'mode' word, which
		 * is OR-ed back in below.
		 */
		ctrl.b.nPRIV = 0;
		/* __set_CONTROL inserts an ISB, which may not be necessary
		 * here (the stack pointer may not be touched), but it is
		 * recommended to avoid executing pre-fetched instructions
		 * with the previous privilege.
		 */
		__set_CONTROL(ctrl.w | current->arch.mode);
	}));

	return exc_ret;
}
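
/* Note (added for clarity): z_arm_pendsv_c() is expected to be reached
 * from the assembly PendSV handler, which passes the current EXC_RETURN
 * in and performs the exception return through the value returned here;
 * the callee-saved registers of the outgoing and incoming threads are
 * saved and restored by that assembly glue, not by this function.
 */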