/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/barrier.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif /* CONFIG_STACK_SENTINEL */

extern struct k_spinlock _sched_spinlock;

/* In SMP, the irq_lock() is a spinlock which is implicitly released
 * and reacquired on context switch to preserve the existing
 * semantics.  This means that whenever we are about to return to a
 * thread (via either z_swap() or interrupt/exception return!) we need
 * to restore the lock state to whatever the thread's counter
 * expects.
 */
void z_smp_release_global_lock(struct k_thread *thread);
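
/* Illustrative sketch of the semantics being preserved (a minimal,
 * hypothetical caller, not a definition from this header): a thread
 * that blocks while holding the global irq_lock() -- for example after
 * pending itself on a wait queue -- expects the lock to still be held
 * when it resumes, possibly on a different CPU:
 *
 *	unsigned int key = irq_lock();
 *	...                          // pend _current somewhere
 *	z_swap_irqlock(key);         // switched out; lock implicitly released
 *	...                          // resumes with the lock state restored
 *	irq_unlock(key);
 *
 * The swap path below (and interrupt/exception return) performs that
 * release/restore so the pattern above keeps working under SMP.
 */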

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* Spin, with the scheduler lock held (!), on a thread that is known
 * (!!) to have released the lock and be on a path where it will
 * deterministically (!!!) reach arch_switch() in very small constant
 * time.
 *
 * This exists to handle an unavoidable SMP race when threads swap --
 * their thread record is in the queue (and visible to other CPUs)
 * before arch_switch() finishes saving state.  We must spin for the
 * switch handle before entering a new thread.  See docs on
 * arch_switch().
 *
 * Stated differently: there's a chicken-and-egg problem with the
 * question of "is a thread running or not?".  The thread needs to
 * mark itself "not running" from its own context, but at that moment
 * it obviously is still running until it reaches arch_switch()!
 * Locking can't solve this because the scheduler lock can't be
 * released by the switched-to thread, which is going to (obviously)
 * be running its own code and doesn't know it was switched out.
 */
static inline void z_sched_switch_spin(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();
#endif /* CONFIG_SMP */
}

/* New style context switching.  arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 *
 * Note that is_spinlock is a compile-time construct which will be
 * optimized out when this function is expanded.
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
					  struct k_spinlock *lock,
					  bool is_spinlock)
{
	struct k_thread *new_thread, *old_thread;

#ifdef CONFIG_SPIN_VALIDATE
	/* Make sure the key acts to unmask interrupts; if it doesn't,
	 * then we are context switching out of a nested lock
	 * (i.e. breaking the lock of someone up the stack), which is
	 * forbidden!  The only exceptions are dummy threads used
	 * during initialization (where we start with interrupts
	 * masked and switch away to begin scheduling) and a dead
	 * current thread that was just aborted (where the damage was
	 * already done by the abort anyway).
	 *
	 * (Note that this is disabled on ARM64, where system calls
	 * can sometimes run with interrupts masked in ways that don't
	 * represent lock state.  See #35307)
	 */
# ifndef CONFIG_ARM64
	__ASSERT(arch_irq_unlocked(key) ||
		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
		 "Context switching while holding lock!");
# endif /* CONFIG_ARM64 */
#endif /* CONFIG_SPIN_VALIDATE */

	old_thread = _current;

	z_check_stack_sentinel();

	old_thread->swap_retval = -EAGAIN;

	/* We always take the scheduler spinlock if we don't already
	 * have it.  We "release" other spinlocks here.  But we never
	 * drop the interrupt lock.
	 */
	if (is_spinlock && lock != NULL && lock != &_sched_spinlock) {
		k_spin_release(lock);
	}
	if (!is_spinlock || lock != &_sched_spinlock) {
		(void) k_spin_lock(&_sched_spinlock);
	}

	new_thread = z_swap_next_thread();

	if (new_thread != old_thread) {
		z_sched_usage_switch(new_thread);

#ifdef CONFIG_SMP
		new_thread->base.cpu = arch_curr_cpu()->id;

		if (!is_spinlock) {
			z_smp_release_global_lock(new_thread);
		}
#endif /* CONFIG_SMP */
		z_thread_mark_switched_out();
		z_sched_switch_spin(new_thread);
		z_current_thread_set(new_thread);

#ifdef CONFIG_TIMESLICING
		z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
		z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

		arch_cohere_stacks(old_thread, NULL, new_thread);

#ifdef CONFIG_SMP
		/* Now add _current back to the run queue, once we are
		 * guaranteed to reach the context switch in finite
		 * time.  See z_sched_switch_spin().
		 */
		z_requeue_current(old_thread);
#endif /* CONFIG_SMP */
		void *newsh = new_thread->switch_handle;

		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads must have a null here.  And
			 * it must be seen before the scheduler lock
			 * is released!
			 */
			new_thread->switch_handle = NULL;
			barrier_dmem_fence_full(); /* write barrier */
		}
		k_spin_release(&_sched_spinlock);
		arch_switch(newsh, &old_thread->switch_handle);
	} else {
		k_spin_release(&_sched_spinlock);
	}

	if (is_spinlock) {
		arch_irq_unlock(key);
	} else {
		irq_unlock(key);
	}

	return _current->swap_retval;
}

static inline int z_swap_irqlock(unsigned int key)
{
	return do_swap(key, NULL, false);
}

static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	return do_swap(key.key, lock, true);
}

static inline void z_swap_unlocked(void)
{
	(void) do_swap(arch_irq_lock(), NULL, true);
}

#else /* !CONFIG_USE_SWITCH */

extern int arch_swap(unsigned int key);

static inline void z_sched_switch_spin(struct k_thread *thread)
{
	ARG_UNUSED(thread);
}

static inline int z_swap_irqlock(unsigned int key)
{
	int ret;
	z_check_stack_sentinel();
	ret = arch_swap(key);
	return ret;
}

/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
 * can't be in SMP.  The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	k_spin_release(lock);
	return z_swap_irqlock(key.key);
}

static inline void z_swap_unlocked(void)
{
	(void) z_swap_irqlock(arch_irq_lock());
}

#endif /* !CONFIG_USE_SWITCH */

/**
 * Set up a "dummy" thread, used at early initialization to launch the
 * first thread on a CPU.
 *
 * Needs to set enough fields such that the context switching code can
 * use it to properly store state, which will just be discarded.
 *
 * The memory of the dummy thread can be completely uninitialized.
 */
void z_dummy_thread_init(struct k_thread *dummy_thread);
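
/* Usage sketch (illustrative only, under the assumption that early
 * bring-up code keeps the dummy as a local that is never run again):
 * the dummy gives the first real swap an outgoing context whose saved
 * state can simply be thrown away, e.g.:
 *
 *	struct k_thread dummy;		// contents may be uninitialized
 *
 *	z_dummy_thread_init(&dummy);	// becomes the thread switched away from
 *	z_swap_unlocked();		// launch the first scheduled thread
 */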

#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */