/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/barrier.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif /* CONFIG_STACK_SENTINEL */

extern struct k_spinlock _sched_spinlock;

/* In SMP, the irq_lock() is a spinlock which is implicitly released
 * and reacquired on context switch to preserve the existing
 * semantics.  This means that whenever we are about to return to a
 * thread (via either z_swap() or interrupt/exception return!) we need
 * to restore the lock state to whatever the thread's counter
 * expects.
 */
void z_smp_release_global_lock(struct k_thread *thread);

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* Spin, with the scheduler lock held (!), on a thread that is known
 * (!!) to have released the lock and be on a path where it will
 * deterministically (!!!) reach arch_switch() in very small constant
 * time.
 *
 * This exists to handle an unavoidable SMP race when threads swap --
 * their thread record is in the queue (and visible to other CPUs)
 * before arch_switch() finishes saving state.  We must spin for the
 * switch handle before entering a new thread.  See docs on
 * arch_switch().
 *
 * Stated differently: there's a chicken-and-egg problem with the
 * question of "is a thread running or not?".  The thread needs to
 * mark itself "not running" from its own context, but at that moment
 * it obviously is still running until it reaches arch_switch()!
 * Locking can't solve this because the scheduler lock can't be
 * released by the switched-to thread, which is going to (obviously)
 * be running its own code and doesn't know it was switched out.
 */
static inline void z_sched_switch_spin(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();
#endif /* CONFIG_SMP */
}

/* New style context switching.  arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 *
 * Note that is_spinlock is a compile-time construct which will be
 * optimized out when this function is expanded.
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
					  struct k_spinlock *lock,
					  bool is_spinlock)
{
	struct k_thread *new_thread, *old_thread;

#ifdef CONFIG_SPIN_VALIDATE
	/* Make sure the key acts to unmask interrupts.  If it doesn't,
	 * then we are context switching out of a nested lock
	 * (i.e. breaking the lock of someone up the stack), which is
	 * forbidden!  The sole exceptions are dummy threads used
	 * during initialization (where we start with interrupts
	 * masked and switch away to begin scheduling) and the case of
	 * a dead current thread that was just aborted (where the
	 * damage was already done by the abort anyway).
	 *
	 * (Note that this is disabled on ARM64, where system calls
	 * can sometimes run with interrupts masked in ways that don't
	 * represent lock state.  See #35307)
	 */
# ifndef CONFIG_ARM64
	__ASSERT(arch_irq_unlocked(key) ||
		 arch_current_thread()->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
		 "Context switching while holding lock!");
# endif /* CONFIG_ARM64 */
#endif /* CONFIG_SPIN_VALIDATE */

	old_thread = arch_current_thread();

	z_check_stack_sentinel();

	old_thread->swap_retval = -EAGAIN;

	/* We always take the scheduler spinlock if we don't already
	 * have it.  We "release" other spinlocks here.  But we never
	 * drop the interrupt lock.
	 */
	if (is_spinlock && lock != NULL && lock != &_sched_spinlock) {
		k_spin_release(lock);
	}
	if (!is_spinlock || lock != &_sched_spinlock) {
		(void) k_spin_lock(&_sched_spinlock);
	}

	new_thread = z_swap_next_thread();

	if (new_thread != old_thread) {
		z_sched_usage_switch(new_thread);

#ifdef CONFIG_SMP
		_current_cpu->swap_ok = 0;
		new_thread->base.cpu = arch_curr_cpu()->id;

		if (!is_spinlock) {
			z_smp_release_global_lock(new_thread);
		}
#endif /* CONFIG_SMP */
		z_thread_mark_switched_out();
		z_sched_switch_spin(new_thread);
		arch_current_thread_set(new_thread);

#ifdef CONFIG_TIMESLICING
		z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
		z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

		arch_cohere_stacks(old_thread, NULL, new_thread);

#ifdef CONFIG_SMP
		/* Now add the outgoing thread (old_thread) back to the
		 * run queue, once we are guaranteed to reach the
		 * context switch in finite time.  See
		 * z_sched_switch_spin().
		 */
		z_requeue_current(old_thread);
#endif /* CONFIG_SMP */
		void *newsh = new_thread->switch_handle;

		if (IS_ENABLED(CONFIG_SMP)) {
			/* An active thread must have a null switch
			 * handle here.  And the null must be visible
			 * to other CPUs before the scheduler lock is
			 * released!
			 */
			new_thread->switch_handle = NULL;
			barrier_dmem_fence_full(); /* write barrier */
		}
		k_spin_release(&_sched_spinlock);
		arch_switch(newsh, &old_thread->switch_handle);
	} else {
		k_spin_release(&_sched_spinlock);
	}

	if (is_spinlock) {
		arch_irq_unlock(key);
	} else {
		irq_unlock(key);
	}

	return arch_current_thread()->swap_retval;
}
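
/* Illustrative sketch: the value returned by do_swap() is whatever
 * swap_retval holds when this thread is switched back in.  It
 * defaults to -EAGAIN (set above); a waker typically overwrites it
 * before readying the thread, roughly along these lines (wakee is a
 * placeholder, and the exact helpers used vary by wake path):
 *
 *	arch_thread_return_value_set(wakee, 0);
 *	z_ready_thread(wakee);
 *
 * so the blocked side sees 0 on a successful wakeup and the -EAGAIN
 * default if nothing ever stored a value.
 */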

static inline int z_swap_irqlock(unsigned int key)
{
	return do_swap(key, NULL, false);
}

static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	return do_swap(key.key, lock, true);
}

static inline void z_swap_unlocked(void)
{
	(void) do_swap(arch_irq_lock(), NULL, true);
}
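
/* Usage sketch: a blocking primitive typically takes its own
 * spinlock, queues the current thread, and then calls z_swap(), which
 * releases that lock and does not return until this thread is
 * switched back in.  Roughly (my_lock and the wait-queue step are
 * placeholders; real callers usually go through helpers such as
 * z_pend_curr()):
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *	// ... put arch_current_thread() on a wait queue ...
 *	return z_swap(&my_lock, key);	// resumes with swap_retval
 */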

#else /* !CONFIG_USE_SWITCH */

extern int arch_swap(unsigned int key);

static inline void z_sched_switch_spin(struct k_thread *thread)
{
	ARG_UNUSED(thread);
}

static inline int z_swap_irqlock(unsigned int key)
{
	int ret;
	z_check_stack_sentinel();
	ret = arch_swap(key);
	return ret;
}

/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
 * can't be in SMP.  The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	k_spin_release(lock);
	return z_swap_irqlock(key.key);
}

static inline void z_swap_unlocked(void)
{
	(void) z_swap_irqlock(arch_irq_lock());
}

#endif /* !CONFIG_USE_SWITCH */

/**
 * Set up a "dummy" thread, used at early initialization to launch the
 * first thread on a CPU.
 *
 * Needs to set enough fields such that the context switching code can
 * use it to properly store state, which will just be discarded.
 *
 * The memory of the dummy thread can be completely uninitialized.
 */
static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
{
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_SCHED_CPU_MASK
	dummy_thread->base.cpu_mask = -1;
#endif /* CONFIG_SCHED_CPU_MASK */
	dummy_thread->base.user_options = K_ESSENTIAL;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
#endif /* CONFIG_USERSPACE */
#if (K_HEAP_MEM_POOL_SIZE > 0)
	k_thread_system_pool_assign(dummy_thread);
#else
	dummy_thread->resource_pool = NULL;
#endif /* K_HEAP_MEM_POOL_SIZE */

#ifdef CONFIG_TIMESLICE_PER_THREAD
	dummy_thread->base.slice_ticks = 0;
#endif /* CONFIG_TIMESLICE_PER_THREAD */

	arch_current_thread_set(dummy_thread);
}
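
/* Usage sketch: early per-CPU initialization points
 * arch_current_thread() at a throwaway thread struct so that the
 * first real context switch has somewhere to store the outgoing
 * (and discarded) state, roughly:
 *
 *	struct k_thread dummy;		// may be uninitialized memory
 *	z_dummy_thread_init(&dummy);
 *	// ... finish bringing up the scheduler ...
 *	z_swap_unlocked();		// switches to the first real thread
 *
 * The exact boot path (which init function does this, whether the
 * dummy lives on the stack or in per-CPU data) is not implied by this
 * header.
 */
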
#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */