/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

#include <ksched.h>
#include <spinlock.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif

extern struct k_spinlock sched_spinlock;

/* In SMP, the irq_lock() is a spinlock which is implicitly released
 * and reacquired on context switch to preserve the existing
 * semantics.  This means that whenever we are about to return to a
 * thread (via either z_swap() or interrupt/exception return!) we need
 * to restore the lock state to whatever the thread's counter
 * expects.
 */
void z_smp_release_global_lock(struct k_thread *thread);
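/* Illustrative sketch of the semantics described above (not part of
 * this header's API): a thread that blocks while holding irq_lock()
 * still sees an unbroken critical section, because the global lock is
 * implicitly dropped while it is switched out and reacquired before it
 * runs again.  update_shared_state() below is a hypothetical helper;
 * irq_lock()/irq_unlock() and k_sleep() are real APIs.
 *
 *	unsigned int key = irq_lock();
 *
 *	update_shared_state();   // hypothetical critical-section work
 *	k_sleep(K_MSEC(10));     // switched out: the lock is released for
 *	                         // the duration and restored before this
 *	                         // thread resumes
 *	irq_unlock(key);
 */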

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* There is an unavoidable SMP race when threads swap -- their thread
 * record is in the queue (and visible to other CPUs) before
 * arch_switch() finishes saving state.  We must spin for the switch
 * handle before entering a new thread.  See docs on arch_switch().
 *
 * Note: future SMP architectures may need a fence/barrier or cache
 * invalidation here.  Current ones don't, and sadly Zephyr doesn't
 * have a framework for that yet.
 */
static inline void wait_for_switch(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		k_busy_wait(1);
	}
#endif
}
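
/* For context, a hedged sketch of the contract wait_for_switch() pairs
 * with: arch_switch() is expected to publish the outgoing thread's
 * switch handle only *after* that thread's state is fully saved, so a
 * CPU that has already picked the thread off the run queue spins above
 * until the store becomes visible.  Pseudocode only; the helpers named
 * here are hypothetical and the real implementation is per-arch
 * assembly.
 *
 *	void arch_switch(void *switch_to, void **switched_from)
 *	{
 *		save_callee_saved_regs(outgoing_context);  // hypothetical
 *		*switched_from = outgoing_context;         // published LAST
 *		restore_and_jump_to(switch_to);            // hypothetical
 *	}
 */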

/* New style context switching.  arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 *
 * Note that is_spinlock is a compile-time construct which will be
 * optimized out when this function is expanded.
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
					  struct k_spinlock *lock,
					  int is_spinlock)
{
	ARG_UNUSED(lock);
	struct k_thread *new_thread, *old_thread;

#ifdef CONFIG_SPIN_VALIDATE
	/* Make sure the key acts to unmask interrupts.  If it doesn't,
	 * then we are context switching out of a nested lock
	 * (i.e. breaking the lock of someone up the stack), which is
	 * forbidden!  The only exceptions are dummy threads used
	 * during initialization (where we start with interrupts
	 * masked and switch away to begin scheduling) and the case of
	 * a dead current thread that was just aborted (where the
	 * damage was already done by the abort anyway).
	 *
	 * (Note that this is disabled on ARM64, where system calls
	 * can sometimes run with interrupts masked in ways that don't
	 * represent lock state.  See #35307)
	 */
# ifndef CONFIG_ARM64
	__ASSERT(arch_irq_unlocked(key) ||
		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
		 "Context switching while holding lock!");
# endif
#endif

	old_thread = _current;

	z_check_stack_sentinel();

	old_thread->swap_retval = -EAGAIN;

	/* We always take the scheduler spinlock if we don't already
	 * have it.  We "release" other spinlocks here.  But we never
	 * drop the interrupt lock.
	 */
	if (is_spinlock && lock != NULL && lock != &sched_spinlock) {
		k_spin_release(lock);
	}
	if (!is_spinlock || lock != &sched_spinlock) {
		(void) k_spin_lock(&sched_spinlock);
	}

	new_thread = z_swap_next_thread();

	if (new_thread != old_thread) {
#ifdef CONFIG_TIMESLICING
		z_reset_time_slice();
#endif

#ifdef CONFIG_SMP
		_current_cpu->swap_ok = 0;
		new_thread->base.cpu = arch_curr_cpu()->id;

		if (!is_spinlock) {
			z_smp_release_global_lock(new_thread);
		}
#endif
		z_thread_mark_switched_out();
		wait_for_switch(new_thread);
		_current_cpu->current = new_thread;

#ifdef CONFIG_SPIN_VALIDATE
		z_spin_lock_set_owner(&sched_spinlock);
#endif

		arch_cohere_stacks(old_thread, NULL, new_thread);

#ifdef CONFIG_SMP
		/* Add _current back to the run queue HERE. After
		 * wait_for_switch() we are guaranteed to reach the
		 * context switch in finite time, avoiding a potential
		 * deadlock.
		 */
		z_requeue_current(old_thread);
#endif
		void *newsh = new_thread->switch_handle;

		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
		k_spin_release(&sched_spinlock);
		arch_switch(newsh, &old_thread->switch_handle);
	} else {
		k_spin_release(&sched_spinlock);
	}

	if (is_spinlock) {
		arch_irq_unlock(key);
	} else {
		irq_unlock(key);
	}

	return _current->swap_retval;
}

static inline int z_swap_irqlock(unsigned int key)
{
	return do_swap(key, NULL, 0);
}

static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	return do_swap(key.key, lock, 1);
}

static inline void z_swap_unlocked(void)
{
	(void) do_swap(arch_irq_lock(), NULL, 1);
}

#else /* !CONFIG_USE_SWITCH */

extern int arch_swap(unsigned int key);

static inline int z_swap_irqlock(unsigned int key)
{
	int ret;
	z_check_stack_sentinel();
	ret = arch_swap(key);
	return ret;
}

/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
 * can't be in SMP.  The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	k_spin_release(lock);
	return z_swap_irqlock(key.key);
}

static inline void z_swap_unlocked(void)
{
	(void) z_swap_irqlock(arch_irq_lock());
}

#endif /* !CONFIG_USE_SWITCH */
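
/* Hedged usage sketch: callers of z_swap() are blocking primitives that
 * already hold a spinlock protecting their wait state.  z_swap()
 * atomically releases that lock, reschedules, and returns whatever
 * value the waking context set for this thread (or -EAGAIN by default,
 * per do_swap() above).  The names my_obj, condition_ready() and
 * pend_on() below are hypothetical illustrations, not kernel API.
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_obj->lock);
 *
 *	if (!condition_ready(my_obj)) {
 *		pend_on(&my_obj->waitq, _current);  // hypothetical
 *		return z_swap(&my_obj->lock, key);  // blocks here; lock is
 *		                                    // released atomically
 *	}
 *	k_spin_unlock(&my_obj->lock, key);
 *	return 0;
 */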

/**
 * Set up a "dummy" thread, used at early initialization to launch the
 * first thread on a CPU.
 *
 * Needs to set enough fields such that the context switching code can
 * use it to properly store state, which will just be discarded.
 *
 * The memory of the dummy thread can be completely uninitialized.
 */
static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
{
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_SCHED_CPU_MASK
	dummy_thread->base.cpu_mask = -1;
#endif
	dummy_thread->base.user_options = K_ESSENTIAL;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
#endif

	_current_cpu->current = dummy_thread;
}
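
/* Hedged usage sketch: during early bring-up, init code points _current
 * at a throwaway thread object so the first real context switch has
 * somewhere to store the (discarded) outgoing state.  The surrounding
 * function name is hypothetical; z_dummy_thread_init() and
 * z_swap_unlocked() are the routines defined in this header.
 *
 *	void start_scheduling_on_this_cpu(void)   // hypothetical
 *	{
 *		struct k_thread dummy;   // contents may be uninitialized
 *
 *		z_dummy_thread_init(&dummy);
 *		z_swap_unlocked();   // switch to the first real thread; the
 *		                     // dummy's saved state is never resumed
 *	}
 */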
#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */