/* Copyright (c) 2022 Intel corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel/smp.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

static atomic_t global_lock;

/**
 * Flag to tell a recently powered-up CPU to start
 * its initialization routine.
 *
 * 0 to tell the powered-up CPU to wait.
 * 1 to tell the powered-up CPU to continue initialization.
 */
static atomic_t cpu_start_flag;

/**
 * Flag to tell the caller that the target CPU is now
 * powered up and ready to be initialized.
 *
 * 0 if the target CPU is not yet ready.
 * 1 if the target CPU has powered up and is ready to be initialized.
 */
static atomic_t ready_flag;

/**
 * Struct holding the function to be called before handing off
 * to the scheduler, and its argument.
 */
static struct cpu_start_cb {
	/**
	 * Function to be called before handing off to the scheduler.
	 * Can be NULL.
	 */
	smp_init_fn fn;

	/** Argument to @ref cpu_start_fn.fn. */
	void *arg;

	/** Invoke scheduler after CPU has started if true. */
	bool invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/** True if smp_timer_init() needs to be called. */
	bool reinit_timer;
#endif /* CONFIG_SYS_CLOCK_EXISTS */
} cpu_start_fn;

static struct k_spinlock cpu_start_lock;

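/* Acquire the system-wide global lock with local interrupts disabled.
 * The lock is recursive per thread via global_lock_count; the returned
 * IRQ key must be passed back to z_smp_global_unlock().
 */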
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
			arch_spin_relax();
		}
	}

	_current->base.global_lock_count++;

	return key;
}

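/* Release one level of the global lock and restore the interrupt state
 * saved in @a key. The lock itself is only dropped when the outermost
 * hold is released.
 */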
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			(void)atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		(void)atomic_clear(&global_lock);
	}
}

/* Tiny delay that relaxes bus traffic to avoid spamming a shared
 * memory bus looking at an atomic variable
 */
static inline void local_delay(void)
{
	for (volatile int i = 0; i < 1000; i++) {
	}
}

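/* Busy-wait until the given start flag becomes nonzero. */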
static void wait_for_start_signal(atomic_t *start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
		local_delay();
	}
}

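/* Entry point for a newly started CPU: report readiness via ready_flag,
 * wait for cpu_start_flag, perform the per-CPU setup described by the
 * cpu_start_cb argument, then optionally hand control to the scheduler.
 */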
static inline void smp_init_top(void *arg)
{
	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};

	/* Let start_cpu() know that this CPU has powered up. */
	(void)atomic_set(&ready_flag, 1);

	/* Wait for the CPU start caller to signal that
	 * we can start initialization.
	 */
	wait_for_start_signal(&cpu_start_flag);

	if ((arg == NULL) || csc.invoke_sched) {
		/* Initialize the dummy thread struct so that
		 * the scheduler can schedule actual threads to run.
		 */
		z_dummy_thread_init(&_thread_dummy);
	}

#ifdef CONFIG_SYS_CLOCK_EXISTS
	if ((arg == NULL) || csc.reinit_timer) {
		smp_timer_init();
	}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* Do additional initialization steps if needed. */
	if (csc.fn != NULL) {
		csc.fn(csc.arg);
	}

	if ((arg != NULL) && !csc.invoke_sched) {
		/* Don't invoke scheduler. */
		return;
	}

	/* Let scheduler decide what thread to run next. */
	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

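/* Power up one CPU and busy-wait until it signals readiness through
 * ready_flag. @a csc is passed through to smp_init_top() and may be NULL.
 */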
static void start_cpu(int id, struct cpu_start_cb *csc)
{
	/* Clear the ready flag so the newly powered up CPU can
	 * signal that it has powered up.
	 */
	(void)atomic_clear(&ready_flag);

	/* Power up the CPU */
	arch_cpu_start(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, csc);

	/* Wait until the newly powered up CPU signals that
	 * it has powered up.
	 */
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}

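/* Start a single CPU, optionally running @a fn(@a arg) on it before it
 * enters the scheduler. The shared cpu_start_fn struct is protected by
 * cpu_start_lock.
 */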
void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = true;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = true;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */

	/* Initialize various CPU structs related to this CPU. */
	z_init_cpu(id);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}

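/* Resume a CPU: like k_smp_cpu_start(), but the caller chooses whether
 * the system timer is re-initialized (reinit_timer) and whether the CPU
 * should hand control to the scheduler once running (invoke_sched).
 */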
void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
		      bool reinit_timer, bool invoke_sched)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = reinit_timer;
#else
	ARG_UNUSED(reinit_timer);
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}

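/* Boot-time bring-up of all secondary CPUs. cpu_start_flag is kept at 0
 * until every CPU has been started so that they enter the scheduler
 * together.
 */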
void z_smp_init(void)
{
	/* We are powering up all CPUs and we want to synchronize their
	 * entry into the scheduler. So set the start flag to 0 here.
	 */
	(void)atomic_clear(&cpu_start_flag);

	/* Just start CPUs one by one. */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		z_init_cpu(i);
		start_cpu(i, NULL);
	}

	/* Let loose those CPUs so they can start scheduling
	 * threads to run.
	 */
	(void)atomic_set(&cpu_start_flag, 1);
}

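/* Return true if the current context may migrate to another CPU, i.e. it
 * is not running in an ISR and did not enter with interrupts locked.
 */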
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}