/*
 * Copyright 2020 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 */

/**
 * @file
 * @brief Code required for AArch64 multicore and Zephyr SMP support
 */

#include <zephyr/cache.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/init.h>
#include <zephyr/arch/arm64/mm.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/drivers/pm_cpu_ops.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/irq.h>
#include "boot.h"

#define INV_MPID UINT64_MAX

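/* SGI (Software Generated Interrupt) IDs used as inter-processor interrupts */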
#define SGI_SCHED_IPI 0
#define SGI_MMCFG_IPI 1
#define SGI_FPU_IPI 2

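/*
 * Boot parameters handed from the primary core to a starting secondary
 * core. The primary core fills this in (and flushes it to memory) before
 * powering the target core on; the secondary core consumes it in its boot
 * path and clears .fn to report that it is up.
 */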
struct boot_params {
	uint64_t mpid;
	char *sp;
	uint8_t voting[CONFIG_MP_MAX_NUM_CPUS];
	arch_cpustart_t fn;
	void *arg;
	int cpu_num;
};

/* Offsets used in reset.S */
BUILD_ASSERT(offsetof(struct boot_params, mpid) == BOOT_PARAM_MPID_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, sp) == BOOT_PARAM_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET);

volatile struct boot_params __aligned(L1_CACHE_BYTES) arm64_cpu_boot_params = {
	.mpid = -1,
};

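/* MPIDs of all enabled CPU nodes in the devicetree */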
const uint64_t cpu_node_list[] = {
	DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))
};

/* cpu_map saves the mapping between core id and mpid */
static uint64_t cpu_map[CONFIG_MP_MAX_NUM_CPUS] = {
	[0 ... (CONFIG_MP_MAX_NUM_CPUS - 1)] = INV_MPID
};

extern void z_arm64_mm_init(bool is_primary_core);

/* Called from Zephyr initialization */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	int cpu_count;
	static int i;
	uint64_t cpu_mpid = 0;
	uint64_t master_core_mpid;

	/* Now it is on master core */
	__ASSERT(arch_curr_cpu()->id == 0, "");
	master_core_mpid = MPIDR_TO_CORE(GET_MPIDR());

	cpu_count = ARRAY_SIZE(cpu_node_list);

#ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES
	__ASSERT(cpu_count >= CONFIG_MP_MAX_NUM_CPUS,
		 "The count of CPU core nodes in dts is not greater than or equal to CONFIG_MP_MAX_NUM_CPUS\n");
#else
	__ASSERT(cpu_count == CONFIG_MP_MAX_NUM_CPUS,
		 "The count of CPU core nodes in dts is not equal to CONFIG_MP_MAX_NUM_CPUS\n");
#endif

	arm64_cpu_boot_params.sp = K_KERNEL_STACK_BUFFER(stack) + sz;
	arm64_cpu_boot_params.fn = fn;
	arm64_cpu_boot_params.arg = arg;
	arm64_cpu_boot_params.cpu_num = cpu_num;

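	/*
	 * Walk the devicetree CPU nodes with a persistent index so that each
	 * call to arch_cpu_start() picks up where the previous one left off.
	 * Skip the primary core, publish the boot parameters, then power the
	 * target core on; with CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES a
	 * failed power-on falls through to the next node instead of panicking.
	 */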
	for (; i < cpu_count; i++) {
		if (cpu_node_list[i] == master_core_mpid) {
			continue;
		}

		cpu_mpid = cpu_node_list[i];

		barrier_dsync_fence_full();

		/* store mpid last as this is our synchronization point */
		arm64_cpu_boot_params.mpid = cpu_mpid;

		sys_cache_data_flush_range((void *)&arm64_cpu_boot_params,
					   sizeof(arm64_cpu_boot_params));

		if (pm_cpu_on(cpu_mpid, (uint64_t)&__start)) {
			printk("Failed to boot secondary CPU core %d (MPID:%#llx)\n",
			       cpu_num, cpu_mpid);
#ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES
			printk("Falling back on reserved cores\n");
			continue;
#else
			k_panic();
#endif
		}

		break;
	}
	if (i++ == cpu_count) {
		printk("Can't find CPU Core %d from dts and failed to boot it\n", cpu_num);
		k_panic();
	}

	/* Wait for the secondary core to come up, see arch_secondary_cpu_init */
	while (arm64_cpu_boot_params.fn) {
		wfe();
	}

	cpu_map[cpu_num] = cpu_mpid;

	printk("Secondary CPU core %d (MPID:%#llx) is up\n", cpu_num, cpu_mpid);
}

/* The C entry point of secondary cores */
void arch_secondary_cpu_init(int cpu_num)
{
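	/*
	 * Use the logical CPU index published by the primary core in the boot
	 * parameters; the physical core that answered may differ from the one
	 * originally requested when CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES
	 * is enabled.
	 */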
	cpu_num = arm64_cpu_boot_params.cpu_num;
	arch_cpustart_t fn;
	void *arg;

	__ASSERT(arm64_cpu_boot_params.mpid == MPIDR_TO_CORE(GET_MPIDR()), "");

	/* Initialize tpidrro_el0 with our struct _cpu instance address */
	write_tpidrro_el0((uintptr_t)&_kernel.cpus[cpu_num]);

	z_arm64_mm_init(false);

#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	z_arm64_safe_exception_stack_init();
#endif

#ifdef CONFIG_SMP
	arm_gic_secondary_init();

	irq_enable(SGI_SCHED_IPI);
#ifdef CONFIG_USERSPACE
	irq_enable(SGI_MMCFG_IPI);
#endif
#ifdef CONFIG_FPU_SHARING
	irq_enable(SGI_FPU_IPI);
#endif
#endif

#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */

	fn = arm64_cpu_boot_params.fn;
	arg = arm64_cpu_boot_params.arg;
	barrier_dsync_fence_full();

	/*
	 * Secondary core clears .fn to announce its presence.
	 * Primary core is polling for this. We no longer own
	 * arm64_cpu_boot_params afterwards.
	 */
	arm64_cpu_boot_params.fn = NULL;
	barrier_dsync_fence_full();
	sev();

	fn(arg);
}

#ifdef CONFIG_SMP

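/*
 * Raise the given SGI on every CPU whose bit is set in cpu_bitmap, except
 * the calling core. Each target is addressed through its MPIDR, using a
 * mask built from its affinity level 0 ID.
 */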
static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap)
{
	uint64_t mpidr = MPIDR_TO_CORE(GET_MPIDR());

	/*
	 * Send the SGI to all requested cores except the current one
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((cpu_bitmap & BIT(i)) == 0) {
			continue;
		}

		uint64_t target_mpidr = cpu_map[i];
		uint8_t aff0;

		if (mpidr == target_mpidr || target_mpidr == INV_MPID) {
			continue;
		}

		aff0 = MPIDR_AFFLVL(target_mpidr, 0);
		gic_raise_sgi(ipi, target_mpidr, 1 << aff0);
	}
}

void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_sched_ipi();
}

void arch_sched_broadcast_ipi(void)
{
	send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK);
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	send_ipi(SGI_SCHED_IPI, cpu_bitmap);
}

#ifdef CONFIG_USERSPACE
void mem_cfg_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);
	unsigned int key = arch_irq_lock();

	/*
	 * Make sure a domain switch by another CPU is effective on this CPU.
	 * This is a no-op if the page table is already the right one.
	 * Lock irq to prevent the interrupt during mem region switch.
	 */
	z_arm64_swap_mem_domains(arch_current_thread());
	arch_irq_unlock(key);
}

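/* Tell every other CPU to resynchronize its memory domain configuration */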
void z_arm64_mem_cfg_ipi(void)
{
	send_ipi(SGI_MMCFG_IPI, IPI_ALL_CPUS_MASK);
}
#endif

#ifdef CONFIG_FPU_SHARING
void flush_fpu_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	disable_irq();
	arch_flush_local_fpu();
	/* no need to re-enable IRQs here */
}

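/* Ask the given CPU to save and release its live FPU context */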
void arch_flush_fpu_ipi(unsigned int cpu)
{
	const uint64_t mpidr = cpu_map[cpu];
	uint8_t aff0;

	if (mpidr == INV_MPID) {
		return;
	}

	aff0 = MPIDR_AFFLVL(mpidr, 0);
	gic_raise_sgi(SGI_FPU_IPI, mpidr, 1 << aff0);
}

/*
 * Make sure there is no pending FPU flush request for this CPU while
 * waiting for a contended spinlock to become available. This prevents
 * a deadlock when the lock we need is already taken by another CPU
 * that also wants its FPU content to be reinstated while such content
 * is still live in this CPU's FPU.
 */
void arch_spin_relax(void)
{
	if (arm_gic_irq_is_pending(SGI_FPU_IPI)) {
		arm_gic_irq_clear_pending(SGI_FPU_IPI);
		/*
		 * We may not be in IRQ context here hence cannot use
		 * arch_flush_local_fpu() directly.
		 */
		arch_float_disable(_current_cpu->arch.fpu_owner);
	}
}
#endif

int arch_smp_init(void)
{
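	/* Record the primary core's MPID as CPU 0 in the core-to-MPID map */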
	cpu_map[0] = MPIDR_TO_CORE(GET_MPIDR());

	/*
	 * SGI0 is used for the sched IPI; this might be changed to use a
	 * Kconfig option
	 */
	IRQ_CONNECT(SGI_SCHED_IPI, IRQ_DEFAULT_PRIORITY, sched_ipi_handler, NULL, 0);
	irq_enable(SGI_SCHED_IPI);

#ifdef CONFIG_USERSPACE
	IRQ_CONNECT(SGI_MMCFG_IPI, IRQ_DEFAULT_PRIORITY,
		    mem_cfg_ipi_handler, NULL, 0);
	irq_enable(SGI_MMCFG_IPI);
#endif
#ifdef CONFIG_FPU_SHARING
	IRQ_CONNECT(SGI_FPU_IPI, IRQ_DEFAULT_PRIORITY, flush_fpu_ipi_handler, NULL, 0);
	irq_enable(SGI_FPU_IPI);
#endif

	return 0;
}

#endif