/*
 * Copyright (c) 2023, 2024 Arm Limited (or its affiliates).
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel/thread_stack.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/cache.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/toolchain.h>
#include <zephyr/platform/hooks.h>
#include <ipi.h>
#include "boot.h"

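/* Marker for a cpu_map entry with no core mapped yet */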
#define INV_MPID	UINT32_MAX

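/* Software-generated interrupt (SGI) IDs used as inter-processor interrupts */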
#define SGI_SCHED_IPI	0
#define SGI_MMCFG_IPI	1
#define SGI_FPU_IPI	2

K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks,
				    CONFIG_MP_MAX_NUM_CPUS,
				    CONFIG_ISR_STACK_SIZE);

K_KERNEL_STACK_ARRAY_DECLARE(z_arm_fiq_stack,
			     CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ARMV7_FIQ_STACK_SIZE);

K_KERNEL_STACK_ARRAY_DECLARE(z_arm_abort_stack,
			     CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ARMV7_EXCEPTION_STACK_SIZE);

K_KERNEL_STACK_ARRAY_DECLARE(z_arm_undef_stack,
			     CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ARMV7_EXCEPTION_STACK_SIZE);

K_KERNEL_STACK_ARRAY_DECLARE(z_arm_svc_stack,
			     CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ARMV7_SVC_STACK_SIZE);

K_KERNEL_STACK_ARRAY_DECLARE(z_arm_sys_stack,
			     CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ARMV7_SYS_STACK_SIZE);

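/*
 * Boot parameters handed from the primary core to a secondary core.
 * reset.S reads these fields at fixed offsets, which the BUILD_ASSERTs
 * below pin down; the mpid field doubles as the release signal.
 */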
struct boot_params {
	uint32_t mpid;
	char *irq_sp;
	char *fiq_sp;
	char *abt_sp;
	char *udf_sp;
	char *svc_sp;
	char *sys_sp;
	uint8_t voting[CONFIG_MP_MAX_NUM_CPUS];
	arch_cpustart_t fn;
	void *arg;
	int cpu_num;
};

/* Offsets used in reset.S */
BUILD_ASSERT(offsetof(struct boot_params, mpid) == BOOT_PARAM_MPID_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, irq_sp) == BOOT_PARAM_IRQ_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, fiq_sp) == BOOT_PARAM_FIQ_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, abt_sp) == BOOT_PARAM_ABT_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, udf_sp) == BOOT_PARAM_UDF_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, svc_sp) == BOOT_PARAM_SVC_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, sys_sp) == BOOT_PARAM_SYS_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET);

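/*
 * Initial boot parameters consumed by reset.S at early boot: the primary
 * core comes up on these stacks, and arch_cpu_start() later repoints the
 * fields at the per-CPU stacks of whichever secondary core it releases.
 */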
volatile struct boot_params arm_cpu_boot_params = {
	.mpid = INV_MPID,
	.irq_sp = (char *)(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE),
	.fiq_sp = (char *)(z_arm_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE),
	.abt_sp = (char *)(z_arm_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE),
	.udf_sp = (char *)(z_arm_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE),
	.svc_sp = (char *)(z_arm_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE),
	.sys_sp = (char *)(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE),
};

const uint32_t cpu_node_list[] = {
	DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))};

/* cpu_map records the mapping from logical core ID to MPID */
static uint32_t cpu_map[CONFIG_MP_MAX_NUM_CPUS] = {
	[0 ... (CONFIG_MP_MAX_NUM_CPUS - 1)] = INV_MPID
};

#ifdef CONFIG_ARM_MPU
extern void z_arm_mpu_init(void);
extern void z_arm_configure_static_mpu_regions(void);
#elif defined(CONFIG_ARM_AARCH32_MMU)
extern int z_arm_mmu_init(void);
#endif

/* Called from Zephyr initialization */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_t fn, void *arg)
{
	int cpu_count, i, j;
	uint32_t cpu_mpid = 0;
	uint32_t master_core_mpid;

	/* This must run on the primary (master) core */
	__ASSERT(arch_curr_cpu()->id == 0, "");
	master_core_mpid = MPIDR_TO_CORE(GET_MPIDR());

	cpu_count = ARRAY_SIZE(cpu_node_list);
	__ASSERT(cpu_count == CONFIG_MP_MAX_NUM_CPUS,
		 "The number of CPU core nodes in the devicetree does not match CONFIG_MP_MAX_NUM_CPUS\n");

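	/*
	 * Resolve the logical CPU number to an MPID from the devicetree
	 * list, skipping over the primary core's own entry.
	 */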
	for (i = 0, j = 0; i < cpu_count; i++) {
		if (cpu_node_list[i] == master_core_mpid) {
			continue;
		}
		if (j == cpu_num - 1) {
			cpu_mpid = cpu_node_list[i];
			break;
		}
		j++;
	}
	if (i == cpu_count) {
		printk("Can't find CPU core %d in the devicetree; failed to boot it\n", cpu_num);
		return;
	}

	/* Pass the stack addresses to the secondary core */
	arm_cpu_boot_params.irq_sp = K_KERNEL_STACK_BUFFER(stack) + sz;
	arm_cpu_boot_params.fiq_sp = K_KERNEL_STACK_BUFFER(z_arm_fiq_stack[cpu_num])
				     + CONFIG_ARMV7_FIQ_STACK_SIZE;
	arm_cpu_boot_params.abt_sp = K_KERNEL_STACK_BUFFER(z_arm_abort_stack[cpu_num])
				     + CONFIG_ARMV7_EXCEPTION_STACK_SIZE;
	arm_cpu_boot_params.udf_sp = K_KERNEL_STACK_BUFFER(z_arm_undef_stack[cpu_num])
				     + CONFIG_ARMV7_EXCEPTION_STACK_SIZE;
	arm_cpu_boot_params.svc_sp = K_KERNEL_STACK_BUFFER(z_arm_svc_stack[cpu_num])
				     + CONFIG_ARMV7_SVC_STACK_SIZE;
	arm_cpu_boot_params.sys_sp = K_KERNEL_STACK_BUFFER(z_arm_sys_stack[cpu_num])
				     + CONFIG_ARMV7_SYS_STACK_SIZE;

	arm_cpu_boot_params.fn = fn;
	arm_cpu_boot_params.arg = arg;
	arm_cpu_boot_params.cpu_num = cpu_num;

	/*
	 * The barrier ensures that the writes to arm_cpu_boot_params above
	 * complete before we publish the mpid below.
	 */
	barrier_dsync_fence_full();

	/* Store the mpid last, as it is our synchronization point */
	arm_cpu_boot_params.mpid = cpu_mpid;

	sys_cache_data_invd_range(
			(void *)&arm_cpu_boot_params,
			sizeof(arm_cpu_boot_params));

	/* TODO: Support PSCI */

	/* Wait for the secondary core to come up; see arch_secondary_cpu_init() */
	while (arm_cpu_boot_params.fn) {
		wfe();
	}

	cpu_map[cpu_num] = cpu_mpid;

	printk("Secondary CPU core %d (MPID:%#x) is up\n", cpu_num, cpu_mpid);
}

/* The C entry point for secondary cores */
void arch_secondary_cpu_init(void)
{
	int cpu_num = arm_cpu_boot_params.cpu_num;
	arch_cpustart_t fn;
	void *arg;

	__ASSERT(arm_cpu_boot_params.mpid == MPIDR_TO_CORE(GET_MPIDR()), "");

	/* Initialize TPIDRURO with the address of our struct _cpu instance */
	write_tpidruro((uintptr_t)&_kernel.cpus[cpu_num]);

#ifdef CONFIG_ARM_MPU

	/* TODO: Unify the MPU and MMU initialization functions */
	z_arm_mpu_init();
	z_arm_configure_static_mpu_regions();
#elif defined(CONFIG_ARM_AARCH32_MMU)
	z_arm_mmu_init();
#endif

#ifdef CONFIG_SMP
	arm_gic_secondary_init();

	irq_enable(SGI_SCHED_IPI);

	/* TODO: FPU IRQ */
#endif

#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */

	fn = arm_cpu_boot_params.fn;
	arg = arm_cpu_boot_params.arg;
	barrier_dsync_fence_full();

	/*
	 * The secondary core clears .fn to announce its presence; the
	 * primary core is polling for this. We no longer own
	 * arm_cpu_boot_params afterwards.
	 */
	arm_cpu_boot_params.fn = NULL;
	barrier_dsync_fence_full();

	sev();

	fn(arg);
}

#ifdef CONFIG_SMP

static void send_ipi(unsigned int ipi, uint32_t cpu_bitmap)
{
	uint32_t mpidr = MPIDR_TO_CORE(GET_MPIDR());

	/*
	 * Send the SGI to all requested cores except the current core
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((cpu_bitmap & BIT(i)) == 0) {
			continue;
		}

		uint32_t target_mpidr = cpu_map[i];
		uint8_t aff0;

		/* Skip the current core and any core that has not been mapped yet */
		if (mpidr == target_mpidr || target_mpidr == INV_MPID) {
			continue;
		}

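		/*
		 * gic_raise_sgi() targets the SGI by affinity: the last
		 * argument is a bitmask of affinity-0 IDs within the cluster
		 * selected by the upper affinity levels of target_mpidr.
		 */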
		aff0 = MPIDR_AFFLVL(target_mpidr, 0);
		gic_raise_sgi(ipi, (uint64_t)target_mpidr, 1 << aff0);
	}
}

void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_sched_ipi();
}

void arch_sched_broadcast_ipi(void)
{
	send_ipi(SGI_SCHED_IPI, IPI_ALL_CPUS_MASK);
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	send_ipi(SGI_SCHED_IPI, cpu_bitmap);
}

int arch_smp_init(void)
{
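	/* Record the primary core's MPID; secondary entries are filled in arch_cpu_start() */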
	cpu_map[0] = MPIDR_TO_CORE(GET_MPIDR());

	/*
	 * SGI0 is used for the scheduler IPI; this could be made
	 * configurable through a Kconfig option.
	 */
	IRQ_CONNECT(SGI_SCHED_IPI, IRQ_DEFAULT_PRIORITY, sched_ipi_handler, NULL, 0);
	irq_enable(SGI_SCHED_IPI);

	return 0;
}

#endif