/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Code required for ARC multicore and Zephyr SMP support
 *
 */
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <zephyr/platform/hooks.h>
#include <arc_irq_offload.h>

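/*
 * Per-cpu start function and argument, set by arch_cpu_start() on the
 * master core and consumed by arch_secondary_cpu_init() on the slave core.
 */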
volatile struct {
	arch_cpustart_t fn;
	void *arg;
} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to sync up the master core and slave cores.
 * A slave core spins on arc_cpu_wake_flag until the master core sets it
 * to that slave's core id; the slave then clears the flag to notify the
 * master core that it is awake.
 */
volatile uint32_t arc_cpu_wake_flag;
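
/*
 * Slave-side view of the wake-up handshake. The real loop lives elsewhere,
 * in the assembly start-up code; this C sketch is illustrative only, and
 * my_cpu_id and sp are illustrative names:
 *
 *	while (arc_cpu_wake_flag != my_cpu_id) {
 *		;	(spin until the master sets the flag to my_cpu_id)
 *	}
 *	sp = arc_cpu_sp;	(pick up the stack set by the master)
 *	arc_cpu_wake_flag = 0;	(notify the master that this core is awake)
 */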

volatile char *arc_cpu_sp;
/*
 * _curr_cpu records the _cpu_t structure of each cpu
 * for efficient access from assembly code.
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];

/* Called from Zephyr initialization */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
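	/* record this cpu's _cpu_t pointer for the assembly start-up code */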
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* pass the initial stack pointer to the target core through
	 * arc_cpu_sp; arc_cpu_wake_flag protects arc_cpu_sp so that
	 * only one slave cpu can read it at a time
	 */
	arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave cpu to start */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}
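
/*
 * Caller-side sketch (illustrative only; the names below are not the
 * actual kernel call site):
 *
 *	arch_cpu_start(1, secondary_stack, secondary_stack_size,
 *		       entry_fn, entry_arg);
 *
 * The call returns only after core 1 has cleared arc_cpu_wake_flag.
 */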

#ifdef CONFIG_SMP
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t core_mask = 1 << cpu_num;

	/*
	 * The MDB debugger may modify the debug_select and debug_mask
	 * registers on start, so we can't rely on the debug_select reset
	 * value.
	 */
	if (cpu_num != ARC_MP_PRIMARY_CPU_ID) {
		core_mask |= z_arc_connect_debug_select_read();
	}

	z_arc_connect_debug_select_set(core_mask);
	/* The debugger halts cores under all of the following conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: Core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: Actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: Software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: Self halt.
	 */
	z_arc_connect_debug_mask_set(core_mask, (ARC_CONNECT_CMD_DEBUG_MASK_SH
		| ARC_CONNECT_CMD_DEBUG_MASK_BH | ARC_CONNECT_CMD_DEBUG_MASK_AH
		| ARC_CONNECT_CMD_DEBUG_MASK_H));
}
#endif

void arc_core_private_intc_init(void);

/* the C entry point for slave cores */
void arch_secondary_cpu_init(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

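	/* per-core interrupt setup: core interrupt unit, private interrupt
	 * controller and IRQ offload support
	 */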
	z_irq_setup();

	arc_core_private_intc_init();

	arc_irq_offload_init_smp();

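	/* clear any pending inter-core interrupt, then set the priority of
	 * the ici line from the devicetree and enable it
	 */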
	z_arc_connect_ici_clear();
	z_irq_priority_set(DT_IRQN(DT_NODELABEL(ici)),
			   DT_IRQ(DT_NODELABEL(ici), priority), 0);
	irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif

#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */

	/* call the function set by arch_cpu_start */
	fn = arc_cpu_init[cpu_num].fn;

	fn(arc_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

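/* ici handler: acknowledge the inter-core interrupt and let the
 * scheduler process the IPI
 */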
static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_arc_connect_ici_clear();
	z_sched_ipi();
}

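/* send a scheduler IPI to every core whose bit is set in cpu_bitmap */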
void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int i;
	unsigned int num_cpus = arch_num_cpus();

	/* Send a sched_ipi request to the other cores;
	 * if the target is the current core, the hardware ignores it
	 */

	for (i = 0U; i < num_cpus; i++) {
		if ((cpu_bitmap & BIT(i)) != 0) {
			z_arc_connect_ici_generate(i);
		}
	}
}

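/* broadcast a scheduler IPI to all cores; the request targeting the
 * sending core is ignored by the hardware
 */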
void arch_sched_broadcast_ipi(void)
{
	arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}

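/* master-core SMP init: inter-core debug unit, inter-core interrupt (ici)
 * and global free running counter (gfrc)
 */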
int arch_smp_init(void)
{
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(ARC_MP_PRIMARY_CPU_ID);
	}

	if (bcr.ipi) {
		/* register the ici interrupt; only the master core needs
		 * to register it once
		 */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ici)),
			    DT_IRQ(DT_NODELABEL(ici), priority),
			    sched_ipi_handler, NULL, 0);

		irq_enable(DT_IRQN(DT_NODELABEL(ici)));
	} else {
		__ASSERT(0,
			"ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* global free running counter init */
		z_arc_connect_gfrc_enable();

		/* when all cores halt, the gfrc halts as well */
		z_arc_connect_gfrc_core_set((1 << arch_num_cpus()) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0,
			"ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}
#endif