/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Code required for ARC multicore and Zephyr SMP support
 */
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <arc_irq_offload.h>

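/*
 * Per-CPU entry hook: arch_start_cpu() records the (fn, arg) pair here and
 * the slave core picks it up in z_arc_slave_start().
 */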
volatile struct {
	arch_cpustart_t fn;
	void *arg;
} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to sync up the master core and slave cores.
 * A slave core spins on arc_cpu_wake_flag until the master core sets it
 * to that slave core's ID. The slave core then clears it to notify the
 * master core that it has woken up.
 */
volatile uint32_t arc_cpu_wake_flag;

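/*
 * Initial stack pointer handed off to the slave core being started;
 * set by arch_start_cpu() and read by the slave's early boot code.
 */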
volatile char *arc_cpu_sp;
/*
 * _curr_cpu records the _cpu_t struct of each CPU
 * for efficient access from assembly.
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];

/* Called from Zephyr initialization */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* set the initial stack pointer of the target core through
	 * arc_cpu_sp; arc_cpu_wake_flag protects arc_cpu_sp so that
	 * only one slave CPU can read it at a time
	 */
	arc_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave CPU to start */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}

#ifdef CONFIG_SMP
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t core_mask = 1 << cpu_num;

	/*
	 * The MDB debugger may modify the debug_select and debug_mask
	 * registers on start, so we can't rely on the debug_select reset
	 * value.
	 */
	if (cpu_num != ARC_MP_PRIMARY_CPU_ID) {
		core_mask |= z_arc_connect_debug_select_read();
	}

	z_arc_connect_debug_select_set(core_mask);
	/* The debugger halts the cores under all of these conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: Core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: Actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: Software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: Self halt.
	 */
	z_arc_connect_debug_mask_set(core_mask, (ARC_CONNECT_CMD_DEBUG_MASK_SH
		| ARC_CONNECT_CMD_DEBUG_MASK_BH | ARC_CONNECT_CMD_DEBUG_MASK_AH
		| ARC_CONNECT_CMD_DEBUG_MASK_H));
}
#endif

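/*
 * Core-private interrupt controller init for the calling core;
 * implemented elsewhere in the ARC port (prototype only here).
 */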
void arc_core_private_intc_init(void);

/* The C entry point of slave cores */
void z_arc_slave_start(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure the inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

	z_irq_setup();

	arc_core_private_intc_init();

	arc_irq_offload_init_smp();

	z_arc_connect_ici_clear();
	z_irq_priority_set(DT_IRQN(DT_NODELABEL(ici)),
			   DT_IRQ(DT_NODELABEL(ici), priority), 0);
	irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif
	/* call the function set by arch_start_cpu */
	fn = arc_cpu_init[cpu_num].fn;

	fn(arc_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

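/*
 * Handler for the inter-core interrupt (ICI): acknowledge the interrupt,
 * then let the scheduler react to the IPI.
 */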
static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_arc_connect_ici_clear();
	z_sched_ipi();
}

/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{
	uint32_t i;

	/* broadcast the sched_ipi request to the other cores;
	 * if the target is the current core, the hardware will ignore it
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (i = 0U; i < num_cpus; i++) {
		z_arc_connect_ici_generate(i);
	}
}

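/*
 * Master-core SMP init: probe the ARC connect block and set up the
 * inter-core interrupt and the global free running counter.
 */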
static int arc_smp_init(void)
{
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure the inter-core debug unit if available */
		arc_connect_debug_mask_update(ARC_MP_PRIMARY_CPU_ID);
	}

	if (bcr.ipi) {
		/* register the ICI interrupt; only the master core
		 * needs to register it once
		 */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ici)),
			    DT_IRQ(DT_NODELABEL(ici), priority),
			    sched_ipi_handler, NULL, 0);

		irq_enable(DT_IRQN(DT_NODELABEL(ici)));
	} else {
		__ASSERT(0,
			"ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* global free running counter init */
		z_arc_connect_gfrc_enable();

		/* when all cores halt, the GFRC halts */
		z_arc_connect_gfrc_core_set((1 << arch_num_cpus()) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0,
			"ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}

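/* Run on the master core at PRE_KERNEL_1, before the slave cores are started */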
SYS_INIT(arc_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif