/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Code required for ARC multicore and Zephyr SMP support
 *
 */
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <arc_irq_offload.h>

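/*
 * Per-core startup entry function and argument, filled in by
 * arch_cpu_start() and consumed by arch_secondary_cpu_init()
 */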
volatile struct {
	arch_cpustart_t fn;
	void *arg;
} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to synchronize the master core and the
 * slave cores: a slave core spins on arc_cpu_wake_flag until the master
 * core sets it to that slave core's ID. The slave core then clears the
 * flag to notify the master core that it has woken up.
 */
volatile uint32_t arc_cpu_wake_flag;

volatile char *arc_cpu_sp;
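/*
 * Slave-side view of the wake-up handshake, which runs in early startup
 * code before the C environment is up. A rough sketch of the protocol
 * (not the actual implementation):
 *
 *	while (arc_cpu_wake_flag != my_cpu_id) {
 *		;
 *	}
 *	sp = arc_cpu_sp;	load the stack pointer set by the master
 *	arc_cpu_wake_flag = 0;	signal the master core that we are up
 */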
/*
 * _curr_cpu records the _cpu_t struct of each CPU
 * for efficient access from assembly
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];

/* Called from Zephyr initialization */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* Pass the initial stack pointer to the target core through
	 * arc_cpu_sp; arc_cpu_wake_flag guards arc_cpu_sp so that only
	 * one slave CPU reads it at a time.
	 */
	arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave CPU to start */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}

#ifdef CONFIG_SMP
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t core_mask = 1 << cpu_num;

	/*
	 * The MDB debugger may modify the debug_select and debug_mask
	 * registers on start, so we can't rely on the debug_select reset
	 * value.
	 */
	if (cpu_num != ARC_MP_PRIMARY_CPU_ID) {
		core_mask |= z_arc_connect_debug_select_read();
	}

	z_arc_connect_debug_select_set(core_mask);
	/* The debugger halts the selected cores under all of these
	 * conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: self halt.
	 */
	z_arc_connect_debug_mask_set(core_mask, (ARC_CONNECT_CMD_DEBUG_MASK_SH
		| ARC_CONNECT_CMD_DEBUG_MASK_BH | ARC_CONNECT_CMD_DEBUG_MASK_AH
		| ARC_CONNECT_CMD_DEBUG_MASK_H));
}
#endif

void arc_core_private_intc_init(void);

/* The C entry point for slave cores */
void arch_secondary_cpu_init(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

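	/* Check which ARCconnect hardware blocks this cluster implements */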
	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

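	/* Per-core interrupt initialization for this secondary core */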
	z_irq_setup();

	arc_core_private_intc_init();

	arc_irq_offload_init_smp();

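	/* Clear any pending inter-core interrupt, then set its priority
	 * and enable it on this core
	 */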
	z_arc_connect_ici_clear();
	z_irq_priority_set(DT_IRQN(DT_NODELABEL(ici)),
			   DT_IRQ(DT_NODELABEL(ici), priority), 0);
	irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif
	/* call the function set by arch_cpu_start */
	fn = arc_cpu_init[cpu_num].fn;

	fn(arc_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

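	/* Acknowledge the ICI before invoking the scheduler's IPI hook */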
	z_arc_connect_ici_clear();
	z_sched_ipi();
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int i;
	unsigned int num_cpus = arch_num_cpus();

	/* Send a sched_ipi request to the other cores; if the target is
	 * the current core, the hardware ignores the request.
	 */

	for (i = 0U; i < num_cpus; i++) {
		if ((cpu_bitmap & BIT(i)) != 0) {
			z_arc_connect_ici_generate(i);
		}
	}
}

void arch_sched_broadcast_ipi(void)
{
	arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}

int arch_smp_init(void)
{
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

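	/* Read the ARCconnect build configuration register to see which
	 * optional blocks (debug unit, ICI, GFRC) are present
	 */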
	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(ARC_MP_PRIMARY_CPU_ID);
	}

	if (bcr.ipi) {
		/* Register the ICI interrupt; only the master core needs
		 * to do this, and only once
		 */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ici)),
			    DT_IRQ(DT_NODELABEL(ici), priority),
			    sched_ipi_handler, NULL, 0);

		irq_enable(DT_IRQN(DT_NODELABEL(ici)));
	} else {
		__ASSERT(0,
			"ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* initialize the global free-running counter */
		z_arc_connect_gfrc_enable();

		/* when all cores halt, the GFRC halts too */
		z_arc_connect_gfrc_core_set((1 << arch_num_cpus()) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0,
			"ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}
#endif