/* Copyright (c) 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <cavs-idc.h>
#include <adsp_memory.h>
#include <adsp_shim.h>
#include <zephyr/irq.h>
#include <zephyr/pm/pm.h>
#include <zephyr/cache.h>
#include <ipi.h>

/* IDC power up message to the ROM firmware. This isn't documented
 * anywhere, it's basically just a magic number (except the high bit,
 * which signals the hardware)
 */
#define IDC_MSG_POWER_UP \
	(BIT(31) |     /* Latch interrupt in ITC write */ \
	 (0x1 << 24) | /* "ROM control version" = 1 */ \
	 (0x2 << 0))   /* "Core wake version" = 2 */

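/* Bitmask with one bit set per core, e.g. 0x7 when num_cpus == 3 */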
#define IDC_CORE_MASK(num_cpus) (BIT(num_cpus) - 1)

__imr void soc_mp_startup(uint32_t cpu)
{
	/* We got here via an IDC interrupt. Clear the TFC high bit
	 * (by writing a one!) to acknowledge and clear the latched
	 * hardware interrupt (so we don't have to service it as a
	 * spurious IPI when we enter user code). Remember: this
	 * could have come from any core, clear all of them.
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		IDC[cpu].core[i].tfc = BIT(31);
	}

	/* Interrupt must be enabled while running on current core */
	irq_enable(DT_IRQN(INTEL_ADSP_IDC_DTNODE));
}

void soc_start_core(int cpu_num)
{
	uint32_t curr_cpu = arch_proc_id();

	/* On cAVS v2.5, MP startup works differently. The core has
	 * no ROM, and starts running immediately upon receipt of an
	 * IDC interrupt at the start of LPSRAM at 0xbe800000. Note
	 * that means we don't need to bother constructing a "message"
	 * below, it will be ignored. But it's left in place for
	 * simplicity and compatibility.
	 *
	 * All we need to do is place a single jump at that address to
	 * our existing MP entry point. Unfortunately Xtensa makes
	 * this difficult, as the region is beyond the range of a
	 * relative jump instruction, so we need an immediate, which
	 * can only be backwards-referenced. So we hand-assemble a
	 * tiny trampoline here ("jump over the immediate address,
	 * load it, jump to it").
	 *
	 * Long term we want to have this in linkable LP-SRAM memory
	 * such that the standard system bootstrap out of IMR can
	 * place it there. But this is fine for now.
	 */
	void **lpsram = sys_cache_uncached_ptr_get(
		(__sparse_force void __sparse_cache *)LP_SRAM_BASE);
	uint8_t tramp[] = {
		0x06, 0x01, 0x00, /* J <PC+8> (jump to L32R) */
		0,                /* (padding to align entry_addr) */
		0, 0, 0, 0,       /* (entry_addr goes here) */
		0x01, 0xff, 0xff, /* L32R a0, <entry_addr> */
		0xa0, 0x00, 0x00, /* JX a0 */
	};
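
	/* For reference, the hand-assembled bytes above correspond
	 * roughly to:
	 *
	 *        j     1f            ; skip over the literal
	 *        .word <entry_addr>  ; patched at runtime (see lpsram[1] below)
	 *   1:   l32r  a0, <entry_addr>
	 *        jx    a0
	 */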

	memcpy(lpsram, tramp, ARRAY_SIZE(tramp));
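
	/* The second 32-bit word of the trampoline (lpsram[1]) is the
	 * entry_addr literal that the L32R above loads; patch the real
	 * entry point into it below.
	 */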
#if CONFIG_PM
	extern void dsp_restore_vector(void);

	/* We need to find out what type of booting is taking place here. Secondary cores
	 * can be disabled and enabled multiple times during runtime. During kernel
	 * initialization, the next pm state is set to ACTIVE. This way we can determine
	 * whether the core is being turned on again or for the first time.
	 */
	if (pm_state_next_get(cpu_num)->state == PM_STATE_ACTIVE) {
		lpsram[1] = z_soc_mp_asm_entry;
	} else {
		lpsram[1] = dsp_restore_vector;
	}
#else
	lpsram[1] = z_soc_mp_asm_entry;
#endif

	/* Disable automatic power and clock gating for that CPU, so
	 * it won't just go back to sleep. Note that after startup,
	 * the cores are NOT power gated even if they're configured to
	 * be, so by default a core will launch successfully but then
	 * turn itself off when it gets to the WAITI instruction in
	 * the idle thread.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_TCPLCG(cpu_num);
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(cpu_num);

	/* We set the interrupt controller up already, but the ROM on
	 * some platforms will mess it up.
	 */
	CAVS_INTCTRL[cpu_num].l2.clear = CAVS_L2_IDC;
	unsigned int num_cpus = arch_num_cpus();

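	/* Unmask the busy (ITC write) interrupt from every core on
	 * every core's IDC, mirroring the setup in soc_mp_init(),
	 * since the ROM may have clobbered it.
	 */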
	for (int c = 0; c < num_cpus; c++) {
		IDC[c].busy_int |= IDC_CORE_MASK(num_cpus);
	}

	/* Send power-up message to the other core. Start address
	 * gets passed via the IETC scratch register (only 30 bits
	 * available, so it's sent shifted). The write to ITC
	 * triggers the interrupt, so that comes last.
	 */
	uint32_t ietc = ((long)lpsram[1]) >> 2;

	IDC[curr_cpu].core[cpu_num].ietc = ietc;
	IDC[curr_cpu].core[cpu_num].itc = IDC_MSG_POWER_UP;
}

static void send_ipi(uint32_t cpu_bitmap)
{
	uint32_t curr = arch_proc_id();
	unsigned int num_cpus = arch_num_cpus();

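	/* Writing the high bit of a target core's ITC register latches
	 * an IDC interrupt on that core (see IDC_MSG_POWER_UP above);
	 * only signal cores that are active and selected in the bitmap.
	 */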
	for (int c = 0; c < num_cpus; c++) {
		if ((c != curr) && soc_cpus_active[c] &&
		    ((cpu_bitmap & BIT(c)) != 0)) {
			IDC[curr].core[c].itc = BIT(31);
		}
	}
}

void arch_sched_broadcast_ipi(void)
{
	send_ipi(IPI_ALL_CPUS_MASK);
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	send_ipi(cpu_bitmap);
}

void idc_isr(const void *param)
{
	ARG_UNUSED(param);

#ifdef CONFIG_SMP
	/* Right now this interrupt is only used for IPIs */
	z_sched_ipi();
#endif

	/* ACK the interrupt to all the possible sources. This is a
	 * level-sensitive interrupt triggered by a logical OR of each
	 * of the ITC/TFC high bits, INCLUDING the one "from this
	 * CPU".
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		IDC[arch_proc_id()].core[i].tfc = BIT(31);
	}
}

__imr void soc_mp_init(void)
{
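	/* Connect the shared IDC interrupt handler. Each core must
	 * still enable the IRQ for itself; secondary cores do that in
	 * soc_mp_startup() above.
	 */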
	IRQ_CONNECT(DT_IRQN(INTEL_ADSP_IDC_DTNODE), 0, idc_isr, NULL, 0);

	/* Every CPU should be able to receive an IDC interrupt from
	 * every other CPU, but not to be back-interrupted when the
	 * target core clears the busy bit.
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (int core = 0; core < num_cpus; core++) {
		IDC[core].busy_int |= IDC_CORE_MASK(num_cpus);
		IDC[core].done_int &= ~IDC_CORE_MASK(num_cpus);

		/* Also unmask the IDC interrupt for every core in the
		 * L2 mask register.
		 */
		CAVS_INTCTRL[core].l2.clear = CAVS_L2_IDC;
	}

	/* Clear out any existing pending interrupts that might be present */
	for (int i = 0; i < num_cpus; i++) {
		for (int j = 0; j < num_cpus; j++) {
			IDC[i].core[j].tfc = BIT(31);
		}
	}

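	/* CPU0 (the boot core) is already up and running */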
	soc_cpus_active[0] = true;
}

int soc_adsp_halt_cpu(int id)
{
	unsigned int irq_mask;

	if (id == 0 || id == arch_curr_cpu()->id) {
		return -EINVAL;
	}

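	/* Build the set of L2 interrupt sources to mask on the target
	 * core so nothing wakes it once it idles; start with the IDC
	 * (IPI) interrupt.
	 */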
	irq_mask = CAVS_L2_IDC;

#ifdef CONFIG_INTEL_ADSP_TIMER
	/*
	 * Mask the timer interrupt for this CPU so it won't wake up
	 * by itself once the WAITI (wait for interrupt) instruction
	 * runs.
	 */
	irq_mask |= CAVS_L2_DWCT0;
#endif

	CAVS_INTCTRL[id].l2.set = irq_mask;

	/* Stop sending IPIs to this core */
	soc_cpus_active[id] = false;

	/* Turn off the "prevent power/clock gating" bits, enabling
	 * low power idle
	 */
	CAVS_SHIM.pwrctl &= ~CAVS_PWRCTL_TCPDSPPG(id);
	CAVS_SHIM.clkctl &= ~CAVS_CLKCTL_TCPLCG(id);

	/* If possible, wait for the other CPU to reach an idle state
	 * before returning. On older hardware this doesn't work
	 * because power is controlled by the host, so synchronization
	 * needs to be part of the application layer.
	 */
	while ((CAVS_SHIM.pwrsts & CAVS_PWRSTS_PDSPPGS(id))) {
	}
	return 0;
}