/*
 * Copyright (c) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/check.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/xtensa/arch.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/device_runtime.h>

#include <soc.h>
#include <adsp_boot.h>
#include <adsp_power.h>
#include <adsp_ipc_regs.h>
#include <adsp_memory.h>
#include <adsp_interrupt.h>
#include <zephyr/irq.h>
#include <zephyr/cache.h>
#include <ipi.h>

/* Number of times to poll a core's Current Power Active (CPA) bit before
 * declaring a power-state transition failed.
 */
#define CORE_POWER_CHECK_NUM 128

/* Timeout for the WAIT_FOR() power-state checks, in microseconds. */
#define CPU_POWERUP_TIMEOUT_USEC 10000

#ifdef CONFIG_XTENSA_MMU
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif /* CONFIG_XTENSA_MMU */

#define ACE_INTC_IRQ DT_IRQN(DT_NODELABEL(ace_intc))

#ifdef CONFIG_XTENSA_MMU
#define IPI_TLB_FLUSH 0x01
#endif

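/*
 * IDC interrupt handler. Each core receives IPIs from the other cores
 * through its agent-A mailbox; the handler optionally services a TLB
 * shootdown request, acknowledges the message, and kicks the scheduler.
 */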
static void ipc_isr(void *arg)
{
	uint32_t cpu_id = arch_proc_id();

#if defined(CONFIG_XTENSA_MMU) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	uint32_t msg = IDC[cpu_id].agents[0].ipc.tdr & ~INTEL_ADSP_IPC_BUSY;

	if (msg == IPI_TLB_FLUSH) {
		xtensa_mmu_tlb_shootdown();
	}
#endif

	/*
	 * Clearing the BUSY bits in both TDR and TDA is needed to
	 * complete an IDC message. If we clear only one (and not both),
	 * the other side cannot send another IDC message, as the
	 * hardware still thinks we are processing the current one.
	 * TDR is write-one-to-clear, while TDA is write-zero-to-clear.
	 */
	IDC[cpu_id].agents[0].ipc.tdr = INTEL_ADSP_IPC_BUSY;
	IDC[cpu_id].agents[0].ipc.tda = 0;

#ifdef CONFIG_SMP
	void z_sched_ipi(void);
	z_sched_ipi();
#endif
}

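/* DFIDCCP capability register: the CAP_INST field encodes the number of
 * DSP core instances minus one (hence the "+ 1" below).
 */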
#define DFIDCCP			0x2020
#define CAP_INST_SHIFT		24
#define CAP_INST_MASK		BIT_MASK(4)

unsigned int soc_num_cpus;

__imr void soc_num_cpus_init(void)
{
	/* soc_num_cpus must be set early so that arch_num_cpus() works properly */
	soc_num_cpus = ((sys_read32(DFIDCCP) >> CAP_INST_SHIFT) & CAP_INST_MASK) + 1;
	soc_num_cpus = MIN(CONFIG_MP_MAX_NUM_CPUS, soc_num_cpus);
}

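/*
 * One-time multiprocessing setup, run on core 0: hook up and enable the
 * IDC interrupt, unmask the per-core agent-A IPC interrupt on each core,
 * and mark core 0 as active.
 */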
void soc_mp_init(void)
{
#if defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW)
	/* BADDR stores the Xtensa LX7 AltResetVec input */
	for (int i = 0; i < soc_num_cpus; i++) {
		DSPCS.bootctl[i].baddr = (uint32_t)z_soc_mp_asm_entry;
	}
#endif

	IRQ_CONNECT(ACE_IRQ_TO_ZEPHYR(ACE_INTL_IDCA), 0, ipc_isr, 0, 0);

	irq_enable(ACE_IRQ_TO_ZEPHYR(ACE_INTL_IDCA));

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		/* DINT has one bit per IPC, unmask only IPC "Ax" on core "x" */
		ACE_DINT[i].ie[ACE_INTL_IDCA] = BIT(i);

		/* Agent A should signal only BUSY interrupts */
		IDC[i].agents[0].ipc.ctl = BIT(0); /* IPCTBIE */
	}

	/* Mark core 0 as active */
	soc_cpus_active[0] = true;
}

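/* Keep the host (HST) power domain powered. This runs late in POST_KERNEL
 * (priority 99) so the PM device runtime infrastructure is already up.
 */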
static int host_runtime_get(void)
{
	return pm_device_runtime_get(INTEL_ADSP_HST_DOMAIN_DEV);
}
SYS_INIT(host_runtime_get, POST_KERNEL, 99);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
/*
 * Called after exiting the D3 state when context restore is enabled.
 * Re-enables the IDC interrupt for all cores. Called once, from core 0.
 */
void soc_mp_on_d3_exit(void)
{
	soc_mp_init();
}
#endif

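/*
 * Power up and start a core. For secondary cores this programs the ROM
 * jump vector, powers the core up, then performs the SPA/CPA handshake
 * that brings the core out of reset.
 */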
void soc_start_core(int cpu_num)
{
#if !defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW)
	int retry = CORE_POWER_CHECK_NUM;

	if (cpu_num > 0) {
		/* Initialize the ROM jump address */
		uint32_t *rom_jump_vector = (uint32_t *) ROM_JUMP_ADDR;
#if CONFIG_PM
		extern void dsp_restore_vector(void);

		/* We need to find out what type of booting is taking place here. Secondary cores
		 * can be disabled and enabled multiple times during runtime. During kernel
		 * initialization, the next pm state is set to ACTIVE. This way we can determine
		 * whether the core is being turned on again or for the first time.
		 */
		if (pm_state_next_get(cpu_num)->state == PM_STATE_ACTIVE) {
			*rom_jump_vector = (uint32_t) z_soc_mp_asm_entry;
		} else {
			*rom_jump_vector = (uint32_t) dsp_restore_vector;
		}
#else
		*rom_jump_vector = (uint32_t) z_soc_mp_asm_entry;
#endif

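		/* Flush the jump vector to memory so the starting core's ROM,
		 * which runs before cache coherence with this core is
		 * established, reads the value just written.
		 */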
		sys_cache_data_flush_range(rom_jump_vector, sizeof(*rom_jump_vector));
		soc_cpu_power_up(cpu_num);

		if (!WAIT_FOR(soc_cpu_is_powered(cpu_num),
			      CPU_POWERUP_TIMEOUT_USEC, k_busy_wait(HW_STATE_CHECK_DELAY))) {
			k_panic();
		}

		/* Tell the ACE ROM that it should use the secondary core flow */
		DSPCS.bootctl[cpu_num].battr |= DSPBR_BATTR_LPSCTL_BATTR_SLAVE_CORE;
	}
#endif /* !defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW) */

	/* Clear the Set Power Active bit before powering up the core. The HW
	 * requires this step when the core is started for a second time.
	 * Without this sequence, the core will not power on properly during a
	 * D0->D3->D0 transition.
	 */
	DSPCS.capctl[cpu_num].ctl &= ~DSPCS_CTL_SPA;

	/* Wait until the core reports as powered off (CPA clear). */
	if (!WAIT_FOR((DSPCS.capctl[cpu_num].ctl & DSPCS_CTL_CPA) != DSPCS_CTL_CPA,
		      CPU_POWERUP_TIMEOUT_USEC, k_busy_wait(HW_STATE_CHECK_DELAY))) {
		k_panic();
	}

	DSPCS.capctl[cpu_num].ctl |= DSPCS_CTL_SPA;

#if !defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW)
	/* Wait for the core to power up (CPA set). */
	while (((DSPCS.capctl[cpu_num].ctl & DSPCS_CTL_CPA) != DSPCS_CTL_CPA) &&
	       (retry > 0)) {
		k_busy_wait(HW_STATE_CHECK_DELAY);
		retry--;
	}

	if (retry == 0) {
		__ASSERT(false, "%s secondary core has not powered up", __func__);
	}
#endif /* !defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW) */
}

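/*
 * Early C entry for a freshly started core: bring up the MMU if
 * configured, enable the interrupt controller's IRQ, and disable the
 * idle gating modes the configuration does not allow.
 */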
void soc_mp_startup(uint32_t cpu)
{
#ifdef CONFIG_XTENSA_MMU
	xtensa_mmu_init();
#endif /* CONFIG_XTENSA_MMU */

	/* This interrupt must always be enabled */
	xtensa_irq_enable(ACE_INTC_IRQ);

#if CONFIG_ADSP_IDLE_CLOCK_GATING
	/* Disable idle power gating */
	DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPPG;
#else
	/* Disable idle power and clock gating */
	DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */
}

/**
 * @brief Send an IPI to other processors.
 *
 * @note Leave the most significant bit of @p msg clear.
 *
 * @param msg Message to be sent (31-bit integer).
 * @param cpu_bitmap Bitmap of the CPU cores to target.
 */
#ifndef CONFIG_XTENSA_MMU
ALWAYS_INLINE
#endif
static void send_ipi(uint32_t msg, uint32_t cpu_bitmap)
{
	uint32_t curr = arch_proc_id();

	/* Signal agent B[n] to cause an interrupt from agent A[n] */
	unsigned int num_cpus = arch_num_cpus();

	for (int core = 0; core < num_cpus; core++) {
		if ((core != curr) && soc_cpus_active[core] &&
		    ((cpu_bitmap & BIT(core)) != 0)) {
			IDC[core].agents[1].ipc.idr = msg | INTEL_ADSP_IPC_BUSY;
		}
	}
}

#if defined(CONFIG_XTENSA_MMU) && (CONFIG_MP_MAX_NUM_CPUS > 1)
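/* Broadcast a TLB flush request to the other cores; each receiver runs
 * xtensa_mmu_tlb_shootdown() from ipc_isr().
 */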
void xtensa_mmu_tlb_ipi(void)
{
	send_ipi(IPI_TLB_FLUSH, IPI_ALL_CPUS_MASK);
}
#endif

void arch_sched_broadcast_ipi(void)
{
	send_ipi(0, IPI_ALL_CPUS_MASK);
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	send_ipi(0, cpu_bitmap);
}

#if CONFIG_MP_MAX_NUM_CPUS > 1
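/*
 * Power down a halted secondary core. Must be called from core 0, for a
 * valid secondary core that has already been marked inactive in
 * soc_cpus_active[]. Returns 0 on success, -EINVAL otherwise.
 */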
int soc_adsp_halt_cpu(int id)
{
	int retry = CORE_POWER_CHECK_NUM;

	CHECKIF(arch_proc_id() != 0) {
		return -EINVAL;
	}

	CHECKIF(id <= 0 || id >= arch_num_cpus()) {
		return -EINVAL;
	}

	CHECKIF(soc_cpus_active[id]) {
		return -EINVAL;
	}

	DSPCS.capctl[id].ctl &= ~DSPCS_CTL_SPA;

	/* Waiting for power off */
	while (((DSPCS.capctl[id].ctl & DSPCS_CTL_CPA) == DSPCS_CTL_CPA) &&
	       (retry > 0)) {
		k_busy_wait(HW_STATE_CHECK_DELAY);
		retry--;
	}

	if (retry == 0) {
		__ASSERT(false, "%s secondary core has not powered down", __func__);
		return -EINVAL;
	}

	soc_cpu_power_down(id);
	return 0;
}
#endif
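
/*
 * Usage sketch (illustrative only, assuming the caller has already taken
 * the target core offline so that soc_cpus_active[1] is false):
 *
 *	int ret = soc_adsp_halt_cpu(1);
 *
 *	if (ret != 0) {
 *		// core 1 was busy, invalid, or refused to power down
 *	}
 */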