/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/device.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/cache.h>
#include <cpu_init.h>
#include <soc_util.h>

#include <adsp_boot.h>
#include <adsp_power.h>
#include <adsp_memory.h>
#include <adsp_imr_layout.h>
#include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <mem_window.h>

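/* LPSRAM_MAGIC_VALUE marks a valid restore header at the base of LPSRAM;
 * LPSCTL_BATTR_MASK selects the LPSCTL bits (16..12) of the BATTR register
 * that are set before power gating and cleared again on exit.
 */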
#define LPSRAM_MAGIC_VALUE      0x13579BDF
#define LPSCTL_BATTR_MASK       GENMASK(16, 12)

#if CONFIG_SOC_INTEL_ACE15_MTPM
/* Used to force completion of any transaction pending in HW by issuing an
 * upstream read before powering down the host domain.
 */
uint8_t adsp_pending_buffer[CONFIG_DCACHE_LINE_SIZE] __aligned(CONFIG_DCACHE_LINE_SIZE);
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */

__imr void power_init(void)
{
#if CONFIG_ADSP_IDLE_CLOCK_GATING
	/* Disable idle power gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPPG;
#else
	/* Disable idle power and clock gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */

#if CONFIG_SOC_INTEL_ACE15_MTPM
	*((__sparse_force uint32_t *)sys_cache_cached_ptr_get(&adsp_pending_buffer)) =
		INTEL_ADSP_ACE15_MAGIC_KEY;
	sys_cache_data_flush_range((__sparse_force void *)
			sys_cache_cached_ptr_get(&adsp_pending_buffer),
			sizeof(adsp_pending_buffer));
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */
}

#ifdef CONFIG_PM

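/* Interrupts left enabled while a power-gated core waits to be woken;
 * only these sources can bring the core out of power gating (see the
 * z_xt_ints_on() call in power_gate_entry()).
 */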
#define L2_INTERRUPT_NUMBER     4
#define L2_INTERRUPT_MASK       (1<<L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER     6
#define L3_INTERRUPT_MASK       (1<<L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

#define CPU_POWERUP_TIMEOUT_USEC 10000

/**
 * @brief Power down procedure.
 *
 * Locks its code in the L1 cache and shuts down memories.
 * NOTE: there is no return from this function.
 *
 * @param disable_lpsram        true if the whole LPSRAM should be disabled
 * @param disable_hpsram        true if the whole HPSRAM should be disabled
 * @param response_to_ipc       true if an IPC response should be sent during power down
 */
void power_down(bool disable_lpsram, bool disable_hpsram, bool response_to_ipc);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
/**
 * @brief Platform-specific context restore procedure.
 *
 * Should be called when the SoC context restore is complete.
 */
extern void platform_context_restore(void);

/*
 * @brief Pointer to a persistent storage space, to be set by platform code.
 */
uint8_t *global_imr_ram_storage;

/*
 * @brief D3 restore boot entry point.
 */
extern void boot_entry_d3_restore(void);

/*
 * @brief re-enables IDC interrupt for all cores after exiting D3 state
 *
 * Called once from core 0
 */
extern void soc_mp_on_d3_exit(void);

#else

/*
 * @brief FW entry point called by ROM during normal boot flow
 */
extern void rom_entry(void);

#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

/* NOTE: This struct will grow with all values that have to be stored for
 * a proper CPU restore after power gating (PG).
 */
struct core_state {
	uint32_t a0;
	uint32_t a1;
	uint32_t vecbase;
	uint32_t excsave2;
	uint32_t excsave3;
	uint32_t thread_ptr;
	uint32_t intenable;
	uint32_t ps;
	uint32_t bctl;
#if (XCHAL_NUM_MISC_REGS == 2)
	uint32_t misc[XCHAL_NUM_MISC_REGS];
#endif
};

static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

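/* Restore header at the base of LPSRAM, filled in by power_gate_entry():
 * when adsp_lpsram_magic holds LPSRAM_MAGIC_VALUE, execution on wake-up is
 * redirected to lp_restore_vector instead of the normal boot path. The
 * reserved array pads the header to 0xC00 bytes.
 */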
struct lpsram_header {
	uint32_t alt_reset_vector;
	uint32_t adsp_lpsram_magic;
	void *lp_restore_vector;
	uint32_t reserved;
	uint32_t slave_core_vector;
	uint8_t rom_bypass_vectors_reserved[0xC00 - 0x14];
};

static ALWAYS_INLINE void _save_core_context(uint32_t core_id)
{
	core_desc[core_id].ps = XTENSA_RSR("PS");
	core_desc[core_id].vecbase = XTENSA_RSR("VECBASE");
	core_desc[core_id].excsave2 = XTENSA_RSR("EXCSAVE2");
	core_desc[core_id].excsave3 = XTENSA_RSR("EXCSAVE3");
	core_desc[core_id].thread_ptr = XTENSA_RUR("THREADPTR");
#if (XCHAL_NUM_MISC_REGS == 2)
	core_desc[core_id].misc[0] = XTENSA_RSR("MISC0");
	core_desc[core_id].misc[1] = XTENSA_RSR("MISC1");
#endif
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));

#if CONFIG_MP_MAX_NUM_CPUS == 1
	/* With one core only, the memory is mapped in cache and we need to flush
	 * it.
	 */
	sys_cache_data_flush_range(&core_desc[core_id], sizeof(struct core_state));
#endif
}

static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	XTENSA_WSR("PS", core_desc[core_id].ps);
	XTENSA_WSR("VECBASE", core_desc[core_id].vecbase);
	XTENSA_WSR("EXCSAVE2", core_desc[core_id].excsave2);
	XTENSA_WSR("EXCSAVE3", core_desc[core_id].excsave3);
	XTENSA_WUR("THREADPTR", core_desc[core_id].thread_ptr);
#if (XCHAL_NUM_MISC_REGS == 2)
	XTENSA_WSR("MISC0", core_desc[core_id].misc[0]);
	XTENSA_WSR("MISC1", core_desc[core_id].misc[1]);
#endif
#ifdef CONFIG_XTENSA_MMU
	xtensa_mmu_reinit();
#endif
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	__asm__ volatile("rsync");
}

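/* dsp_restore_vector is the assembly wake stub defined below;
 * mp_resume_entry resumes a power-gated secondary core and is expected to
 * be provided by the SoC multiprocessing code.
 */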
void dsp_restore_vector(void);
void mp_resume_entry(void);

void power_gate_entry(uint32_t core_id)
{
	xthal_window_spill();
	sys_cache_data_flush_and_invd_all();
	_save_core_context(core_id);
	if (core_id == 0) {
		struct lpsram_header *lpsheader =
			(struct lpsram_header *) DT_REG_ADDR(DT_NODELABEL(sram1));

		lpsheader->adsp_lpsram_magic = LPSRAM_MAGIC_VALUE;
		lpsheader->lp_restore_vector = &dsp_restore_vector;
		sys_cache_data_flush_range(lpsheader, sizeof(struct lpsram_header));
		/* Re-enable interrupts for core 0 because something has to wake
		 * us up from power gating.
		 */
		z_xt_ints_on(ALL_USED_INT_LEVELS_MASK);
	}

	soc_cpus_active[core_id] = false;
	k_cpu_idle();

	/* It is unlikely that we get here, but if we do, interrupts need to
	 * be locked again.
	 *
	 * @note Zephyr checks PS.INTLEVEL to decide whether interrupts are locked.
	 */
	(void)arch_irq_lock();
	z_xt_ints_off(0xffffffff);
}

static void __used power_gate_exit(void)
{
	cpu_early_init();
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();

	/* Secondary core is resumed by set_dx */
	if (arch_proc_id()) {
		mp_resume_entry();
	}
}

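/* Wake stub installed as lp_restore_vector: reset PS and the register
 * window, point a1 at the top of this core's slot in z_interrupt_stacks
 * (PRID selects the slot; stacks grow downward), then call
 * power_gate_exit() to restore the saved context.
 */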
__asm__(".align 4\n\t"
	".global dsp_restore_vector\n\t"
	"dsp_restore_vector:\n\t"
	"  movi  a0, 0\n\t"
	"  movi  a1, 1\n\t"
	"  movi  a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
	"  wsr   a2, PS\n\t"
	"  wsr   a1, WINDOWSTART\n\t"
	"  wsr   a0, WINDOWBASE\n\t"
	"  rsync\n\t"
	"  movi  a1, z_interrupt_stacks\n\t"
	"  rsr   a2, PRID\n\t"
	"  movi  a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
	"  mull  a2, a2, a3\n\t"
	"  add   a2, a2, a3\n\t"
	"  add   a1, a1, a2\n\t"
	"  call0 power_gate_exit\n\t");

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
static ALWAYS_INLINE void power_off_exit(void)
{
	__asm__(
		"  movi  a0, 0\n\t"
		"  movi  a1, 1\n\t"
		"  movi  a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
		"  wsr   a2, PS\n\t"
		"  wsr   a1, WINDOWSTART\n\t"
		"  wsr   a0, WINDOWBASE\n\t"
		"  rsync\n\t");
	_restore_core_context();
}

__imr void pm_state_imr_restore(void)
{
	struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);
	/* restore lpsram power and contents */
	bmemcpy(sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
				   UINT_TO_POINTER(LP_SRAM_BASE)),
		imr_layout->imr_state.header.imr_ram_storage,
		LP_SRAM_SIZE);

	/* restore HPSRAM contents, mapping and power states */
	adsp_mm_restore_context(imr_layout->imr_state.header.imr_ram_storage+LP_SRAM_SIZE);

	/* this function won't return, it will restore a saved state */
	power_off_exit();
}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();
	uint32_t battr;
	int ret;

	ARG_UNUSED(ret);

	/* save interrupt state and turn off all interrupts */
	core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
	z_xt_ints_off(0xffffffff);

	switch (state) {
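	/* Deepest state: core 0 runs the (optional) IMR context save and
	 * powers the whole ADSP down; secondary cores only power gate.
	 */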
	case PM_STATE_SOFT_OFF:
		core_desc[cpu].bctl = DSPCS.bootctl[cpu].bctl;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		if (cpu == 0) {
			soc_cpus_active[cpu] = false;
			ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
			__ASSERT_NO_MSG(ret == 0);
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			/* save storage and restore information to imr */
			__ASSERT_NO_MSG(global_imr_ram_storage != NULL);
#endif
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			imr_layout->imr_state.header.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE;
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			sys_cache_data_flush_and_invd_all();
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)boot_entry_d3_restore;
			imr_layout->imr_state.header.imr_ram_storage = global_imr_ram_storage;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));

			/* Save the CPU context here. When _restore_core_context() is
			 * called, it will return directly to the caller of this
			 * procedure. Any changes to the CPU context made after
			 * _save_core_context() will be lost when power_down() is
			 * executed; only data in the IMR region survives.
			 */
			xthal_window_spill();
			_save_core_context(cpu);

			/* save LPSRAM - a simple copy */
			memcpy(global_imr_ram_storage, (void *)LP_SRAM_BASE, LP_SRAM_SIZE);

			/* Saving HPSRAM is a multi-step procedure executed by the TLB
			 * driver. The driver will change the memory mapping, leaving
			 * the system not operational, so it must be called directly
			 * here, just before power_down().
			 */
			const struct device *tlb_dev = DEVICE_DT_GET(DT_NODELABEL(tlb));

			__ASSERT_NO_MSG(tlb_dev != NULL);
			const struct intel_adsp_tlb_api *tlb_api =
					(struct intel_adsp_tlb_api *)tlb_dev->api;

			tlb_api->save_context(global_imr_ram_storage+LP_SRAM_SIZE);
#else
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)rom_entry;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
			/* do power down - this function won't return */
			power_down(true, CONFIG_ADSP_POWER_DOWN_HPSRAM, true);
		} else {
			power_gate_entry(cpu);
		}
		break;

	/* Only core 0 handles this state */
	case PM_STATE_RUNTIME_IDLE:
		battr = DSPCS.bootctl[cpu].battr & (~LPSCTL_BATTR_MASK);

		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPPG;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		soc_cpu_power_down(cpu);
		battr |= (DSPBR_BATTR_LPSCTL_RESTORE_BOOT & LPSCTL_BATTR_MASK);
		DSPCS.bootctl[cpu].battr = battr;

		ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
		__ASSERT_NO_MSG(ret == 0);

		power_gate_entry(cpu);
		break;
	default:
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SOC specific activity after Low Power Mode Exit */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (cpu == 0) {
		int ret = pm_device_runtime_get(INTEL_ADSP_HST_DOMAIN_DEV);

		ARG_UNUSED(ret);
		__ASSERT_NO_MSG(ret == 0);
	}

	if (state == PM_STATE_SOFT_OFF) {
		/* restore clock gating state */
		DSPCS.bootctl[cpu].bctl |=
			(core_desc[cpu].bctl & DSPBR_BCTL_WAITIPCG);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
		if (cpu == 0) {
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			/* clean storage and restore information */
			sys_cache_data_invd_range(imr_layout, sizeof(*imr_layout));
			imr_layout->imr_state.header.adsp_imr_magic = 0;
			imr_layout->imr_state.header.imr_restore_vector = NULL;
			imr_layout->imr_state.header.imr_ram_storage = NULL;
			intel_adsp_clock_soft_off_exit();
			mem_window_idle_exit();
			soc_mp_on_d3_exit();
		}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else if (state == PM_STATE_RUNTIME_IDLE) {
		soc_cpu_power_up(cpu);

		if (!WAIT_FOR(soc_cpu_is_powered(cpu),
			      CPU_POWERUP_TIMEOUT_USEC, k_busy_wait(HW_STATE_CHECK_DELAY))) {
			k_panic();
		}

#if CONFIG_ADSP_IDLE_CLOCK_GATING
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPPG;
#else
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */
		DSPCS.bootctl[cpu].battr &= (~LPSCTL_BATTR_MASK);

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}

	z_xt_ints_on(core_desc[cpu].intenable);

	/* We don't have the key used to lock interrupts here.
	 * Just set PS.INTLEVEL to 0.
	 */
	__asm__ volatile ("rsil a2, 0");
}

#endif /* CONFIG_PM */

#ifdef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE

__no_optimization
void arch_cpu_idle(void)
{
	uint32_t cpu = arch_proc_id();

	sys_trace_idle();

	/*
	 * unlock and invalidate icache if clock gating is allowed
	 */
	if (!(DSPCS.bootctl[cpu].bctl & DSPBR_BCTL_WAITIPCG)) {
		xthal_icache_all_unlock();
		xthal_icache_all_invalidate();
	}

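	/* waiti 0 lowers PS.INTLEVEL to 0 and idles the core until any
	 * enabled interrupt arrives; execution resumes here afterwards.
	 */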
	__asm__ volatile ("waiti 0");
}

#endif /* CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE */