/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/device.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/cache.h>
#include <cpu_init.h>
#include <soc_util.h>

#include <adsp_boot.h>
#include <adsp_power.h>
#include <adsp_memory.h>
#include <adsp_imr_layout.h>
#include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <mem_window.h>

#define LPSRAM_MAGIC_VALUE      0x13579BDF
#define LPSCTL_BATTR_MASK       GENMASK(16, 12)

#if CONFIG_SOC_INTEL_ACE15_MTPM
/* Used to force any pending transaction by HW issuing an upstream read before
 * powering down the host domain.
 */
uint8_t adsp_pending_buffer[CONFIG_DCACHE_LINE_SIZE] __aligned(CONFIG_DCACHE_LINE_SIZE);
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */

__imr void power_init(void)
{
#if CONFIG_ADSP_IDLE_CLOCK_GATING
	/* Disable idle power gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPPG;
#else
	/* Disable idle power and clock gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */

#if CONFIG_SOC_INTEL_ACE15_MTPM
	*((__sparse_force uint32_t *)sys_cache_cached_ptr_get(&adsp_pending_buffer)) =
		INTEL_ADSP_ACE15_MAGIC_KEY;
	cache_data_flush_range((__sparse_force void *)
			sys_cache_cached_ptr_get(&adsp_pending_buffer),
			sizeof(adsp_pending_buffer));
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */
}

#ifdef CONFIG_PM

#define L2_INTERRUPT_NUMBER     4
#define L2_INTERRUPT_MASK       (1<<L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER     6
#define L3_INTERRUPT_MASK       (1<<L3_INTERRUPT_NUMBER)

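/* Combined mask of the interrupt lines used here; core 0 re-enables them in
 * power_gate_entry() so that a wake-up interrupt can bring the core back out
 * of power gating.
 */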
#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

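/* Maximum time, in microseconds, to wait for a power-gated core to report
 * that it is powered up again (see the WAIT_FOR() loop in
 * pm_state_exit_post_ops()).
 */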
#define CPU_POWERUP_TIMEOUT_USEC 10000

/**
 * @brief Power down procedure.
 *
 * Locks its code in L1 cache and shuts down memories.
 * NOTE: there's no return from this function.
 *
 * @param disable_lpsram        flag indicating whether the whole LPSRAM is to be disabled
 * @param hpsram_pg_mask        pointer to the memory segments power gating mask
 *                              (each bit corresponds to one EBB)
 * @param response_to_ipc       flag indicating whether an IPC response should be sent
 *                              during power down
 */
extern void power_down(bool disable_lpsram, uint32_t __sparse_cache * hpsram_pg_mask,
		       bool response_to_ipc);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
/**
 * @brief Platform-specific context restore procedure.
 *
 * Should be called when the SoC context restore is completed.
 */
extern void platform_context_restore(void);

/*
 * @brief Pointer to a persistent storage space, to be set by platform code.
 */
uint8_t *global_imr_ram_storage;
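/* Note: pm_state_set() copies the whole LPSRAM image (LP_SRAM_SIZE bytes) into
 * this buffer and stores the HPSRAM/TLB context right after it, so the platform
 * code must provide a buffer large enough for both.
 */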

/*
 * @brief D3 restore boot entry point.
 */
extern void boot_entry_d3_restore(void);

/*
 * @brief Re-enables the IDC interrupt for all cores after exiting the D3 state.
 *
 * Called once from core 0.
 */
extern void soc_mp_on_d3_exit(void);

#else

/*
 * @brief FW entry point called by the ROM during the normal boot flow.
 */
extern void rom_entry(void);

#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

/* NOTE: This struct will grow with all values that have to be stored for
 * a proper CPU restore after power gating.
 */
struct core_state {
	uint32_t a0;
	uint32_t a1;
	uint32_t vecbase;
	uint32_t excsave2;
	uint32_t excsave3;
	uint32_t thread_ptr;
	uint32_t intenable;
	uint32_t ps;
	uint32_t bctl;
#if (XCHAL_NUM_MISC_REGS == 2)
	uint32_t misc[XCHAL_NUM_MISC_REGS];
#endif
};

static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

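/* Header placed at the start of LPSRAM: power_gate_entry() fills in the magic
 * value and the restore vector before idling core 0. The reserved array pads
 * the structure to 0xC00 bytes (0x14 being the size of the five preceding
 * fields on this 32-bit target); the exact layout contract with the ROM is
 * assumed from the field names.
 */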
struct lpsram_header {
	uint32_t alt_reset_vector;
	uint32_t adsp_lpsram_magic;
	void *lp_restore_vector;
	uint32_t reserved;
	uint32_t slave_core_vector;
	uint8_t rom_bypass_vectors_reserved[0xC00 - 0x14];
};

static ALWAYS_INLINE void _save_core_context(uint32_t core_id)
{
	core_desc[core_id].ps = XTENSA_RSR("PS");
	core_desc[core_id].vecbase = XTENSA_RSR("VECBASE");
	core_desc[core_id].excsave2 = XTENSA_RSR("EXCSAVE2");
	core_desc[core_id].excsave3 = XTENSA_RSR("EXCSAVE3");
	core_desc[core_id].thread_ptr = XTENSA_RUR("THREADPTR");
#if (XCHAL_NUM_MISC_REGS == 2)
	core_desc[core_id].misc[0] = XTENSA_RSR("MISC0");
	core_desc[core_id].misc[1] = XTENSA_RSR("MISC1");
#endif
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));

#if CONFIG_MP_MAX_NUM_CPUS == 1
	/* With one core only, the memory is mapped in cache and we need to flush
	 * it.
	 */
	sys_cache_data_flush_range(&core_desc[core_id], sizeof(struct core_state));
#endif
}

static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	XTENSA_WSR("PS", core_desc[core_id].ps);
	XTENSA_WSR("VECBASE", core_desc[core_id].vecbase);
	XTENSA_WSR("EXCSAVE2", core_desc[core_id].excsave2);
	XTENSA_WSR("EXCSAVE3", core_desc[core_id].excsave3);
	XTENSA_WUR("THREADPTR", core_desc[core_id].thread_ptr);
#if (XCHAL_NUM_MISC_REGS == 2)
	XTENSA_WSR("MISC0", core_desc[core_id].misc[0]);
	XTENSA_WSR("MISC1", core_desc[core_id].misc[1]);
#endif
#ifdef CONFIG_XTENSA_MMU
	xtensa_mmu_reinit();
#endif
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	__asm__ volatile("rsync");
}

void dsp_restore_vector(void);
void mp_resume_entry(void);

void power_gate_entry(uint32_t core_id)
{
	xthal_window_spill();
	sys_cache_data_flush_and_invd_all();
	_save_core_context(core_id);
	if (core_id == 0) {
		struct lpsram_header *lpsheader =
			(struct lpsram_header *) DT_REG_ADDR(DT_NODELABEL(sram1));

		lpsheader->adsp_lpsram_magic = LPSRAM_MAGIC_VALUE;
		lpsheader->lp_restore_vector = &dsp_restore_vector;
		sys_cache_data_flush_range(lpsheader, sizeof(struct lpsram_header));
		/* Re-enable interrupts for core 0 because something has to wake us
		 * up from power gating.
		 */
		z_xt_ints_on(ALL_USED_INT_LEVELS_MASK);
	}

	soc_cpus_active[core_id] = false;
	k_cpu_idle();

	/* It is unlikely we get here, but when this happens
	 * we need to lock interrupts again.
	 *
	 * @note Zephyr checks PS.INTLEVEL to determine whether interrupts are locked.
	 */
	(void)arch_irq_lock();
	z_xt_ints_off(0xffffffff);
}

static void __used power_gate_exit(void)
{
	cpu_early_init();
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();

	/* Secondary core is resumed by set_dx */
	if (arch_proc_id()) {
		mp_resume_entry();
	}
}

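/* Entry point executed when a power-gated core wakes up. The register state is
 * undefined at this point, so this stub resets the register window
 * (WINDOWBASE/WINDOWSTART), loads a known PS value, points a1 at the top of
 * this core's interrupt stack (z_interrupt_stacks + (PRID + 1) *
 * CONFIG_ISR_STACK_SIZE) and then calls power_gate_exit() to restore the saved
 * context.
 */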
__asm__(".align 4\n\t"
	".global dsp_restore_vector\n\t"
	"dsp_restore_vector:\n\t"
	"  movi  a0, 0\n\t"
	"  movi  a1, 1\n\t"
	"  movi  a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
	"  wsr   a2, PS\n\t"
	"  wsr   a1, WINDOWSTART\n\t"
	"  wsr   a0, WINDOWBASE\n\t"
	"  rsync\n\t"
	"  movi  a1, z_interrupt_stacks\n\t"
	"  rsr   a2, PRID\n\t"
	"  movi  a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
	"  mull  a2, a2, a3\n\t"
	"  add   a2, a2, a3\n\t"
	"  add   a1, a1, a2\n\t"
	"  call0 power_gate_exit\n\t");

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
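/* Final step of the D3 resume path: re-initialize the register window to a
 * known state and restore the core context saved by pm_state_set(), which
 * effectively returns execution to the point where the power down was
 * requested.
 */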
static ALWAYS_INLINE void power_off_exit(void)
{
	__asm__(
		"  movi  a0, 0\n\t"
		"  movi  a1, 1\n\t"
		"  movi  a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
		"  wsr   a2, PS\n\t"
		"  wsr   a1, WINDOWSTART\n\t"
		"  wsr   a0, WINDOWBASE\n\t"
		"  rsync\n\t");
	_restore_core_context();
}

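/* D3 restore handler running from IMR: restores the LPSRAM contents, lets the
 * TLB driver restore the HPSRAM contents, mapping and power states, and then
 * resumes the core context saved before power down. It is presumably reached
 * via boot_entry_d3_restore() (declared above) and does not return.
 */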
__imr void pm_state_imr_restore(void)
{
	struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);
	/* restore lpsram power and contents */
	bmemcpy(sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
				   UINT_TO_POINTER(LP_SRAM_BASE)),
		imr_layout->imr_state.header.imr_ram_storage,
		LP_SRAM_SIZE);

	/* restore HPSRAM contents, mapping and power states */
	adsp_mm_restore_context(imr_layout->imr_state.header.imr_ram_storage+LP_SRAM_SIZE);

	/* this function won't return, it will restore a saved state */
	power_off_exit();
}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

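/* Zephyr PM hook implementing the supported power states:
 *  - PM_STATE_SOFT_OFF: core 0 prepares the IMR restore vector (and, with
 *    CONFIG_ADSP_IMR_CONTEXT_SAVE, saves the LPSRAM/HPSRAM context), releases
 *    the host power domain and calls power_down(), which does not return;
 *    secondary cores simply power gate themselves.
 *  - PM_STATE_RUNTIME_IDLE: the calling core is power gated and later resumed
 *    through dsp_restore_vector.
 */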
void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();
	uint32_t battr;
	int ret;

	ARG_UNUSED(ret);

	/* save the interrupt state and turn off all interrupts */
	core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
	z_xt_ints_off(0xffffffff);

	switch (state) {
	case PM_STATE_SOFT_OFF:
		core_desc[cpu].bctl = DSPCS.bootctl[cpu].bctl;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		if (cpu == 0) {
			soc_cpus_active[cpu] = false;
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			/* save storage and restore information to IMR */
			__ASSERT_NO_MSG(global_imr_ram_storage != NULL);
#endif
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			imr_layout->imr_state.header.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE;
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			sys_cache_data_flush_and_invd_all();
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)boot_entry_d3_restore;
			imr_layout->imr_state.header.imr_ram_storage = global_imr_ram_storage;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));

			/* Save the CPU context here.
			 * When _restore_core_context() is called, it will return directly
			 * to the caller of this procedure. Any changes to the CPU context
			 * after _save_core_context() will be lost when power_down() is
			 * executed. Only data in the IMR region survives.
			 */
			xthal_window_spill();
			_save_core_context(cpu);

			/* save LPSRAM - a simple copy */
			memcpy(global_imr_ram_storage, (void *)LP_SRAM_BASE, LP_SRAM_SIZE);

			/* Save HPSRAM - a multi-step procedure executed by the TLB driver.
			 * The TLB driver will change the memory mapping, leaving the
			 * system not operational, so it must be called directly here,
			 * just before power_down().
			 */
			const struct device *tlb_dev = DEVICE_DT_GET(DT_NODELABEL(tlb));

			__ASSERT_NO_MSG(tlb_dev != NULL);
			const struct intel_adsp_tlb_api *tlb_api =
					(struct intel_adsp_tlb_api *)tlb_dev->api;

			tlb_api->save_context(global_imr_ram_storage+LP_SRAM_SIZE);
#else
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)rom_entry;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
#ifdef CONFIG_ADSP_POWER_DOWN_HPSRAM
			/* This assumes a single HPSRAM segment */
			static uint32_t hpsram_mask;
			/* turn off all HPSRAM banks - get a full bitmap */
			uint32_t ebb_banks = ace_hpsram_get_bank_count();
			hpsram_mask = (1 << ebb_banks) - 1;
#define HPSRAM_MASK_ADDR sys_cache_cached_ptr_get(&hpsram_mask)
#else
#define HPSRAM_MASK_ADDR NULL
#endif /* CONFIG_ADSP_POWER_DOWN_HPSRAM */
			/* do the power down - this function won't return */
			ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
			__ASSERT_NO_MSG(ret == 0);
			power_down(true, HPSRAM_MASK_ADDR, true);
		} else {
			power_gate_entry(cpu);
		}
		break;

	/* Only core 0 handles this state */
	case PM_STATE_RUNTIME_IDLE:
		battr = DSPCS.bootctl[cpu].battr & (~LPSCTL_BATTR_MASK);

		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPPG;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		soc_cpu_power_down(cpu);
		battr |= (DSPBR_BATTR_LPSCTL_RESTORE_BOOT & LPSCTL_BATTR_MASK);
		DSPCS.bootctl[cpu].battr = battr;

		ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
		__ASSERT_NO_MSG(ret == 0);

		power_gate_entry(cpu);
		break;
	default:
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SoC-specific activity after Low Power Mode exit */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (cpu == 0) {
		int ret = pm_device_runtime_get(INTEL_ADSP_HST_DOMAIN_DEV);

		ARG_UNUSED(ret);
		__ASSERT_NO_MSG(ret == 0);
	}

	if (state == PM_STATE_SOFT_OFF) {
		/* restore the clock gating state */
		DSPCS.bootctl[cpu].bctl |=
			(core_desc[cpu].bctl & DSPBR_BCTL_WAITIPCG);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
		if (cpu == 0) {
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			/* clean up the storage and restore information */
			sys_cache_data_invd_range(imr_layout, sizeof(*imr_layout));
			imr_layout->imr_state.header.adsp_imr_magic = 0;
			imr_layout->imr_state.header.imr_restore_vector = NULL;
			imr_layout->imr_state.header.imr_ram_storage = NULL;
			intel_adsp_clock_soft_off_exit();
			mem_window_idle_exit();
			soc_mp_on_d3_exit();
		}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else if (state == PM_STATE_RUNTIME_IDLE) {
		soc_cpu_power_up(cpu);

		if (!WAIT_FOR(soc_cpu_is_powered(cpu),
			      CPU_POWERUP_TIMEOUT_USEC, k_busy_wait(HW_STATE_CHECK_DELAY))) {
			k_panic();
		}

#if CONFIG_ADSP_IDLE_CLOCK_GATING
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPPG;
#else
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */
		DSPCS.bootctl[cpu].battr &= (~LPSCTL_BATTR_MASK);

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}

	z_xt_ints_on(core_desc[cpu].intenable);

	/* We don't have the key used to lock interrupts here.
	 * Just set PS.INTLEVEL to 0.
	 */
	__asm__ volatile ("rsil a2, 0");
}

#endif /* CONFIG_PM */

#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM

__no_optimization
void arch_cpu_idle(void)
{
	uint32_t cpu = arch_proc_id();

	sys_trace_idle();

	/*
	 * unlock and invalidate icache if clock gating is allowed
	 */
	if (!(DSPCS.bootctl[cpu].bctl & DSPBR_BCTL_WAITIPCG)) {
		xthal_icache_all_unlock();
		xthal_icache_all_invalidate();
	}

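	/* waiti 0 drops PS.INTLEVEL to 0 and stalls the core until the next
	 * interrupt arrives.
	 */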
	__asm__ volatile ("waiti 0");
}

#endif /* CONFIG_ARCH_CPU_IDLE_CUSTOM */