/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <xtensa/xtruntime.h>
#include <zephyr/irq_nextlevel.h>
#include <xtensa/hal.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/cache.h>
#include <cpu_init.h>

#include <adsp_memory.h>
#include <adsp_shim.h>
#include <adsp_clk.h>
#include <adsp_imr_layout.h>
#include <cavs-idc.h>

#ifdef CONFIG_DYNAMIC_INTERRUPTS
#include <zephyr/sw_isr_table.h>
#endif

#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(soc);

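/*
 * LP GPDMA clock control registers in the SHIM, one block per DMA
 * instance; used in power_init() below to prevent LP GPDMA clock gating.
 */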
#define SHIM_GPDMA_BASE_OFFSET	0x6500
#define SHIM_GPDMA_BASE(x)	(SHIM_GPDMA_BASE_OFFSET + (x) * 0x100)
#define SHIM_GPDMA_CLKCTL(x)	(SHIM_GPDMA_BASE(x) + 0x4)
#define SHIM_CLKCTL_LPGPDMAFDCGB	BIT(0)

#ifdef CONFIG_PM
#define SRAM_ALIAS_BASE		0x9E000000
#define SRAM_ALIAS_MASK		0xFF000000
#define SRAM_ALIAS_OFFSET	0x20000000

#define L2_INTERRUPT_NUMBER	4
#define L2_INTERRUPT_MASK	(1 << L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER	6
#define L3_INTERRUPT_MASK	(1 << L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

/*
 * @brief FW entry point called by ROM during normal boot flow
 */
extern void rom_entry(void);
void mp_resume_entry(void);

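/*
 * Per-core context preserved across power gating: the return address
 * (a0), stack pointer (a1), the ZSR_CPU scratch register (kept in the
 * excsave2 field) and the interrupt enable mask captured at suspend.
 */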
struct core_state {
	uint32_t a0;
	uint32_t a1;
	uint32_t excsave2;
	uint32_t intenable;
};

static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

/**
 * @brief Power down procedure.
 *
 * Locks its code in L1 cache and shuts down memories.
 * NOTE: there's no return from this function.
 *
 * @param disable_lpsram flag indicating whether the whole LPSRAM is to be disabled
 * @param hpsram_pg_mask pointer to memory segments power gating mask
 * (each bit corresponds to one ebb)
 */
extern void power_down_cavs(bool disable_lpsram, uint32_t __sparse_cache *hpsram_pg_mask);

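/*
 * Return the cached alias of an address in the uncached SRAM region by
 * applying the cached/uncached alias offset.
 */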
static inline void __sparse_cache *uncache_to_cache(void *address)
{
	return (void __sparse_cache *)((uintptr_t)(address) | SRAM_ALIAS_OFFSET);
}

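/*
 * Save the minimal per-core register context (ZSR_CPU scratch register,
 * a0 and a1) and flush it to memory so it survives power gating.
 */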
static ALWAYS_INLINE void _save_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	core_desc[core_id].excsave2 = XTENSA_RSR(ZSR_CPU_STR);
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));
	sys_cache_data_flush_range(&core_desc[core_id], sizeof(struct core_state));
}

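/* Restore the per-core register context saved by _save_core_context(). */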
static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	XTENSA_WSR(ZSR_CPU_STR, core_desc[core_id].excsave2);
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	__asm__ volatile("rsync");
}

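/*
 * Common exit path from power gating, reached from dsp_restore_vector:
 * re-initialize the CPU, refresh the data cache view and restore the
 * saved context; secondary cores then re-enter the MP resume path.
 */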
void power_gate_exit(void)
{
	cpu_early_init();
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();

	/* Secondary core is resumed by set_dx */
	if (arch_proc_id()) {
		mp_resume_entry();
	}
}

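/*
 * Resume vector for cores returning from power gating: reset the
 * register window, restore PS (PS.UM | PS.WOE), point a1 at the top of
 * this core's interrupt stack (stacks grow down, indexed by PRID) and
 * call power_gate_exit().
 */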
114 __asm__(".align 4\n\t"
115 ".global dsp_restore_vector\n\t"
116 "dsp_restore_vector:\n\t"
117 " movi a0, 0\n\t"
118 " movi a1, 1\n\t"
119 " movi a2, 0x40020\n\t"/* PS_UM|PS_WOE */
120 " wsr a2, PS\n\t"
121 " wsr a1, WINDOWSTART\n\t"
122 " wsr a0, WINDOWBASE\n\t"
123 " rsync\n\t"
124 " movi a1, z_interrupt_stacks\n\t"
125 " rsr a2, PRID\n\t"
126 " movi a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
127 " mull a2, a2, a3\n\t"
128 " add a2, a2, a3\n\t"
129 " add a1, a1, a2\n\t"
130 " call0 power_gate_exit\n\t");
131
void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		xthal_window_spill();
		_save_core_context();
		soc_cpus_active[cpu] = false;
		sys_cache_data_flush_and_invd_all();
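		/*
		 * Only the primary core prepares the IMR restore header and
		 * powers down the memories; secondary cores simply enter idle
		 * with interrupts locked.
		 */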
		if (cpu == 0) {
			uint32_t hpsram_mask[HPSRAM_SEGMENTS] = {0};

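			/*
			 * Write a restore header at the base of L3/IMR memory
			 * so the ROM can locate rom_entry() when the DSP is
			 * powered back up.
			 */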
			struct imr_header hdr = {
				.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE,
				.imr_restore_vector = rom_entry,
			};
			struct imr_layout *imr_layout =
				sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
							   L3_MEM_BASE_ADDR);

			imr_layout->imr_state.header = hdr;

#ifdef CONFIG_ADSP_POWER_DOWN_HPSRAM
			/* turn off all HPSRAM banks - get a full bitmap */
			for (int i = 0; i < HPSRAM_SEGMENTS; i++)
				hpsram_mask[i] = HPSRAM_MEMMASK(i);
#endif /* CONFIG_ADSP_POWER_DOWN_HPSRAM */
			/* do power down - this function won't return */
			power_down_cavs(true, uncache_to_cache(&hpsram_mask[0]));
		} else {
			k_cpu_atomic_idle(arch_irq_lock());
		}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}


/* Handle SOC specific activity after Low Power Mode Exit */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
		z_xt_ints_on(core_desc[cpu].intenable);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}

	/*
	 * We don't have the key used to lock interrupts here.
	 * Just set PS.INTLEVEL to 0.
	 */
	__asm__ volatile ("rsil a2, 0");
}
#endif /* CONFIG_PM */

#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM
/* xt-clang removes runs of more than 8 NOPs, so we need to disable
 * optimization to keep those NOPs from being removed.
 *
 * This function is simple enough and full of hand written
 * assembly that optimization is not really meaningful
 * anyway. So we can skip optimization unconditionally.
 * Re-evaluate its use and add #ifdef if this assumption
 * is no longer valid.
 */
__no_optimization
void arch_cpu_idle(void)
{
	sys_trace_idle();

	/* Just spin forever with interrupts unmasked, for platforms
	 * where WAITI can't be used or where its behavior is
	 * complicated (Intel DSPs will power gate on idle entry under
	 * some circumstances)
	 */
	if (IS_ENABLED(CONFIG_XTENSA_CPU_IDLE_SPIN)) {
		__asm__ volatile("rsil a0, 0");
		__asm__ volatile("loop_forever: j loop_forever");
		return;
	}

	/* Cribbed from SOF: workaround for a bug in some versions of
	 * the LX6 IP. Preprocessor ugliness avoids the need to
	 * figure out how to get the compiler to unroll a loop.
	 */
	if (IS_ENABLED(CONFIG_XTENSA_WAITI_BUG)) {
#define NOP4 __asm__ volatile("nop; nop; nop; nop");
#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
#define NOP128() NOP32 NOP32 NOP32 NOP32
		NOP128();
#undef NOP128
#undef NOP32
#undef NOP4
		__asm__ volatile("isync; extw");
	}

	__asm__ volatile ("waiti 0");
}
#endif

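/*
 * Early clock and power setup, run from IMR at boot: switch to the HP
 * ring oscillator, ungate the LP GPDMA clocks, keep the first core
 * powered and claim the timestamping/clock generator registers.
 */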
__imr void power_init(void)
{
	/* Request HP ring oscillator and
	 * wait for status to indicate it's ready.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_RHROSCC;
	while ((CAVS_SHIM.clkctl & CAVS_CLKCTL_RHROSCC) != CAVS_CLKCTL_RHROSCC) {
		k_busy_wait(10);
	}

	/* Request HP Ring Oscillator
	 * Select HP Ring Oscillator
	 * High Power Domain PLL Clock Select divide by 2
	 * Low Power Domain PLL Clock Select divide by 4
	 * Disable Tensilica Core(s) Prevent Local Clock Gating
	 *   - Disabling "prevent clock gating" means allowing clock gating
	 */
	CAVS_SHIM.clkctl = (CAVS_CLKCTL_RHROSCC |
			    CAVS_CLKCTL_OCS |
			    CAVS_CLKCTL_LMCS);

	/* Prevent LP GPDMA 0 & 1 clock gating */
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(0));
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(1));

	/* Disable power gating for the first core */
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(0);

	/* On cAVS 1.8+, we must demand ownership of the timestamping
	 * and clock generator registers. Lacking the former will
	 * prevent wall clock timer interrupts from arriving, even
	 * though the device itself is operational.
	 */
	sys_write32(GENO_MDIVOSEL | GENO_DIOPTOSEL, DSP_INIT_GENO);
	sys_write32(IOPO_DMIC_FLAG | IOPO_I2SSEL_MASK, DSP_INIT_IOPO);
}