/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <xtensa/xtruntime.h>
#include <zephyr/irq_nextlevel.h>
#include <xtensa/hal.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/cache.h>
#include <cpu_init.h>

#include <adsp_memory.h>
#include <adsp_shim.h>
#include <adsp_clk.h>
#include <adsp_imr_layout.h>
#include <cavs-idc.h>
#include "soc.h"

#ifdef CONFIG_DYNAMIC_INTERRUPTS
#include <zephyr/sw_isr_table.h>
#endif

#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(soc);

#define SHIM_GPDMA_BASE_OFFSET 0x6500
#define SHIM_GPDMA_BASE(x) (SHIM_GPDMA_BASE_OFFSET + (x) * 0x100)
#define SHIM_GPDMA_CLKCTL(x) (SHIM_GPDMA_BASE(x) + 0x4)
#define SHIM_CLKCTL_LPGPDMAFDCGB BIT(0)

#ifdef CONFIG_PM
#define SRAM_ALIAS_BASE 0x9E000000
#define SRAM_ALIAS_MASK 0xFF000000
#define SRAM_ALIAS_OFFSET 0x20000000

#define L2_INTERRUPT_NUMBER 4
#define L2_INTERRUPT_MASK (1 << L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER 6
#define L3_INTERRUPT_MASK (1 << L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

/*
 * @brief FW entry point called by the ROM during the normal boot flow.
 */
extern void rom_entry(void);

struct core_state {
	uint32_t intenable;
};

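/* Per-core context preserved across PM_STATE_SOFT_OFF; only the Xtensa
 * INTENABLE special register needs to be saved and restored.
 */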
static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

/**
 * @brief Power down procedure.
 *
 * Locks its code in the L1 cache and shuts down memories.
 * NOTE: there is no return from this function.
 *
 * @param disable_lpsram flag indicating whether the whole LPSRAM is to
 *                       be disabled
 * @param hpsram_pg_mask pointer to the memory segment power gating mask
 *                       (each bit corresponds to one EBB)
 */
extern void power_down_cavs(bool disable_lpsram, uint32_t __sparse_cache *hpsram_pg_mask);

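/* Return the cached alias of an uncached HP-SRAM pointer; the cached
 * and uncached mappings differ only in the SRAM_ALIAS_OFFSET bit of
 * the address.
 */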
static inline void __sparse_cache *uncache_to_cache(void *address)
{
	return (void __sparse_cache *)((uintptr_t)(address) | SRAM_ALIAS_OFFSET);
}

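/**
 * @brief Enter the given power state.
 *
 * For PM_STATE_SOFT_OFF the calling core saves its interrupt mask and
 * marks itself inactive; core 0 then powers down the memories (and
 * never returns), while secondary cores re-enable interrupts and idle.
 */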
void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		soc_cpus_active[cpu] = false;
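		/* Flush and invalidate the data cache so RAM holds the
		 * latest state before the memories are power gated.
		 */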
		sys_cache_data_flush_and_invd_all();
		if (cpu == 0) {
			uint32_t hpsram_mask[HPSRAM_SEGMENTS] = {0};

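			/* Leave a header in IMR so that on wake the ROM
			 * finds the magic value and resumes the firmware
			 * through rom_entry().
			 */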
			struct imr_header hdr = {
				.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE,
				.imr_restore_vector = rom_entry,
			};
			struct imr_layout *imr_layout =
				z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
						   L3_MEM_BASE_ADDR);

			imr_layout->imr_state.header = hdr;

#ifdef CONFIG_ADSP_POWER_DOWN_HPSRAM
			/* Turn off all HPSRAM banks - build the full bitmap */
			for (int i = 0; i < HPSRAM_SEGMENTS; i++) {
				hpsram_mask[i] = HPSRAM_MEMMASK(i);
			}
#endif /* CONFIG_ADSP_POWER_DOWN_HPSRAM */
			/* Do the power down - this function does not return */
			power_down_cavs(true, uncache_to_cache(&hpsram_mask[0]));
		} else {
			z_xt_ints_on(core_desc[cpu].intenable);
			k_cpu_idle();
		}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SoC-specific activity after Low Power Mode exit. */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
		z_xt_ints_on(core_desc[cpu].intenable);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
#endif /* CONFIG_PM */

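/**
 * @brief One-time SoC power and clock setup, executed from IMR at boot.
 */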
__imr void power_init(void)
{
	/* Request the HP ring oscillator and
	 * wait for the status to indicate it is ready.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_RHROSCC;
	while ((CAVS_SHIM.clkctl & CAVS_CLKCTL_RHROSCC) != CAVS_CLKCTL_RHROSCC) {
		k_busy_wait(10);
	}

	/* Request HP Ring Oscillator
	 * Select HP Ring Oscillator
	 * High Power Domain PLL Clock Select divide by 2
	 * Low Power Domain PLL Clock Select divide by 4
	 * Disable Tensilica Core(s) Prevent Local Clock Gating
	 *   - Disabling "prevent clock gating" means allowing clock gating
	 */
	CAVS_SHIM.clkctl = (CAVS_CLKCTL_RHROSCC |
			    CAVS_CLKCTL_OCS |
			    CAVS_CLKCTL_LMCS);

	/* Prevent LP GPDMA 0 & 1 clock gating */
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(0));
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(1));

	/* Disable power gating for the first core */
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(0);

	/* On cAVS 1.8+, we must demand ownership of the timestamping
	 * and clock generator registers.  Lacking the former will
	 * prevent wall clock timer interrupts from arriving, even
	 * though the device itself is operational.
	 */
	sys_write32(GENO_MDIVOSEL | GENO_DIOPTOSEL, DSP_INIT_GENO);
	sys_write32(IOPO_DMIC_FLAG | IOPO_I2SSEL_MASK, DSP_INIT_IOPO);
}