/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <xtensa/xtruntime.h>
#include <zephyr/irq_nextlevel.h>
#include <xtensa/hal.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/cache.h>
#include <cpu_init.h>

#include <adsp_shim.h>
#include <adsp_clk.h>
#include <cavs-idc.h>
#include "soc.h"

#ifdef CONFIG_DYNAMIC_INTERRUPTS
#include <zephyr/sw_isr_table.h>
#endif

#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(soc);

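/* Shim registers of the low power GPDMA instances; the CLKCTL bit below
 * is written in power_init() to keep the LP GPDMA clocks ungated.
 */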
# define SHIM_GPDMA_BASE_OFFSET 0x6500
# define SHIM_GPDMA_BASE(x) (SHIM_GPDMA_BASE_OFFSET + (x) * 0x100)
# define SHIM_GPDMA_CLKCTL(x) (SHIM_GPDMA_BASE(x) + 0x4)
# define SHIM_CLKCTL_LPGPDMAFDCGB BIT(0)

#ifdef CONFIG_PM_POLICY_CUSTOM
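/* L2 SRAM alias window constants: the cached and uncached views of the
 * same memory are SRAM_ALIAS_OFFSET apart, and each HPSRAM segment
 * consists of EBB_BANKS_IN_SEGMENT power-gateable banks (EBBs).
 */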
#define SRAM_ALIAS_BASE 0x9E000000
#define SRAM_ALIAS_MASK 0xFF000000
#define EBB_BANKS_IN_SEGMENT 32
#define SRAM_ALIAS_OFFSET 0x20000000

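/* Bit masks for the level 2 and level 3 CPU interrupt lines, combined
 * below into the set of interrupt levels used by the power management
 * code.
 */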
#define L2_INTERRUPT_NUMBER 4
#define L2_INTERRUPT_MASK (1 << L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER 6
#define L3_INTERRUPT_MASK (1 << L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

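/* Per-core context saved when a core enters PM_STATE_SOFT_OFF and
 * restored on exit; currently only the Xtensa INTENABLE state.
 */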
struct core_state {
	uint32_t intenable;
};

static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

/**
 * @brief Power down procedure.
 *
 * Locks its code in the L1 cache and shuts down memories.
 * NOTE: there's no return from this function.
 *
 * @param disable_lpsram flag indicating whether the whole LPSRAM is to be
 *                       disabled
 * @param hpsram_pg_mask pointer to the HPSRAM power gating mask
 *                       (each bit corresponds to one EBB)
 */
extern void power_down_cavs(bool disable_lpsram, uint32_t *hpsram_pg_mask);

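/* Return the cached alias of an (uncached) SRAM pointer by applying the
 * SRAM alias offset.
 */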
static inline void __sparse_cache *uncache_to_cache(void *address)
{
	return (void __sparse_cache *)((uintptr_t)(address) | SRAM_ALIAS_OFFSET);
}

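/* PM subsystem hook: enter the requested power state on the calling CPU.
 * Only PM_STATE_SOFT_OFF is handled here; core 0 powers down the whole
 * ADSP (and does not return), while secondary cores re-enable their saved
 * interrupts and idle.
 */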
__weak void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		soc_cpus_active[cpu] = false;
		sys_cache_data_flush_and_invd_all();
		if (cpu == 0) {
			uint32_t ebb = EBB_BANKS_IN_SEGMENT;
			/* turn off all HPSRAM banks - get a full bitmap
			 * (shift in 64 bits so ebb == 32 doesn't overflow)
			 */
			uint32_t hpsram_mask = (uint32_t)((1ULL << ebb) - 1);
			/* do power down - this function won't return */
			power_down_cavs(true, uncache_to_cache(&hpsram_mask));
		} else {
			z_xt_ints_on(core_desc[cpu].intenable);
			k_cpu_idle();
		}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SOC specific activity after Low Power Mode Exit */
__weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
		z_xt_ints_on(core_desc[cpu].intenable);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
#endif /* CONFIG_PM_POLICY_CUSTOM */

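/* Boot-time clock and power setup (placed in IMR): select the HP ring
 * oscillator, keep the LP GPDMA clocks ungated, disable power gating for
 * core 0 and, on cAVS 1.8+, claim the timestamping and clock generator
 * registers.
 */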
__imr void power_init(void)
{
	/* Request HP ring oscillator and
	 * wait for status to indicate it's ready.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_RHROSCC;
	while ((CAVS_SHIM.clkctl & CAVS_CLKCTL_RHROSCC) != CAVS_CLKCTL_RHROSCC) {
		k_busy_wait(10);
	}

	/* Request HP Ring Oscillator
	 * Select HP Ring Oscillator
	 * High Power Domain PLL Clock Select divide by 2
	 * Low Power Domain PLL Clock Select divide by 4
	 * Disable Tensilica Core(s) Prevent Local Clock Gating
	 *   - Disabling "prevent clock gating" means allowing clock gating
	 */
	CAVS_SHIM.clkctl = (CAVS_CLKCTL_RHROSCC |
			    CAVS_CLKCTL_OCS |
			    CAVS_CLKCTL_LMCS);

	/* Prevent LP GPDMA 0 & 1 clock gating */
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(0));
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(1));

	/* Disable power gating for the first core */
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(0);

	/* On cAVS 1.8+, we must demand ownership of the timestamping
	 * and clock generator registers. Lacking the former will
	 * prevent wall clock timer interrupts from arriving, even
	 * though the device itself is operational.
	 */
	sys_write32(GENO_MDIVOSEL | GENO_DIOPTOSEL, DSP_INIT_GENO);
	sys_write32(IOPO_DMIC_FLAG | IOPO_I2SSEL_MASK, DSP_INIT_IOPO);
}