1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2022 Intel Corporation. All rights reserved.
4 //
5 // Author: Tomasz Leman <tomasz.m.leman@intel.com>
6
7 /**
8 * \file
9 * \brief Zephyr RTOS CPU implementation file
10 * \authors Tomasz Leman <tomasz.m.leman@intel.com>
11 */
12
13 #include <sof/init.h>
14 #include <sof/lib/cpu.h>
15 #include <sof/lib/pm_runtime.h>
16
17 /* Zephyr includes */
18 #include <soc.h>
19 #include <version.h>
20 #include <zephyr/kernel.h>
21
22 #if CONFIG_MULTICORE && CONFIG_SMP
23
24 extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
25 CONFIG_ISR_STACK_SIZE);
26
27 static atomic_t start_flag;
28 static atomic_t ready_flag;
29
30 /* Zephyr kernel_internal.h interface */
31 extern void smp_timer_init(void);
32
/*
 * Entry point executed on a secondary core right after it is started by
 * arch_start_cpu() in cpu_enable_core().
 *
 * @param arg Pointer to the start synchronization flag (&start_flag),
 *            passed through by arch_start_cpu().
 */
static FUNC_NORETURN void secondary_init(void *arg)
{
	struct k_thread dummy_thread;

	/*
	 * This is an open-coded version of zephyr/kernel/smp.c
	 * smp_init_top(). We do this so that we can call SOF
	 * secondary_core_init() for each core.
	 */

	/* Tell the primary core (spinning in cpu_enable_core()) we are up */
	atomic_set(&ready_flag, 1);
	/* Initialize a dummy thread context for this CPU.
	 * NOTE(review): per Zephyr smp.c this also synchronizes on *arg
	 * (start_flag) — confirm against the pinned Zephyr version.
	 */
	z_smp_thread_init(arg, &dummy_thread);
	/* Per-core timer setup (Zephyr kernel_internal.h interface) */
	smp_timer_init();

	/* SOF-specific per-core initialization */
	secondary_core_init(sof_get());

#ifdef CONFIG_THREAD_STACK_INFO
	/* Point the dummy thread's stack bounds at this core's slice of the
	 * shared interrupt stack array so stack-info consumers see sane values.
	 */
	dummy_thread.stack_info.start = (uintptr_t)z_interrupt_stacks +
		arch_curr_cpu()->id * Z_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE);
	dummy_thread.stack_info.size = Z_KERNEL_STACK_LEN(CONFIG_ISR_STACK_SIZE);
#endif

	/* Swap away from the dummy thread into the scheduler; never returns */
	z_smp_thread_swap();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
59
60 #if CONFIG_ZEPHYR_NATIVE_DRIVERS
61 #include <sof/trace/trace.h>
62 #include <rtos/wait.h>
63 #include <zephyr/pm/pm.h>
64
65 LOG_MODULE_DECLARE(zephyr, CONFIG_SOF_LOG_LEVEL);
66
67 extern struct tr_ctx zephyr_tr;
68
/*
 * Start secondary core @id and wait until it reports itself ready.
 *
 * @param id Core index to enable.
 * @return 0 on success or when the core is already active.
 */
int cpu_enable_core(int id)
{
	/* only called from single core, no RMW lock */
	__ASSERT_NO_MSG(cpu_is_primary(arch_proc_id()));
	/*
	 * This is an open-coded version of zephyr/kernel/smp.c
	 * z_smp_start_cpu(). We do this, so we can use a customized
	 * secondary_init() for SOF.
	 */

	/* Nothing to do if the core is already running */
	if (arch_cpu_active(id))
		return 0;

#if ZEPHYR_VERSION(3, 0, 99) <= ZEPHYR_VERSION_CODE
	/* Newer Zephyr requires explicit per-CPU init before starting it */
	z_init_cpu(id);
#endif

	/* Reset the handshake flags shared with secondary_init() */
	atomic_clear(&start_flag);
	atomic_clear(&ready_flag);

	/* Boot the core on its slice of the interrupt stack array;
	 * secondary_init() receives &start_flag as its argument.
	 */
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       secondary_init, &start_flag);

	/* Spin until secondary_init() sets ready_flag.
	 * NOTE(review): no timeout here — a core that fails to start hangs
	 * the caller; confirm this is acceptable.
	 */
	while (!atomic_get(&ready_flag))
		k_busy_wait(100);

	/* Release the secondary core to proceed past start_flag */
	atomic_set(&start_flag, 1);

	return 0;
}
99
/*
 * Power down core @id via the Zephyr PM subsystem.
 * Without CONFIG_PM only the "already disabled" check is performed.
 *
 * @param id Core index to disable.
 */
void cpu_disable_core(int id)
{
	/* only called from single core, no RMW lock */
	__ASSERT_NO_MSG(cpu_is_primary(arch_proc_id()));

	if (!arch_cpu_active(id)) {
		tr_warn(&zephyr_tr, "core %d is already disabled", id);
		return;
	}
#if defined(CONFIG_PM)
	/* TODO: before requesting core shut down check if it's not actively used */

	/* Force the target core's next power state to SOFT_OFF */
	if (!pm_state_force(id, &(struct pm_state_info){PM_STATE_SOFT_OFF, 0, 0})) {
		tr_err(&zephyr_tr, "failed to set PM_STATE_SOFT_OFF on core %d", id);
		return;
	}

	/* The primary core is turned off by the host after it enters the
	 * SOFT_OFF state, so do not wait for it or halt it here.
	 */
	if (cpu_is_primary(id))
		return;

	/* Broadcasting interrupts to other cores — presumably to nudge the
	 * target core so the forced SOFT_OFF state takes effect; confirm.
	 */
	arch_sched_ipi();

	/* Deadline (in cycles) for the core to go inactive */
	uint64_t timeout = k_cycle_get_64() +
		k_ms_to_cyc_ceil64(CONFIG_SECONDARY_CORE_DISABLING_TIMEOUT);

	/* Waiting for secondary core to enter idle state */
	while (arch_cpu_active(id) && (k_cycle_get_64() < timeout))
		idelay(PLATFORM_DEFAULT_DELAY);

	if (arch_cpu_active(id)) {
		tr_err(&zephyr_tr, "core %d did not enter idle state", id);
		return;
	}

	/* Core went inactive: halt it at SoC level */
	if (soc_adsp_halt_cpu(id) != 0)
		tr_err(&zephyr_tr, "failed to disable core %d", id);
#endif /* CONFIG_PM */
}
139
int cpu_is_core_enabled(int id)
{
	/* A core counts as enabled exactly when Zephyr reports it active. */
	const int active = arch_cpu_active(id);

	return active;
}
144
cpu_enabled_cores(void)145 int cpu_enabled_cores(void)
146 {
147 unsigned int i;
148 int mask = 0;
149
150 for (i = 0; i < CONFIG_MP_NUM_CPUS; i++)
151 if (arch_cpu_active(i))
152 mask |= BIT(i);
153
154 return mask;
155 }
156 #else
157 static int w_core_enable_mask = 0x1; /*Core 0 is always active*/
158
cpu_enable_core(int id)159 int cpu_enable_core(int id)
160 {
161 pm_runtime_get(PM_RUNTIME_DSP, PWRD_BY_TPLG | id);
162
163 /* only called from single core, no RMW lock */
164 __ASSERT_NO_MSG(cpu_get_id() == PLATFORM_PRIMARY_CORE_ID);
165
166 w_core_enable_mask |= BIT(id);
167
168 return 0;
169 }
170
/*
 * Start secondary core @id and wait until it reports itself ready.
 *
 * @param id Core index to start.
 * @return 0 on success or when the core is already active.
 */
int cpu_enable_secondary_core(int id)
{
	/*
	 * This is an open-coded version of zephyr/kernel/smp.c
	 * z_smp_start_cpu(). We do this, so we can use a customized
	 * secondary_init() for SOF.
	 */

	/* Nothing to do if the core is already running */
	if (arch_cpu_active(id))
		return 0;

#if ZEPHYR_VERSION(3, 0, 99) <= ZEPHYR_VERSION_CODE
	/* Newer Zephyr requires explicit per-CPU init before starting it */
	z_init_cpu(id);
#endif

	/* Reset the handshake flags shared with secondary_init() */
	atomic_clear(&start_flag);
	atomic_clear(&ready_flag);

	/* Boot the core on its slice of the interrupt stack array;
	 * secondary_init() receives &start_flag as its argument.
	 */
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       secondary_init, &start_flag);

	/* Spin until secondary_init() sets ready_flag.
	 * NOTE(review): no timeout here — a core that fails to start hangs
	 * the caller; confirm this is acceptable.
	 */
	while (!atomic_get(&ready_flag))
		k_busy_wait(100);

	/* Release the secondary core to proceed past start_flag */
	atomic_set(&start_flag, 1);

	return 0;
}
199
cpu_disable_core(int id)200 void cpu_disable_core(int id)
201 {
202 /* TODO: call Zephyr API */
203
204 /* only called from single core, no RMW lock */
205 __ASSERT_NO_MSG(cpu_get_id() == PLATFORM_PRIMARY_CORE_ID);
206
207 w_core_enable_mask &= ~BIT(id);
208 }
209
cpu_is_core_enabled(int id)210 int cpu_is_core_enabled(int id)
211 {
212 return w_core_enable_mask & BIT(id);
213 }
214
cpu_enabled_cores(void)215 int cpu_enabled_cores(void)
216 {
217 return w_core_enable_mask;
218 }
219 #endif /* CONFIG_ZEPHYR_NATIVE_DRIVERS */
220
/*
 * Power down the current core. Stub: no-op for now.
 *
 * @param flags Power-down flags (unused in this stub).
 */
void cpu_power_down_core(uint32_t flags)
{
	/* TODO: use Zephyr version */
}
225
/*
 * Restore secondary cores after a low-power transition.
 * Stub: no restore work is implemented yet.
 *
 * @return Always 0.
 */
int cpu_restore_secondary_cores(void)
{
	/* TODO: use Zephyr API */
	return 0;
}
231
/*
 * Prepare secondary cores for the D0ix power transition.
 * Stub: no preparation is implemented yet.
 *
 * @return Always 0.
 */
int cpu_secondary_cores_prepare_d0ix(void)
{
	/* TODO: use Zephyr API */
	return 0;
}
237
238 #endif /* CONFIG_MULTICORE && CONFIG_SMP */
239