// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/pipeline.h>
#include <sof/common.h>
#include <rtos/idc.h>
#include <sof/ipc/topology.h>
#include <sof/ipc/common.h>
#include <sof/ipc/msg.h>
#include <sof/ipc/driver.h>
#include <sof/ipc/schedule.h>
#include <rtos/alloc.h>
#include <rtos/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/mailbox.h>
#include <sof/list.h>
#include <sof/platform.h>
#include <rtos/sof.h>
#include <rtos/spinlock.h>
#include <ipc/dai.h>
#include <ipc/header.h>
#include <ipc/stream.h>
#include <ipc/topology.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL);

/* be60f97d-78df-4796-a0ee-435cb56b720a */
DECLARE_SOF_UUID("ipc", ipc_uuid, 0xbe60f97d, 0x78df, 0x4796,
		 0xa0, 0xee, 0x43, 0x5c, 0xb5, 0x6b, 0x72, 0x0a);

DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO);

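/*
 * Hand the current IPC command over to another core via IDC. Returns 1 so the
 * caller knows the reply will be written by the target core, or a negative
 * error code on failure.
 */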
int ipc_process_on_core(uint32_t core, bool blocking)
{
	struct ipc *ipc = ipc_get();
	struct idc_msg msg = { .header = IDC_MSG_IPC, .core = core, };
	int ret;

	/* check if requested core is enabled */
	if (!cpu_is_core_enabled(core)) {
		tr_err(&ipc_tr, "ipc_process_on_core(): core #%d is disabled", core);
		return -EACCES;
	}

	/* The other core will write its response there */
	dcache_invalidate_region((__sparse_force void __sparse_cache *)MAILBOX_HOSTBOX_BASE,
				 ((struct sof_ipc_cmd_hdr *)ipc->comp_data)->size);

	/*
	 * If the primary core is waiting for secondary cores to complete, it
	 * will also reply to the host
	 */
	if (!blocking) {
		k_spinlock_key_t key;

		ipc->core = core;
		key = k_spin_lock(&ipc->lock);
		ipc->task_mask |= IPC_TASK_SECONDARY_CORE;
		k_spin_unlock(&ipc->lock, key);
	}

	/* send IDC message */
	ret = idc_send_msg(&msg, blocking ? IDC_BLOCKING : IDC_NON_BLOCKING);
	if (ret < 0)
		return ret;

	/* reply written by other core */
	return 1;
}

/*
 * Components, buffers and pipelines all use the same set of monotonic ID
 * numbers passed in by the host. They are stored in different lists, hence
 * more than 1 list may need to be searched for the corresponding component.
 */

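/* Find the IPC device entry with the given unique ID, NULL if not found */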
struct ipc_comp_dev *ipc_get_comp_by_id(struct ipc *ipc, uint32_t id)
{
	struct ipc_comp_dev *icd;
	struct list_item *clist;

	list_for_item(clist, &ipc->comp_list) {
		icd = container_of(clist, struct ipc_comp_dev, list);
		if (icd->id == id)
			return icd;
	}

	return NULL;
}

/* Walks through the list of components looking for a sink/source endpoint component
 * of the given pipeline
 */
struct ipc_comp_dev *ipc_get_ppl_comp(struct ipc *ipc, uint32_t pipeline_id, int dir)
{
	struct ipc_comp_dev *icd;
	struct comp_buffer *buffer;
	struct comp_dev *buff_comp;
	struct list_item *clist, *blist;
	struct ipc_comp_dev *next_ppl_icd = NULL;

	list_for_item(clist, &ipc->comp_list) {
		icd = container_of(clist, struct ipc_comp_dev, list);
		if (icd->type != COMP_TYPE_COMPONENT)
			continue;

		/* first try to find the module in the pipeline */
		if (dev_comp_pipe_id(icd->cd) == pipeline_id) {
			struct list_item *buffer_list = comp_buffer_list(icd->cd, dir);
			bool last_in_pipeline = true;

			/* The component has no buffer in the given direction */
			if (list_is_empty(buffer_list))
				return icd;

			/* check all connected modules to see if they are on different pipelines */
			list_for_item(blist, buffer_list) {
				buffer = buffer_from_list(blist, dir);
				buff_comp = buffer_get_comp(buffer, dir);

				if (buff_comp && dev_comp_pipe_id(buff_comp) == pipeline_id)
					last_in_pipeline = false;
			}
			/* all connected components are placed on other pipelines */
			if (last_in_pipeline)
				next_ppl_icd = icd;
		}
	}

	return next_ppl_icd;
}

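/* Send the oldest queued IPC message to the host, unless preparing for D3 */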
void ipc_send_queued_msg(void)
{
	struct ipc *ipc = ipc_get();
	struct ipc_msg *msg;
	k_spinlock_key_t key;

	key = k_spin_lock(&ipc->lock);

	if (ipc_get()->pm_prepare_D3)
		goto out;

	/* any messages to send ? */
	if (list_is_empty(&ipc->msg_list))
		goto out;

	msg = list_first_item(&ipc->msg_list, struct ipc_msg,
			      list);

	if (ipc_platform_send_msg(msg) == 0)
		/* Remove the message from the list if it has been successfully sent. */
		list_item_del(&msg->list);
out:
	k_spin_unlock(&ipc->lock, key);
}

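/* Schedule the deferred worker that drains the IPC message queue (no-op on XTOS) */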
static void schedule_ipc_worker(void)
{
	/*
	 * note: in XTOS builds, this is handled in
	 * task_main_primary_core()
	 */
#ifdef __ZEPHYR__
	struct ipc *ipc = ipc_get();

	k_work_schedule(&ipc->z_delayed_work, K_USEC(IPC_PERIOD_USEC));
#endif
}

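/*
 * Queue an IPC message for sending to the host. High-priority messages are
 * placed at the head of the queue and, on XTOS, are attempted immediately.
 */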
void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority)
{
	struct ipc *ipc = ipc_get();
	k_spinlock_key_t key;
	int ret;

	key = k_spin_lock(&ipc->lock);

	/* copy mailbox data to message if not already copied */
	if ((msg->tx_size > 0 && msg->tx_size <= SOF_IPC_MSG_MAX_SIZE) &&
	    msg->tx_data != data) {
		ret = memcpy_s(msg->tx_data, msg->tx_size, data, msg->tx_size);
		assert(!ret);
	}

	/*
	 * note: This function can be executed in LL or EDF context, from any core.
	 * In Zephyr builds the IPC queue is always drained by the primary core, while
	 * submitting to the queue is allowed from any core. Therefore sending an IPC
	 * immediately from an arbitrary context/core is disabled to protect access to
	 * the IPC registers/mailbox.
	 */
#ifndef __ZEPHYR__
	/* try to send critical notifications right away */
	if (high_priority) {
		ret = ipc_platform_send_msg(msg);
		if (!ret) {
			k_spin_unlock(&ipc->lock, key);
			return;
		}
	}
#endif
	/* add to queue unless already there */
	if (list_is_empty(&msg->list)) {
		if (high_priority)
			list_item_prepend(&msg->list, &ipc->msg_list);
		else
			list_item_append(&msg->list, &ipc->msg_list);
	}

	schedule_ipc_worker();

	k_spin_unlock(&ipc->lock, key);
}

#ifdef __ZEPHYR__
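/* Delayed work handler: send one queued message and reschedule while more remain */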
static void ipc_work_handler(struct k_work *work)
{
	struct ipc *ipc = ipc_get();
	k_spinlock_key_t key;

	ipc_send_queued_msg();

	key = k_spin_lock(&ipc->lock);

	if (!list_is_empty(&ipc->msg_list) && !ipc->pm_prepare_D3)
		schedule_ipc_worker();

	k_spin_unlock(&ipc->lock, key);
}
#endif

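/* Schedule the IPC task to process the pending command */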
void ipc_schedule_process(struct ipc *ipc)
{
	schedule_task(&ipc->ipc_task, 0, IPC_PERIOD_USEC);
}

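/* Allocate and initialize the global IPC context, then run platform-specific init */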
int ipc_init(struct sof *sof)
{
	tr_dbg(&ipc_tr, "ipc_init()");

	/* init ipc data */
	sof->ipc = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->ipc));
	sof->ipc->comp_data = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0,
				      SOF_MEM_CAPS_RAM, SOF_IPC_MSG_MAX_SIZE);

	k_spinlock_init(&sof->ipc->lock);
	list_init(&sof->ipc->msg_list);
	list_init(&sof->ipc->comp_list);

#ifdef __ZEPHYR__
	k_work_init_delayable(&sof->ipc->z_delayed_work, ipc_work_handler);
#endif

	return platform_ipc_init(sof->ipc);
}

/* Locking: call with ipc->lock held and interrupts disabled */
void ipc_complete_cmd(struct ipc *ipc)
{
	/*
	 * Up to three contexts can attempt to complete IPC processing: the
	 * original IPC EDF task, the IDC EDF task on a secondary core, or an
	 * LL pipeline thread, running either on the primary or on one of the
	 * secondary cores. All three contexts execute asynchronously. It is
	 * important to only signal the host that the IPC processing has
	 * completed after *all* tasks have completed, therefore only the last
	 * context should do that. We accomplish this by setting IPC_TASK_* bits
	 * in ipc->task_mask for each used IPC context and by clearing them when
	 * each of those contexts completes. Only when the mask is 0 can we
	 * signal the host.
	 */
	if (ipc->task_mask)
		return;

	ipc_platform_complete_cmd(ipc);
}

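/* EDF task completion callback: clear the inline task bit and signal the host if done */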
static void ipc_complete_task(void *data)
{
	struct ipc *ipc = data;
	k_spinlock_key_t key;

	key = k_spin_lock(&ipc->lock);
	ipc->task_mask &= ~IPC_TASK_INLINE;
	ipc_complete_cmd(ipc);
	k_spin_unlock(&ipc->lock, key);
}

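/* EDF task run callback: mark inline processing in progress and dispatch the command */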
static enum task_state ipc_do_cmd(void *data)
{
	struct ipc *ipc = data;

	/*
	 * 32-bit writes are atomic and no IPC processing is taking place at
	 * the moment, so no lock is needed.
	 */
	ipc->task_mask = IPC_TASK_INLINE;

	return ipc_platform_do_cmd(ipc);
}

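/* Operations for the IPC EDF task */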
struct task_ops ipc_task_ops = {
	.run = ipc_do_cmd,
	.complete = ipc_complete_task,
	.get_deadline = ipc_task_deadline,
};