// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/pipeline.h>
#include <sof/ipc/msg.h>
#include <rtos/interrupt.h>
#include <sof/lib/mm_heap.h>
#include <sof/lib/uuid.h>
#include <sof/compiler_attributes.h>
#include <sof/list.h>
#include <rtos/spinlock.h>
#include <rtos/string.h>
#include <rtos/clk.h>
#include <ipc/header.h>
#include <ipc/stream.h>
#include <ipc/topology.h>
#include <ipc4/module.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_REGISTER(pipe, CONFIG_SOF_LOG_LEVEL);

/* 4e934adb-b0ec-4d33-a086-c1022f921321 */
DECLARE_SOF_RT_UUID("pipe", pipe_uuid, 0x4e934adb, 0xb0ec, 0x4d33,
		    0xa0, 0x86, 0xc1, 0x02, 0x2f, 0x92, 0x13, 0x21);

DECLARE_TR_CTX(pipe_tr, SOF_UUID(pipe_uuid), LOG_LEVEL_INFO);

/* number of pipeline stream metadata objects we export in mailbox */
#define PPL_POSN_OFFSETS \
	(MAILBOX_STREAM_SIZE / sizeof(struct sof_ipc_stream_posn))

/* lookup table to determine busy/free pipeline metadata objects */
struct pipeline_posn {
	bool posn_offset[PPL_POSN_OFFSETS];	/**< available offsets */
	struct k_spinlock lock;			/**< lock mechanism */
};
/* the pipeline position lookup table */
static SHARED_DATA struct pipeline_posn pipeline_posn_shared;

/**
 * \brief Retrieves pipeline position structure.
 * \return Pointer to pipeline position structure.
 */
static inline struct pipeline_posn *pipeline_posn_get(void)
{
	return sof_get()->pipeline_posn;
}

/**
 * \brief Retrieves the first free pipeline position offset.
 * \param[in,out] posn_offset Pipeline position offset to be set.
 * \return Error code.
 */
static inline int pipeline_posn_offset_get(uint32_t *posn_offset)
{
	struct pipeline_posn *pipeline_posn = pipeline_posn_get();
	int ret = -EINVAL;
	uint32_t i;
	k_spinlock_key_t key;

	key = k_spin_lock(&pipeline_posn->lock);

	for (i = 0; i < PPL_POSN_OFFSETS; ++i) {
		if (!pipeline_posn->posn_offset[i]) {
			*posn_offset = i * sizeof(struct sof_ipc_stream_posn);
			pipeline_posn->posn_offset[i] = true;
			ret = 0;
			break;
		}
	}

	k_spin_unlock(&pipeline_posn->lock, key);

	return ret;
}

/**
 * \brief Frees a pipeline position offset.
 * \param[in] posn_offset Pipeline position offset to be freed.
 */
static inline void pipeline_posn_offset_put(uint32_t posn_offset)
{
	struct pipeline_posn *pipeline_posn = pipeline_posn_get();
	int i = posn_offset / sizeof(struct sof_ipc_stream_posn);
	k_spinlock_key_t key;

	key = k_spin_lock(&pipeline_posn->lock);

	pipeline_posn->posn_offset[i] = false;

	k_spin_unlock(&pipeline_posn->lock, key);
}
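
/*
 * Illustrative usage sketch (not literal code from this file): an offset is
 * reserved once per pipeline and returned when the pipeline is freed, as
 * pipeline_new() and pipeline_free() below do, e.g.:
 *
 *	uint32_t offset;
 *	int ret = pipeline_posn_offset_get(&offset);
 *
 *	if (ret < 0)
 *		return ret;	// all mailbox position slots are busy
 *	...
 *	pipeline_posn_offset_put(offset);
 */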

void pipeline_posn_init(struct sof *sof)
{
	sof->pipeline_posn = platform_shared_get(&pipeline_posn_shared,
						 sizeof(pipeline_posn_shared));
	k_spinlock_init(&sof->pipeline_posn->lock);
}

/* create new pipeline - returns pipeline pointer or NULL on error */
struct pipeline *pipeline_new(uint32_t pipeline_id, uint32_t priority, uint32_t comp_id)
{
	struct sof_ipc_stream_posn posn;
	struct pipeline *p;
	int ret;

	pipe_cl_info("pipeline new pipe_id %d priority %d",
		     pipeline_id, priority);

	/* show heap status */
	heap_trace_all(0);

	/* allocate new pipeline */
	p = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*p));
	if (!p) {
		pipe_cl_err("pipeline_new(): Out of Memory");
		return NULL;
	}

	/* init pipeline */
	p->comp_id = comp_id;
	p->priority = priority;
	p->pipeline_id = pipeline_id;
	p->status = COMP_STATE_INIT;
	p->trigger.cmd = COMP_TRIGGER_NO_ACTION;
	ret = memcpy_s(&p->tctx, sizeof(struct tr_ctx), &pipe_tr,
		       sizeof(struct tr_ctx));
	if (ret < 0) {
		pipe_err(p, "pipeline_new(): failed to copy trace settings");
		goto free;
	}

	ret = pipeline_posn_offset_get(&p->posn_offset);
	if (ret < 0) {
		pipe_err(p, "pipeline_new(): pipeline_posn_offset_get failed %d",
			 ret);
		goto free;
	}

	/* build the position message only to retrieve a valid ipc_msg header */
	ipc_build_stream_posn(&posn, SOF_IPC_STREAM_TRIG_XRUN, p->comp_id);

	if (posn.rhdr.hdr.size) {
		p->msg = ipc_msg_init(posn.rhdr.hdr.cmd, posn.rhdr.hdr.size);
		if (!p->msg) {
			pipe_err(p, "pipeline_new(): ipc_msg_init failed");
			goto free;
		}
	}

	return p;
free:
	rfree(p);
	return NULL;
}

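/* Set @comp as the source (PPL_CONN_DIR_COMP_TO_BUFFER) or sink of @buffer
 * and, while the buffer is still unshared, write the cached copy back so a
 * later transition to shared state sees the update.
 */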
static void buffer_set_comp(struct comp_buffer *buffer, struct comp_dev *comp,
			    int dir)
{
	struct comp_buffer __sparse_cache *buffer_c = buffer_acquire(buffer);

	if (dir == PPL_CONN_DIR_COMP_TO_BUFFER)
		buffer_c->source = comp;
	else
		buffer_c->sink = comp;

	buffer_release(buffer_c);

	/* The buffer might be marked as shared later, write back the cache */
	if (!buffer->c.shared)
		dcache_writeback_invalidate_region(uncache_to_cache(buffer), sizeof(*buffer));
}

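/* Connect a component to a buffer: attach the buffer to the component's
 * source or sink buffer list with local IRQs masked so the list update is
 * atomic on this core. Always returns 0.
 */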
int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer,
		     int dir)
{
	struct list_item *comp_list;
	uint32_t flags;

	if (dir == PPL_CONN_DIR_COMP_TO_BUFFER)
		comp_info(comp, "connect buffer %d as sink", buffer->id);
	else
		comp_info(comp, "connect buffer %d as source", buffer->id);

	irq_local_disable(flags);

	comp_list = comp_buffer_list(comp, dir);
	buffer_attach(buffer, comp_list, dir);
	buffer_set_comp(buffer, comp, dir);

	irq_local_enable(flags);

	return 0;
}

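/* Reverse of pipeline_connect(): detach the buffer from the component's
 * buffer list and clear the buffer's source/sink pointer, under masked IRQs.
 */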
void pipeline_disconnect(struct comp_dev *comp, struct comp_buffer *buffer, int dir)
{
	struct list_item *comp_list;
	uint32_t flags;

	if (dir == PPL_CONN_DIR_COMP_TO_BUFFER)
		comp_dbg(comp, "disconnect buffer %d as sink", buffer->id);
	else
		comp_dbg(comp, "disconnect buffer %d as source", buffer->id);

	irq_local_disable(flags);

	comp_list = comp_buffer_list(comp, dir);
	buffer_detach(buffer, comp_list, dir);
	buffer_set_comp(buffer, NULL, dir);

	irq_local_enable(flags);
}

/* pipelines must be inactive */
int pipeline_free(struct pipeline *p)
{
	pipe_dbg(p, "pipeline_free()");

	/*
	 * pipeline_free should only be called after all the widgets in the
	 * pipeline have been freed.
	 */

	/* remove from any scheduling */
	if (p->pipe_task) {
		schedule_task_free(p->pipe_task);
		rfree(p->pipe_task);
	}

	ipc_msg_free(p->msg);

	pipeline_posn_offset_put(p->posn_offset);

	/* now free the pipeline */
	rfree(p);

	/* show heap status */
	heap_trace_all(0);

	return 0;
}

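/* Walk callback: finish the init of one component by pointing it at its
 * owning pipeline and inheriting the pipeline period and priority, then
 * recurse into the connected components. Components belonging to another
 * pipeline are skipped.
 */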
static int pipeline_comp_complete(struct comp_dev *current,
				  struct comp_buffer *calling_buf,
				  struct pipeline_walk_context *ctx, int dir)
{
	struct pipeline_data *ppl_data = ctx->comp_data;

	pipe_dbg(ppl_data->p, "pipeline_comp_complete(), current->comp.id = %u, dir = %u",
		 dev_comp_id(current), dir);

	if (!comp_is_single_pipeline(current, ppl_data->start)) {
		pipe_dbg(ppl_data->p, "pipeline_comp_complete(), current is from another pipeline");
		return 0;
	}

	/* complete component init */
	current->pipeline = ppl_data->p;
	current->period = ppl_data->p->period;
	current->priority = ppl_data->p->priority;

	return pipeline_for_each_comp(current, ctx, dir);
}

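/* Complete a pipeline once all components are connected: walk downstream
 * from @source finishing per-component init, then record the endpoint
 * components and mark the pipeline ready.
 */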
int pipeline_complete(struct pipeline *p, struct comp_dev *source,
		      struct comp_dev *sink)
{
	struct pipeline_data data;
	struct pipeline_walk_context walk_ctx = {
		.comp_func = pipeline_comp_complete,
		.comp_data = &data,
	};

#if !UNIT_TEST && !CONFIG_LIBRARY
	int freq = clock_get_freq(cpu_get_id());
#else
	int freq = 0;
#endif
	int ret;

	pipe_dbg(p, "pipeline complete, clock freq %dHz", freq);

	/* check whether pipeline is already completed */
	if (p->status != COMP_STATE_INIT) {
		pipe_err(p, "pipeline_complete(): Pipeline already completed");
		return -EINVAL;
	}

	data.start = source;
	data.p = p;

	/* now walk downstream from source component and
	 * complete component task and pipeline initialization
	 */
	ret = walk_ctx.comp_func(source, NULL, &walk_ctx, PPL_DIR_DOWNSTREAM);

	p->source_comp = source;
	p->sink_comp = sink;
	p->status = COMP_STATE_READY;

	/* show heap status */
	heap_trace_all(0);

	return ret;
}

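/* Walk callback: reset one component and decide whether the reset should
 * propagate into connected pipelines (only when they share a scheduling
 * component, and never across IPC4 pipeline borders).
 */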
static int pipeline_comp_reset(struct comp_dev *current,
			       struct comp_buffer *calling_buf,
			       struct pipeline_walk_context *ctx, int dir)
{
	struct pipeline *p = ctx->comp_data;
	struct pipeline *p_current = current->pipeline;
	int end_type;
	int is_single_ppl = comp_is_single_pipeline(current, p->source_comp);
	int is_same_sched = pipeline_is_same_sched_comp(p_current, p);
	int err;

	pipe_dbg(p_current, "pipeline_comp_reset(), current->comp.id = %u, dir = %u",
		 dev_comp_id(current), dir);

	/*
	 * Reset should propagate to the connected pipelines, which need to be
	 * scheduled together, except for IPC4, where each pipeline receives
	 * commands from the host separately
	 */
	if (!is_single_ppl && IPC4_MOD_ID(current->ipc_config.id))
		return 0;

	if (!is_single_ppl && !is_same_sched) {
		/* If a pipeline connected to the starting one runs in the
		 * improper direction (CAPTURE towards DAI, PLAYBACK towards
		 * HOST), stop propagation. The direction param of the
		 * pipeline cannot be trusted at this point, as it might not
		 * be configured yet, hence checking the endpoint component
		 * type instead.
		 */
		end_type = comp_get_endpoint_type(p_current->sink_comp);
		switch (dir) {
		case SOF_IPC_STREAM_PLAYBACK:
			if (end_type == COMP_ENDPOINT_HOST ||
			    end_type == COMP_ENDPOINT_NODE)
				return 0;
			break;
		case SOF_IPC_STREAM_CAPTURE:
			if (end_type == COMP_ENDPOINT_DAI ||
			    end_type == COMP_ENDPOINT_NODE)
				return 0;
		}
	}

	err = comp_reset(current);
	if (err < 0 || err == PPL_STATUS_PATH_STOP)
		return err;

	return pipeline_for_each_comp(current, ctx, dir);
}

/* reset the whole pipeline */
int pipeline_reset(struct pipeline *p, struct comp_dev *host)
{
	struct pipeline_walk_context walk_ctx = {
		.comp_func = pipeline_comp_reset,
		.comp_data = p,
		.buff_func = buffer_reset_params,
		.skip_incomplete = true,
	};
	int ret;

	pipe_dbg(p, "pipe reset");

	ret = walk_ctx.comp_func(host, NULL, &walk_ctx, host->direction);
	if (ret < 0) {
		pipe_err(p, "pipeline_reset(): ret = %d, host->comp.id = %u",
			 ret, dev_comp_id(host));
	} else {
		/* pipeline is reset to default state */
		p->status = COMP_STATE_READY;
	}

	return ret;
}
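
/*
 * Illustrative lifecycle sketch (simplified; error handling and component
 * creation omitted). A pipeline built via the API above is typically driven
 * through:
 *
 *	struct pipeline *p = pipeline_new(id, prio, comp_id);
 *
 *	pipeline_connect(comp, buf, PPL_CONN_DIR_COMP_TO_BUFFER);
 *	...
 *	pipeline_complete(p, source_comp, sink_comp);
 *	...
 *	pipeline_reset(p, host_comp);
 *	pipeline_free(p);
 */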

/* Generic method for walking the graph upstream or downstream.
 * It requires a function pointer for recursion.
 */
int pipeline_for_each_comp(struct comp_dev *current,
			   struct pipeline_walk_context *ctx, int dir)
{
	struct list_item *buffer_list = comp_buffer_list(current, dir);
	struct list_item *clist;

	/* run this operation on each attached buffer */
	list_for_item(clist, buffer_list) {
		struct comp_buffer *buffer = buffer_from_list(clist, dir);
		struct comp_buffer __sparse_cache *buffer_c;
		struct comp_dev *buffer_comp;
		int err = 0;

		if (ctx->incoming == buffer)
			continue;

		/* don't go back to a buffer that has already been walked */
		/*
		 * Note that this access must be performed unlocked via an
		 * uncached address. Trying to lock before checking the flag
		 * understandably leads to a deadlock when this function is
		 * called recursively from .comp_func() below. We do it in a
		 * safe way: this flag must *only* be accessed in this
		 * function, in exactly three cases: testing, setting and
		 * clearing. Note that it is also assumed that buffers aren't
		 * shared across CPUs. See further comment below.
		 */
		dcache_writeback_invalidate_region(uncache_to_cache(buffer), sizeof(*buffer));
		if (buffer->walking)
			continue;

		buffer_comp = buffer_get_comp(buffer, dir);

		buffer_c = buffer_acquire(buffer);

		/* execute operation on buffer */
		if (ctx->buff_func)
			ctx->buff_func(buffer_c, ctx->buff_data);

		/* don't go further if this component is not connected */
		if (buffer_comp &&
		    (!ctx->skip_incomplete || buffer_comp->pipeline) &&
		    ctx->comp_func) {
			buffer_c->walking = true;
			buffer_release(buffer_c);

			err = ctx->comp_func(buffer_comp, buffer,
					     ctx, dir);

			buffer_c = buffer_acquire(buffer);
			buffer_c->walking = false;
		}

		buffer_release(buffer_c);

		if (err < 0 || err == PPL_STATUS_PATH_STOP)
			return err;
	}

	return 0;
}

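/*
 * Illustrative walk sketch (my_comp_func is a hypothetical callback name):
 * visiting every component downstream of a device is a matter of filling in
 * a walk context, as pipeline_complete() and pipeline_reset() above do:
 *
 *	struct pipeline_walk_context ctx = {
 *		.comp_func = my_comp_func,	// invoked per component
 *	};
 *
 *	ret = pipeline_for_each_comp(host, &ctx, PPL_DIR_DOWNSTREAM);
 */
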
/* visit connected pipelines to find the DAI comp */
struct comp_dev *pipeline_get_dai_comp(uint32_t pipeline_id, int dir)
{
	struct ipc_comp_dev *crt;
	struct ipc *ipc = ipc_get();

	crt = ipc_get_ppl_comp(ipc, pipeline_id, dir);
	while (crt) {
		struct comp_buffer *buffer;
		struct comp_dev *comp;
		struct list_item *blist = comp_buffer_list(crt->cd, dir);

		/* if the buffer list is empty then we have found a DAI */
		if (list_is_empty(blist))
			return crt->cd;

		buffer = buffer_from_list(blist->next, dir);
		comp = buffer_get_comp(buffer, dir);

		/* comp is in another pipeline and it is not complete */
		if (!comp->pipeline)
			return NULL;

		crt = ipc_get_ppl_comp(ipc, comp->pipeline->pipeline_id, dir);
	}

	return NULL;
}

#if CONFIG_IPC_MAJOR_4
/* Playback only: visit connected pipelines to find the DAI comp and latency.
 * This function walks down through a chain of pipelines looking for the
 * target DAI component and calculates the delay of each pipeline from the
 * number of buffered blocks.
 */
struct comp_dev *pipeline_get_dai_comp_latency(uint32_t pipeline_id, uint32_t *latency)
{
	struct ipc_comp_dev *ipc_sink;
	struct ipc_comp_dev *ipc_source;
	struct comp_dev *source;
	struct ipc *ipc = ipc_get();

	*latency = 0;

	/* Walk through the IPC component list and get the source endpoint
	 * component of the given pipeline.
	 */
	ipc_source = ipc_get_ppl_src_comp(ipc, pipeline_id);
	if (!ipc_source)
		return NULL;
	source = ipc_source->cd;

	/* Walk through the IPC component list and get the sink endpoint
	 * component of the given pipeline. This returns the first sink; we
	 * assume that the DAI is connected to pin 0.
	 */
	ipc_sink = ipc_get_ppl_sink_comp(ipc, pipeline_id);
	while (ipc_sink) {
		struct comp_buffer *buffer;
		uint64_t input_data, output_data;
		struct ipc4_base_module_cfg input_base_cfg = {.ibs = 0};
		struct ipc4_base_module_cfg output_base_cfg = {.obs = 0};
		int ret;

		/* Calculate the pipeline latency */
		input_data = comp_get_total_data_processed(source, 0, true);
		output_data = comp_get_total_data_processed(ipc_sink->cd, 0, false);

		ret = comp_get_attribute(source, COMP_ATTR_BASE_CONFIG, &input_base_cfg);
		if (ret < 0)
			return NULL;

		ret = comp_get_attribute(ipc_sink->cd, COMP_ATTR_BASE_CONFIG, &output_base_cfg);
		if (ret < 0)
			return NULL;

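		/* Blocks consumed at the pipeline input minus blocks produced
		 * at its output is the number of blocks still buffered inside
		 * this pipeline; ibs/obs are the input/output block sizes.
		 */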
		if (input_data && output_data && input_base_cfg.ibs && output_base_cfg.obs)
			*latency += input_data / input_base_cfg.ibs -
				output_data / output_base_cfg.obs;

		/* If the component doesn't have a sink buffer, it may be a DAI */
		if (list_is_empty(&ipc_sink->cd->bsink_list))
			return dev_comp_type(ipc_sink->cd) == SOF_COMP_DAI ? ipc_sink->cd : NULL;

		/* Get the component connected to our sink buffer - hop to the next pipeline */
		buffer = buffer_from_list(comp_buffer_list(ipc_sink->cd, PPL_DIR_DOWNSTREAM)->next,
					  PPL_DIR_DOWNSTREAM);
		source = buffer_get_comp(buffer, PPL_DIR_DOWNSTREAM);

		/* source is in another pipeline and it is not complete */
		if (!source || !source->pipeline)
			return NULL;

		/* Get the next sink component */
		ipc_sink = ipc_get_ppl_sink_comp(ipc, source->pipeline->pipeline_id);
	}

	return NULL;
}
#endif
