1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2016 Intel Corporation. All rights reserved.
4 //
5 // Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
6 // Keyon Jie <yang.jie@linux.intel.com>
7
8 #include <sof/audio/buffer.h>
9 #include <sof/audio/component_ext.h>
10 #include <sof/audio/pipeline.h>
11 #include <sof/ipc/msg.h>
12 #include <sof/drivers/interrupt.h>
13 #include <sof/lib/mm_heap.h>
14 #include <sof/lib/uuid.h>
15 #include <sof/list.h>
16 #include <sof/spinlock.h>
17 #include <sof/string.h>
18 #include <ipc/header.h>
19 #include <ipc/stream.h>
20 #include <ipc/topology.h>
21 #include <errno.h>
22 #include <stdbool.h>
23 #include <stddef.h>
24 #include <stdint.h>
25
26 /* 4e934adb-b0ec-4d33-a086-c1022f921321 */
27 DECLARE_SOF_RT_UUID("pipe", pipe_uuid, 0x4e934adb, 0xb0ec, 0x4d33,
28 0xa0, 0x86, 0xc1, 0x02, 0x2f, 0x92, 0x13, 0x21);
29
30 DECLARE_TR_CTX(pipe_tr, SOF_UUID(pipe_uuid), LOG_LEVEL_INFO);
31
/* number of pipeline stream metadata objects we export in mailbox */
#define PPL_POSN_OFFSETS \
	(MAILBOX_STREAM_SIZE / sizeof(struct sof_ipc_stream_posn))

/* lookup table to determine busy/free pipeline metadata objects.
 * Each entry corresponds to one sof_ipc_stream_posn slot in the mailbox
 * stream region; an entry set to true means that slot is claimed by a
 * live pipeline (see pipeline_posn_offset_get/put below).
 */
struct pipeline_posn {
	bool posn_offset[PPL_POSN_OFFSETS];	/**< true when slot i is in use */
	spinlock_t lock;	/**< serializes access to posn_offset[] */
};
/* the pipeline position lookup table; SHARED_DATA — accessed via
 * sof_get()->pipeline_posn after pipeline_posn_init()
 */
static SHARED_DATA struct pipeline_posn pipeline_posn;
43
44 /**
45 * \brief Retrieves pipeline position structure.
46 * \return Pointer to pipeline position structure.
47 */
pipeline_posn_get(void)48 static inline struct pipeline_posn *pipeline_posn_get(void)
49 {
50 return sof_get()->pipeline_posn;
51 }
52
53 /**
54 * \brief Retrieves first free pipeline position offset.
55 * \param[in,out] posn_offset Pipeline position offset to be set.
56 * \return Error code.
57 */
/**
 * \brief Claims the first free pipeline position slot.
 * \param[out] posn_offset Byte offset of the claimed slot within the
 *			   mailbox stream region.
 * \return 0 on success, -EINVAL when every slot is busy.
 */
static inline int pipeline_posn_offset_get(uint32_t *posn_offset)
{
	struct pipeline_posn *posn = pipeline_posn_get();
	int ret = -EINVAL;
	uint32_t slot;

	spin_lock(&posn->lock);

	for (slot = 0; slot < PPL_POSN_OFFSETS; slot++) {
		if (posn->posn_offset[slot])
			continue;

		/* claim the slot and report its byte offset */
		posn->posn_offset[slot] = true;
		*posn_offset = slot * sizeof(struct sof_ipc_stream_posn);
		ret = 0;
		break;
	}

	spin_unlock(&posn->lock);

	return ret;
}
80
81 /**
82 * \brief Frees pipeline position offset.
83 * \param[in] posn_offset Pipeline position offset to be freed.
84 */
/**
 * \brief Frees pipeline position offset.
 * \param[in] posn_offset Pipeline position offset to be freed, as
 *			  previously returned by pipeline_posn_offset_get().
 */
static inline void pipeline_posn_offset_put(uint32_t posn_offset)
{
	struct pipeline_posn *pipeline_posn = pipeline_posn_get();
	uint32_t i = posn_offset / sizeof(struct sof_ipc_stream_posn);

	/* Guard against a corrupted or never-assigned offset: the original
	 * code indexed posn_offset[] unchecked, so an out-of-range value
	 * would write past the array inside the shared lookup table.
	 */
	if (i >= PPL_POSN_OFFSETS)
		return;

	spin_lock(&pipeline_posn->lock);

	/* mark the slot free for reuse by the next pipeline_new() */
	pipeline_posn->posn_offset[i] = false;

	spin_unlock(&pipeline_posn->lock);
}
97
pipeline_posn_init(struct sof * sof)98 void pipeline_posn_init(struct sof *sof)
99 {
100 sof->pipeline_posn = platform_shared_get(&pipeline_posn,
101 sizeof(pipeline_posn));
102 spinlock_init(&sof->pipeline_posn->lock);
103 }
104
105 /* create new pipeline - returns pipeline id or negative error */
pipeline_new(uint32_t pipeline_id,uint32_t priority,uint32_t comp_id)106 struct pipeline *pipeline_new(uint32_t pipeline_id, uint32_t priority, uint32_t comp_id)
107 {
108 struct sof_ipc_stream_posn posn;
109 struct pipeline *p;
110 int ret;
111
112 pipe_cl_info("pipeline new pipe_id %d priority %d",
113 pipeline_id, priority);
114
115 /* show heap status */
116 heap_trace_all(0);
117
118 /* allocate new pipeline */
119 p = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*p));
120 if (!p) {
121 pipe_cl_err("pipeline_new(): Out of Memory");
122 return NULL;
123 }
124
125 /* init pipeline */
126 p->comp_id = comp_id;
127 p->priority = priority;
128 p->pipeline_id = pipeline_id;
129 p->status = COMP_STATE_INIT;
130 ret = memcpy_s(&p->tctx, sizeof(struct tr_ctx), &pipe_tr,
131 sizeof(struct tr_ctx));
132 assert(!ret);
133
134 ret = pipeline_posn_offset_get(&p->posn_offset);
135 if (ret < 0) {
136 pipe_err(p, "pipeline_new(): pipeline_posn_offset_get failed %d",
137 ret);
138 rfree(p);
139 return NULL;
140 }
141
142 /* just for retrieving valid ipc_msg header */
143 ipc_build_stream_posn(&posn, SOF_IPC_STREAM_TRIG_XRUN, p->comp_id);
144
145 p->msg = ipc_msg_init(posn.rhdr.hdr.cmd, sizeof(posn));
146 if (!p->msg) {
147 pipe_err(p, "pipeline_new(): ipc_msg_init failed");
148 rfree(p);
149 return NULL;
150 }
151
152 return p;
153 }
154
pipeline_connect(struct comp_dev * comp,struct comp_buffer * buffer,int dir)155 int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer,
156 int dir)
157 {
158 uint32_t flags;
159
160 if (dir == PPL_CONN_DIR_COMP_TO_BUFFER)
161 comp_info(comp, "connect buffer %d as sink", buffer->id);
162 else
163 comp_info(comp, "connect buffer %d as source", buffer->id);
164
165 irq_local_disable(flags);
166 list_item_prepend(buffer_comp_list(buffer, dir),
167 comp_buffer_list(comp, dir));
168 buffer_set_comp(buffer, comp, dir);
169 comp_writeback(comp);
170 irq_local_enable(flags);
171
172 return 0;
173 }
174
pipeline_disconnect(struct comp_dev * comp,struct comp_buffer * buffer,int dir)175 void pipeline_disconnect(struct comp_dev *comp, struct comp_buffer *buffer, int dir)
176 {
177 uint32_t flags;
178
179 if (dir == PPL_CONN_DIR_COMP_TO_BUFFER)
180 comp_info(comp, "disconnect buffer %d as sink", buffer->id);
181 else
182 comp_info(comp, "disconnect buffer %d as source", buffer->id);
183
184 irq_local_disable(flags);
185 list_item_del(buffer_comp_list(buffer, dir));
186 comp_writeback(comp);
187 irq_local_enable(flags);
188 }
189
190 /* pipelines must be inactive */
pipeline_free(struct pipeline * p)191 int pipeline_free(struct pipeline *p)
192 {
193 pipe_info(p, "pipeline_free()");
194
195 /*
196 * pipeline_free should always be called only after all the widgets in the pipeline have
197 * been freed.
198 */
199
200 /* remove from any scheduling */
201 if (p->pipe_task) {
202 schedule_task_free(p->pipe_task);
203 rfree(p->pipe_task);
204 }
205
206 ipc_msg_free(p->msg);
207
208 pipeline_posn_offset_put(p->posn_offset);
209
210 /* now free the pipeline */
211 rfree(p);
212
213 /* show heap status */
214 heap_trace_all(0);
215
216 return 0;
217 }
218
pipeline_comp_complete(struct comp_dev * current,struct comp_buffer * calling_buf,struct pipeline_walk_context * ctx,int dir)219 static int pipeline_comp_complete(struct comp_dev *current,
220 struct comp_buffer *calling_buf,
221 struct pipeline_walk_context *ctx, int dir)
222 {
223 struct pipeline_data *ppl_data = ctx->comp_data;
224
225 pipe_dbg(ppl_data->p, "pipeline_comp_complete(), current->comp.id = %u, dir = %u",
226 dev_comp_id(current), dir);
227
228 if (!comp_is_single_pipeline(current, ppl_data->start)) {
229 pipe_dbg(ppl_data->p, "pipeline_comp_complete(), current is from another pipeline");
230 return 0;
231 }
232
233 /* complete component init */
234 current->pipeline = ppl_data->p;
235 current->period = ppl_data->p->period;
236 current->priority = ppl_data->p->priority;
237
238 pipeline_for_each_comp(current, ctx, dir);
239
240 return 0;
241 }
242
pipeline_complete(struct pipeline * p,struct comp_dev * source,struct comp_dev * sink)243 int pipeline_complete(struct pipeline *p, struct comp_dev *source,
244 struct comp_dev *sink)
245 {
246 struct pipeline_data data;
247 struct pipeline_walk_context walk_ctx = {
248 .comp_func = pipeline_comp_complete,
249 .comp_data = &data,
250 };
251
252 #if !UNIT_TEST && !CONFIG_LIBRARY
253 int freq = clock_get_freq(cpu_get_id());
254 #else
255 int freq = 0;
256 #endif
257
258 pipe_info(p, "pipeline complete, clock freq %dHz", freq);
259
260 /* check whether pipeline is already completed */
261 if (p->status != COMP_STATE_INIT) {
262 pipe_err(p, "pipeline_complete(): Pipeline already completed");
263 return -EINVAL;
264 }
265
266 data.start = source;
267 data.p = p;
268
269 /* now walk downstream from source component and
270 * complete component task and pipeline initialization
271 */
272 walk_ctx.comp_func(source, NULL, &walk_ctx, PPL_DIR_DOWNSTREAM);
273
274 p->source_comp = source;
275 p->sink_comp = sink;
276 p->status = COMP_STATE_READY;
277
278 /* show heap status */
279 heap_trace_all(0);
280
281 return 0;
282 }
283
pipeline_comp_reset(struct comp_dev * current,struct comp_buffer * calling_buf,struct pipeline_walk_context * ctx,int dir)284 static int pipeline_comp_reset(struct comp_dev *current,
285 struct comp_buffer *calling_buf,
286 struct pipeline_walk_context *ctx, int dir)
287 {
288 struct pipeline *p = ctx->comp_data;
289 int stream_direction = dir;
290 int end_type;
291 int is_single_ppl = comp_is_single_pipeline(current, p->source_comp);
292 int is_same_sched =
293 pipeline_is_same_sched_comp(current->pipeline, p);
294 int err;
295
296 pipe_dbg(current->pipeline, "pipeline_comp_reset(), current->comp.id = %u, dir = %u",
297 dev_comp_id(current), dir);
298
299 /* reset should propagate to the connected pipelines,
300 * which need to be scheduled together
301 */
302 if (!is_single_ppl && !is_same_sched) {
303 /* If pipeline connected to the starting one is in improper
304 * direction (CAPTURE towards DAI, PLAYBACK towards HOST),
305 * stop propagation. Direction param of the pipeline can not be
306 * trusted at this point, as it might not be configured yet,
307 * hence checking for endpoint component type.
308 */
309 end_type = comp_get_endpoint_type(current->pipeline->sink_comp);
310 if (stream_direction == SOF_IPC_STREAM_PLAYBACK) {
311 if (end_type == COMP_ENDPOINT_HOST ||
312 end_type == COMP_ENDPOINT_NODE)
313 return 0;
314 }
315
316 if (stream_direction == SOF_IPC_STREAM_CAPTURE) {
317 if (end_type == COMP_ENDPOINT_DAI ||
318 end_type == COMP_ENDPOINT_NODE)
319 return 0;
320 }
321 }
322
323 err = comp_reset(current);
324 if (err < 0 || err == PPL_STATUS_PATH_STOP)
325 return err;
326
327 return pipeline_for_each_comp(current, ctx, dir);
328 }
329
330 /* reset the whole pipeline */
pipeline_reset(struct pipeline * p,struct comp_dev * host)331 int pipeline_reset(struct pipeline *p, struct comp_dev *host)
332 {
333 struct pipeline_walk_context walk_ctx = {
334 .comp_func = pipeline_comp_reset,
335 .comp_data = p,
336 .buff_func = buffer_reset_params,
337 .skip_incomplete = true,
338 };
339 int ret;
340
341 pipe_info(p, "pipe reset");
342
343 ret = walk_ctx.comp_func(host, NULL, &walk_ctx, host->direction);
344 if (ret < 0) {
345 pipe_err(p, "pipeline_reset(): ret = %d, host->comp.id = %u",
346 ret, dev_comp_id(host));
347 }
348
349 return ret;
350 }
351
352 /* Generic method for walking the graph upstream or downstream.
353 * It requires function pointer for recursion.
354 */
pipeline_for_each_comp(struct comp_dev * current,struct pipeline_walk_context * ctx,int dir)355 int pipeline_for_each_comp(struct comp_dev *current,
356 struct pipeline_walk_context *ctx, int dir)
357 {
358 struct list_item *buffer_list = comp_buffer_list(current, dir);
359 struct list_item *clist;
360 struct comp_buffer *buffer;
361 struct comp_dev *buffer_comp;
362 uint32_t flags;
363
364 /* run this operation further */
365 list_for_item(clist, buffer_list) {
366 buffer = buffer_from_list(clist, struct comp_buffer, dir);
367
368 /* don't go back to the buffer which already walked */
369 if (buffer->walking)
370 continue;
371
372 /* execute operation on buffer */
373 if (ctx->buff_func)
374 ctx->buff_func(buffer, ctx->buff_data);
375
376 buffer_comp = buffer_get_comp(buffer, dir);
377
378 /* don't go further if this component is not connected */
379 if (!buffer_comp ||
380 (ctx->skip_incomplete && !buffer_comp->pipeline))
381 continue;
382
383 /* continue further */
384 if (ctx->comp_func) {
385 buffer_lock(buffer, &flags);
386 buffer->walking = true;
387 buffer_unlock(buffer, flags);
388
389 int err = ctx->comp_func(buffer_comp, buffer,
390 ctx, dir);
391 buffer_lock(buffer, &flags);
392 buffer->walking = false;
393 buffer_unlock(buffer, flags);
394 if (err < 0)
395 return err;
396 }
397 }
398
399 return 0;
400 }
401