// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright(c) 2022 Intel Corporation. All rights reserved.
 *
 * Author: Piotr Makaruk <piotr.makaruk@intel.com>
 */

#include <sof/audio/buffer.h>
#include <sof/audio/component.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/pipeline.h>
#include <sof/common.h>
#include <sof/ipc/topology.h>
#include <sof/ipc/common.h>
#include <ipc/dai.h>
#include <ipc4/gateway.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/schedule.h>
#include <rtos/task.h>
#include <sof/lib/dma.h>
#include <ipc4/error_status.h>
#include <ipc4/module.h>
#include <ipc4/pipeline.h>
#include <sof/ut.h>
#include <zephyr/pm/policy.h>
#include <rtos/init.h>
#if CONFIG_IPC4_XRUN_NOTIFICATIONS_ENABLE
#include <ipc4/notification.h>
#include <sof/ipc/msg.h>
#include <ipc/header.h>
#endif

static const struct comp_driver comp_chain_dma;
static const uint32_t max_chain_number = DAI_NUM_HDA_OUT + DAI_NUM_HDA_IN;

LOG_MODULE_REGISTER(chain_dma, CONFIG_SOF_LOG_LEVEL);

/* 6a0a274f-27cc-4afb-a3e7-3444723f432e */
DECLARE_SOF_RT_UUID("chain_dma", chain_dma_uuid, 0x6a0a274f, 0x27cc, 0x4afb,
		    0xa3, 0xe7, 0x34, 0x44, 0x72, 0x3f, 0x43, 0x2e);
DECLARE_TR_CTX(chain_dma_tr, SOF_UUID(chain_dma_uuid), LOG_LEVEL_INFO);

/* chain dma component private data */
struct chain_dma_data {
	bool first_data_received;
	/* node id of host HD/A DMA */
	union ipc4_connector_node_id host_connector_node_id;
	/* node id of link HD/A DMA */
	union ipc4_connector_node_id link_connector_node_id;
	uint32_t *hw_buffer;
	struct task chain_task;
	enum sof_ipc_stream_direction stream_direction;
	/* container size in bytes */
	uint8_t cs;
#if CONFIG_IPC4_XRUN_NOTIFICATIONS_ENABLE
	bool xrun_notification_sent;
	struct ipc_msg *msg_xrun;
#endif

	/* local host DMA config */
	struct dma *dma_host;
	struct dma_chan_data *chan_host;
	struct dma_config z_config_host;
	struct dma_block_config dma_block_cfg_host;

	/* local link DMA config */
	struct dma *dma_link;
	struct dma_chan_data *chan_link;
	struct dma_config z_config_link;
	struct dma_block_config dma_block_cfg_link;

	struct comp_buffer *dma_buffer;
};

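/* Thin wrappers that start/stop the host-side and link-side HD-A DMA channels */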
static int chain_host_start(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	int err;

	err = dma_start(cd->chan_host->dma->z_dev, cd->chan_host->index);
	if (err < 0)
		return err;

	comp_info(dev, "chain_host_start(): dma_start() host chan_index = %u",
		  cd->chan_host->index);
	return 0;
}

static int chain_link_start(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	int err;

	err = dma_start(cd->chan_link->dma->z_dev, cd->chan_link->index);
	if (err < 0)
		return err;

	comp_info(dev, "chain_link_start(): dma_start() link chan_index = %u",
		  cd->chan_link->index);
	return 0;
}

static int chain_link_stop(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	int err;

	err = dma_stop(cd->chan_link->dma->z_dev, cd->chan_link->index);
	if (err < 0)
		return err;

	comp_info(dev, "chain_link_stop(): dma_stop() link chan_index = %u",
		  cd->chan_link->index);

	return 0;
}

static int chain_host_stop(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	int err;

	err = dma_stop(cd->chan_host->dma->z_dev, cd->chan_host->index);
	if (err < 0)
		return err;

	comp_info(dev, "chain_host_stop(): dma_stop() host chan_index = %u",
		  cd->chan_host->index);

	return 0;
}

/* Get the size of data consumed by the link side */
static size_t chain_get_transferred_data_size(const uint32_t out_read_pos,
					      const uint32_t in_read_pos,
					      const size_t buff_size)
{
	if (out_read_pos >= in_read_pos)
		return out_read_pos - in_read_pos;

	return buff_size - in_read_pos + out_read_pos;
}

#if CONFIG_IPC4_XRUN_NOTIFICATIONS_ENABLE
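/*
 * Notify the host about a link underrun (link output) or overrun (link input).
 * The xrun_notification_sent flag throttles reporting so that back-to-back
 * xruns do not each generate a notification.
 */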
static void handle_xrun(struct chain_dma_data *cd)
{
	if (cd->link_connector_node_id.f.dma_type == ipc4_hda_link_output_class &&
	    !cd->xrun_notification_sent) {
		tr_warn(&chain_dma_tr, "handle_xrun(): underrun detected");
		xrun_notif_msg_init(cd->msg_xrun, cd->link_connector_node_id.dw,
				    SOF_IPC4_GATEWAY_UNDERRUN_DETECTED);
		ipc_msg_send(cd->msg_xrun, NULL, true);
		cd->xrun_notification_sent = true;
	} else if (cd->link_connector_node_id.f.dma_type == ipc4_hda_link_input_class &&
		   !cd->xrun_notification_sent) {
		tr_warn(&chain_dma_tr, "handle_xrun(): overrun detected");
		xrun_notif_msg_init(cd->msg_xrun, cd->link_connector_node_id.dw,
				    SOF_IPC4_GATEWAY_OVERRUN_DETECTED);
		ipc_msg_send(cd->msg_xrun, NULL, true);
		cd->xrun_notification_sent = true;
	} else {
		/* If xrun_notification_sent is already set, it means that the link was
		 * able to reach stability, therefore the next underrun/overrun should
		 * be reported again.
		 */
		cd->xrun_notification_sent = false;
	}
}
#endif

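/*
 * Periodic LL task that moves data between the host and link DMAs. For capture
 * it forwards whatever the link has produced straight to the host; for
 * playback it waits until the host has filled half of the DMA buffer before
 * starting the link, then keeps both sides in sync based on the read positions
 * reported by the DMAs.
 */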
static enum task_state chain_task_run(void *data)
{
	size_t link_avail_bytes, link_free_bytes, host_avail_bytes, host_free_bytes;
	struct chain_dma_data *cd = data;
	uint32_t link_read_pos, host_read_pos;
	struct dma_status stat;
	uint32_t link_type;
	int ret;

	/* If an xrun occurs, link DMA can return -EPIPE together with the current
	 * status; that is not critical and the flow shall continue. Other error
	 * values are treated as critical.
	 */
	ret = dma_get_status(cd->chan_link->dma->z_dev, cd->chan_link->index, &stat);
	switch (ret) {
	case 0:
		break;
	case -EPIPE:
		tr_warn(&chain_dma_tr, "chain_task_run(): dma_get_status() link xrun occurred,"
			" ret = %u", ret);
#if CONFIG_IPC4_XRUN_NOTIFICATIONS_ENABLE
		handle_xrun(cd);
#endif
		break;
	default:
		tr_err(&chain_dma_tr, "chain_task_run(): dma_get_status() error, ret = %u", ret);
		return SOF_TASK_STATE_COMPLETED;
	}

	link_avail_bytes = stat.pending_length;
	link_free_bytes = stat.free;
	link_read_pos = stat.read_position;

	/* Host DMA does not report xruns. All error values will be treated as critical. */
	ret = dma_get_status(cd->chan_host->dma->z_dev, cd->chan_host->index, &stat);
	if (ret < 0) {
		tr_err(&chain_dma_tr, "chain_task_run(): dma_get_status() error, ret = %u", ret);
		return SOF_TASK_STATE_COMPLETED;
	}

	host_avail_bytes = stat.pending_length;
	host_free_bytes = stat.free;
	host_read_pos = stat.read_position;

	link_type = cd->link_connector_node_id.f.dma_type;
	if (link_type == ipc4_hda_link_input_class) {
		/* CAPTURE:
		 * When Link Input is chained with Host Input, start transmitting data
		 * to the host immediately. In this mode the task always streams as much
		 * data to the host as possible.
		 */
		const size_t increment = MIN(host_free_bytes, link_avail_bytes);

		ret = dma_reload(cd->chan_host->dma->z_dev, cd->chan_host->index, 0, 0, increment);
		if (ret < 0) {
			tr_err(&chain_dma_tr,
			       "chain_task_run(): dma_reload() host error, ret = %u", ret);
			return SOF_TASK_STATE_COMPLETED;
		}

		ret = dma_reload(cd->chan_link->dma->z_dev, cd->chan_link->index, 0, 0, increment);
		if (ret < 0) {
			tr_err(&chain_dma_tr,
			       "chain_task_run(): dma_reload() link error, ret = %u", ret);
			return SOF_TASK_STATE_COMPLETED;
		}
	} else {
		/* PLAYBACK:
		 * When Host Output is chained with Link Output, wait until the buffer is
		 * half full. In this mode the task advances the read position by the
		 * transferred data size to avoid overwriting valid data, and advances
		 * the write position by half the buffer size.
		 */
		const size_t half_buff_size = cd->dma_buffer->stream.size / 2;

		if (!cd->first_data_received && host_avail_bytes > half_buff_size) {
			ret = dma_reload(cd->chan_link->dma->z_dev,
					 cd->chan_link->index, 0, 0,
					 half_buff_size);
			if (ret < 0) {
				tr_err(&chain_dma_tr,
				       "chain_task_run(): dma_reload() link error, ret = %u",
				       ret);
				return SOF_TASK_STATE_COMPLETED;
			}
			cd->first_data_received = true;

		} else if (cd->first_data_received) {
			const size_t transferred =
				chain_get_transferred_data_size(link_read_pos,
								host_read_pos,
								cd->dma_buffer->stream.size);

			ret = dma_reload(cd->chan_host->dma->z_dev, cd->chan_host->index,
					 0, 0, transferred);
			if (ret < 0) {
				tr_err(&chain_dma_tr,
				       "chain_task_run(): dma_reload() host error, ret = %u", ret);
				return SOF_TASK_STATE_COMPLETED;
			}

			if (host_avail_bytes >= half_buff_size &&
			    link_free_bytes >= half_buff_size) {
				ret = dma_reload(cd->chan_link->dma->z_dev, cd->chan_link->index,
						 0, 0, half_buff_size);
				if (ret < 0) {
					tr_err(&chain_dma_tr, "chain_task_run(): dma_reload() "
					       "link error, ret = %u", ret);
					return SOF_TASK_STATE_COMPLETED;
				}
			}
		}
	}
	return SOF_TASK_STATE_RESCHEDULE;
}

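/*
 * Start both DMA channels (order depends on stream direction), create the LL
 * timer task that runs chain_task_run() and block runtime idle while the
 * chain is active.
 */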
static int chain_task_start(struct comp_dev *dev)
{
	struct comp_driver_list *drivers = comp_drivers_get();
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	k_spinlock_key_t key;
	int ret;

	comp_info(dev, "chain_task_start(), host_dma_id = 0x%08x", cd->host_connector_node_id.dw);

	key = k_spin_lock(&drivers->lock);
	switch (cd->chain_task.state) {
	case SOF_TASK_STATE_QUEUED:
		k_spin_unlock(&drivers->lock, key);
		return 0;
	case SOF_TASK_STATE_COMPLETED:
		break;
	case SOF_TASK_STATE_INIT:
		break;
	case SOF_TASK_STATE_FREE:
		break;
	default:
		comp_err(dev, "chain_task_start(), bad state transition");
		ret = -EINVAL;
		goto error;
	}

	if (cd->stream_direction == SOF_IPC_STREAM_PLAYBACK) {
		ret = chain_host_start(dev);
		if (ret)
			goto error;
		ret = chain_link_start(dev);
		if (ret) {
			chain_host_stop(dev);
			goto error;
		}
	} else {
		ret = chain_link_start(dev);
		if (ret)
			goto error;
		ret = chain_host_start(dev);
		if (ret) {
			chain_link_stop(dev);
			goto error;
		}
	}

	ret = schedule_task_init_ll(&cd->chain_task, SOF_UUID(chain_dma_uuid),
				    SOF_SCHEDULE_LL_TIMER, SOF_TASK_PRI_HIGH,
				    chain_task_run, cd, 0, 0);
	if (ret < 0) {
		comp_err(dev, "chain_task_start(), ll task initialization failed");
		goto error_task;
	}

	ret = schedule_task(&cd->chain_task, 0, 0);
	if (ret < 0) {
		comp_err(dev, "chain_task_start(), ll schedule task failed");
		schedule_task_free(&cd->chain_task);
		goto error_task;
	}

	pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
	k_spin_unlock(&drivers->lock, key);

	return 0;

error_task:
	chain_host_stop(dev);
	chain_link_stop(dev);
error:
	k_spin_unlock(&drivers->lock, key);
	return ret;
}

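/*
 * Stop both DMA channels, free the LL task and release the runtime idle lock
 * taken in chain_task_start().
 */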
static int chain_task_pause(struct comp_dev *dev)
{
	struct comp_driver_list *drivers = comp_drivers_get();
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	k_spinlock_key_t key;
	int ret, ret2;

	if (cd->chain_task.state == SOF_TASK_STATE_FREE)
		return 0;

	key = k_spin_lock(&drivers->lock);
	cd->first_data_received = false;
	if (cd->stream_direction == SOF_IPC_STREAM_PLAYBACK) {
		ret = chain_host_stop(dev);
		ret2 = chain_link_stop(dev);
	} else {
		ret = chain_link_stop(dev);
		ret2 = chain_host_stop(dev);
	}
	if (!ret)
		ret = ret2;

	k_spin_unlock(&drivers->lock, key);

	schedule_task_free(&cd->chain_task);
	pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);

	return ret;
}

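/* Release the DMA channels, the DMA devices and the intermediate DMA buffer */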
static void chain_release(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);

	dma_release_channel(cd->chan_host->dma->z_dev, cd->chan_host->index);
	dma_put(cd->dma_host);
	dma_release_channel(cd->chan_link->dma->z_dev, cd->chan_link->index);
	dma_put(cd->dma_link);

	if (cd->dma_buffer) {
		buffer_free(cd->dma_buffer);
		cd->dma_buffer = NULL;
	}
}

/* Retrieves the host or link connector node id from the dma id */
static int get_connector_node_id(uint32_t dma_id, bool host_type,
				 union ipc4_connector_node_id *connector_node_id)
{
	uint8_t type = host_type ? ipc4_hda_host_output_class : ipc4_hda_link_output_class;

	if (dma_id >= DAI_NUM_HDA_OUT) {
		type = host_type ? ipc4_hda_host_input_class : ipc4_hda_link_input_class;
		dma_id -= DAI_NUM_HDA_OUT;
		if (dma_id >= DAI_NUM_HDA_IN)
			return -EINVAL;
	}
	connector_node_id->dw = 0;
	connector_node_id->f.dma_type = type;
	connector_node_id->f.v_index = dma_id;

	return 0;
}

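/*
 * Configure the host and link DMA channels: both channels share the same
 * intermediate buffer (addr/length) and are requested by the v_index encoded
 * in the connector node ids.
 */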
static int chain_init(struct comp_dev *dev, void *addr, size_t length)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	struct dma_block_config *dma_block_cfg_host = &cd->dma_block_cfg_host;
	struct dma_block_config *dma_block_cfg_link = &cd->dma_block_cfg_link;
	struct dma_config *dma_cfg_host = &cd->z_config_host;
	struct dma_config *dma_cfg_link = &cd->z_config_link;
	int channel;
	int err;

	memset(dma_cfg_host, 0, sizeof(*dma_cfg_host));
	memset(dma_block_cfg_host, 0, sizeof(*dma_block_cfg_host));
	dma_cfg_host->block_count = 1;
	dma_cfg_host->source_data_size = cd->cs;
	dma_cfg_host->dest_data_size = cd->cs;
	dma_cfg_host->head_block = dma_block_cfg_host;
	dma_block_cfg_host->block_size = length;

	memset(dma_cfg_link, 0, sizeof(*dma_cfg_link));
	memset(dma_block_cfg_link, 0, sizeof(*dma_block_cfg_link));
	dma_cfg_link->block_count = 1;
	dma_cfg_link->source_data_size = cd->cs;
	dma_cfg_link->dest_data_size = cd->cs;
	dma_cfg_link->head_block = dma_block_cfg_link;
	dma_block_cfg_link->block_size = length;

	switch (cd->stream_direction) {
	case SOF_IPC_STREAM_PLAYBACK:
		dma_cfg_host->channel_direction = HOST_TO_MEMORY;
		dma_block_cfg_host->dest_address = (uint32_t)addr;
		dma_cfg_link->channel_direction = MEMORY_TO_PERIPHERAL;
		dma_block_cfg_link->source_address = (uint32_t)addr;
		break;
	case SOF_IPC_STREAM_CAPTURE:
		dma_cfg_host->channel_direction = MEMORY_TO_HOST;
		dma_block_cfg_host->source_address = (uint32_t)addr;
		dma_cfg_link->channel_direction = PERIPHERAL_TO_MEMORY;
		dma_block_cfg_link->dest_address = (uint32_t)addr;
		break;
	}

	/* get host DMA channel */
	channel = cd->host_connector_node_id.f.v_index;
	channel = dma_request_channel(cd->dma_host->z_dev, &channel);
	if (channel < 0) {
		comp_err(dev, "chain_init(): dma_request_channel() failed");
		return -EINVAL;
	}

	cd->chan_host = &cd->dma_host->chan[channel];

	err = dma_config(cd->dma_host->z_dev, cd->chan_host->index, dma_cfg_host);
	if (err < 0) {
		comp_err(dev, "chain_init(): dma_config() failed");
		goto error_host;
	}

	/* get link DMA channel */
	channel = cd->link_connector_node_id.f.v_index;
	channel = dma_request_channel(cd->dma_link->z_dev, &channel);
	if (channel < 0) {
		comp_err(dev, "chain_init(): dma_request_channel() failed");
		err = -EINVAL;
		goto error_host;
	}

	cd->chan_link = &cd->dma_link->chan[channel];

	err = dma_config(cd->dma_link->z_dev, cd->chan_link->index, dma_cfg_link);
	if (err < 0) {
		comp_err(dev, "chain_init(): dma_config() failed");
		goto error_link;
	}
	return 0;

error_link:
	dma_release_channel(cd->dma_link->z_dev, cd->chan_link->index);
	cd->chan_link = NULL;
error_host:
	dma_release_channel(cd->dma_host->z_dev, cd->chan_host->index);
	cd->chan_host = NULL;
	return err;
}

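/*
 * Resolve the host and link connector node ids, verify that the two gateways
 * can be chained, acquire the DMA devices, allocate and clear the intermediate
 * DMA buffer and configure both channels via chain_init().
 */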
static int chain_task_init(struct comp_dev *dev, uint8_t host_dma_id, uint8_t link_dma_id,
			   uint32_t fifo_size)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);
	struct comp_buffer __sparse_cache *buffer_c;
	uint32_t addr_align;
	size_t buff_size;
	void *buff_addr;
	uint32_t dir;
	int ret;

	ret = get_connector_node_id(host_dma_id, true, &cd->host_connector_node_id);
	if (ret < 0)
		return ret;

	ret = get_connector_node_id(link_dma_id, false, &cd->link_connector_node_id);
	if (ret < 0)
		return ret;

	/* Verify whether HDA gateways can be chained */
	if (cd->host_connector_node_id.f.dma_type == ipc4_hda_host_output_class) {
		if (cd->link_connector_node_id.f.dma_type != ipc4_hda_link_output_class)
			return -EINVAL;
		cd->stream_direction = SOF_IPC_STREAM_PLAYBACK;
	}
	if (cd->host_connector_node_id.f.dma_type == ipc4_hda_host_input_class) {
		if (cd->link_connector_node_id.f.dma_type != ipc4_hda_link_input_class)
			return -EINVAL;
		cd->stream_direction = SOF_IPC_STREAM_CAPTURE;
	}

	/* request HDA DMA with shared access privilege */
	dir = (cd->stream_direction == SOF_IPC_STREAM_PLAYBACK) ?
		DMA_DIR_HMEM_TO_LMEM : DMA_DIR_LMEM_TO_HMEM;

	cd->dma_host = dma_get(dir, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED);
	if (!cd->dma_host) {
		comp_err(dev, "chain_task_init(): dma_get() returned NULL");
		return -EINVAL;
	}

	dir = (cd->stream_direction == SOF_IPC_STREAM_PLAYBACK) ?
		DMA_DIR_MEM_TO_DEV : DMA_DIR_DEV_TO_MEM;

	cd->dma_link = dma_get(dir, DMA_CAP_HDA, DMA_DEV_HDA, DMA_ACCESS_SHARED);
	if (!cd->dma_link) {
		dma_put(cd->dma_host);
		comp_err(dev, "chain_task_init(): dma_get() returned NULL");
		return -EINVAL;
	}

	/* retrieve DMA buffer address alignment */
	ret = dma_get_attribute(cd->dma_host->z_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
				&addr_align);
	if (ret < 0) {
		comp_err(dev,
			 "chain_task_init(): could not get dma buffer address alignment, err = %d",
			 ret);
		goto error;
	}

	switch (cd->link_connector_node_id.f.dma_type) {
	case ipc4_hda_link_input_class:
		/* Increase the buffer size for the capture path, as L1SEN exit sometimes
		 * takes longer than expected. To prevent glitches and DMA overruns the
		 * buffer is increased 5 times.
		 */
		fifo_size *= 5;
		break;
	case ipc4_hda_link_output_class:
		/* Increase the buffer size for the playback path, as L1SEN exit sometimes
		 * takes longer than expected.
		 * Note: the FIFO size must be smaller than half of the host buffer size
		 * (20ms ping pong) to avoid problems with position reporting.
		 * An increase from the default 2ms to 5ms is enough.
		 */
		fifo_size *= 5;
		fifo_size /= 2;
		break;
	}

	fifo_size = ALIGN_UP_INTERNAL(fifo_size, addr_align);

	cd->dma_buffer = buffer_alloc(fifo_size, SOF_MEM_CAPS_DMA, addr_align);

	if (!cd->dma_buffer) {
		comp_err(dev, "chain_task_init(): failed to alloc dma buffer");
		ret = -EINVAL;
		goto error;
	}

	/* clear dma buffer */
	buffer_c = buffer_acquire(cd->dma_buffer);
	buffer_zero(buffer_c);
	buff_addr = cd->dma_buffer->stream.addr;
	buff_size = cd->dma_buffer->stream.size;
	buffer_release(buffer_c);

	ret = chain_init(dev, buff_addr, buff_size);
	if (ret < 0) {
		buffer_free(cd->dma_buffer);
		cd->dma_buffer = NULL;
		goto error;
	}

	cd->chain_task.state = SOF_TASK_STATE_INIT;

	return 0;
error:
	dma_put(cd->dma_host);
	dma_put(cd->dma_link);
	return ret;
}

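/* Map IPC4 trigger commands onto starting or pausing the chain DMA task */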
static int chain_task_trigger(struct comp_dev *dev, int cmd)
{
	switch (cmd) {
	case COMP_TRIGGER_START:
		return chain_task_start(dev);
	case COMP_TRIGGER_PAUSE:
		return chain_task_pause(dev);
	default:
		return -EINVAL;
	}
}

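/*
 * Create a chain DMA component from the IPC4 CHAIN_DMA request: decode the
 * host/link DMA ids, FIFO size and scs flag, allocate the private data and
 * initialize the chain task.
 */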
static struct comp_dev *chain_task_create(const struct comp_driver *drv,
					  const struct comp_ipc_config *ipc_config,
					  const void *ipc_specific_config)
{
	const struct ipc4_chain_dma *cdma = (struct ipc4_chain_dma *)ipc_specific_config;
	const uint32_t host_dma_id = cdma->primary.r.host_dma_id;
	const uint32_t link_dma_id = cdma->primary.r.link_dma_id;
	const uint32_t fifo_size = cdma->extension.r.fifo_size;
	const bool scs = cdma->primary.r.scs;
	struct chain_dma_data *cd;
	struct comp_dev *dev;
	int ret;

	if (host_dma_id >= max_chain_number)
		return NULL;

	dev = comp_alloc(drv, sizeof(*dev));
	if (!dev)
		return NULL;

	cd = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*cd));
	if (!cd)
		goto error;

	cd->first_data_received = false;
	cd->cs = scs ? 2 : 4;
	cd->chain_task.state = SOF_TASK_STATE_INIT;

	comp_set_drvdata(dev, cd);

	ret = chain_task_init(dev, host_dma_id, link_dma_id, fifo_size);
	if (ret)
		goto error_cd;

#if CONFIG_IPC4_XRUN_NOTIFICATIONS_ENABLE
	cd->msg_xrun = ipc_msg_init(header.dat,
				    sizeof(struct ipc4_resource_event_data_notification));
	if (!cd->msg_xrun)
		goto error_cd;
	cd->xrun_notification_sent = false;
#endif

	return dev;

error_cd:
	rfree(cd);
error:
	rfree(dev);
	return NULL;
}

static void chain_task_free(struct comp_dev *dev)
{
	struct chain_dma_data *cd = comp_get_drvdata(dev);

	chain_release(dev);
	rfree(cd);
	rfree(dev);
}

static const struct comp_driver comp_chain_dma = {
	.uid = SOF_RT_UUID(chain_dma_uuid),
	.tctx = &chain_dma_tr,
	.ops = {
		.create = chain_task_create,
		.trigger = chain_task_trigger,
		.free = chain_task_free,
	},
};

static SHARED_DATA struct comp_driver_info comp_chain_dma_info = {
	.drv = &comp_chain_dma,
};

UT_STATIC void sys_comp_chain_dma_init(void)
{
	comp_register(platform_shared_get(&comp_chain_dma_info,
					  sizeof(comp_chain_dma_info)));
}

DECLARE_MODULE(sys_comp_chain_dma_init);
SOF_MODULE_INIT(chain_dma, sys_comp_chain_dma_init);