// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/format.h>
#include <sof/audio/pipeline.h>
#include <sof/common.h>
#include <rtos/panic.h>
#include <sof/ipc/msg.h>
#include <rtos/interrupt.h>
#include <rtos/timer.h>
#include <rtos/alloc.h>
#include <rtos/cache.h>
#include <rtos/init.h>
#include <sof/lib/dai.h>
#include <sof/lib/memory.h>
#include <sof/lib/notifier.h>
#include <sof/lib/uuid.h>
#include <sof/lib/dma.h>
#include <sof/list.h>
#include <rtos/spinlock.h>
#include <rtos/string.h>
#include <sof/ut.h>
#include <sof/trace/trace.h>
#include <ipc/dai.h>
#include <ipc/stream.h>
#include <ipc/topology.h>
#include <ipc4/copier.h>
#include <user/trace.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dai.h>

static const struct comp_driver comp_dai;

LOG_MODULE_REGISTER(dai_comp, CONFIG_SOF_LOG_LEVEL);

/* c2b00d27-ffbc-4150-a51a-245c79c5e54b */
DECLARE_SOF_RT_UUID("dai", dai_comp_uuid, 0xc2b00d27, 0xffbc, 0x4150,
		    0xa5, 0x1a, 0x24, 0x5c, 0x79, 0xc5, 0xe5, 0x4b);

DECLARE_TR_CTX(dai_comp_tr, SOF_UUID(dai_comp_uuid), LOG_LEVEL_INFO);

#if CONFIG_COMP_DAI_GROUP

static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd);

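/*
 * Called from the NOTIFIER_ID_DAI_TRIGGER notifier with interrupts disabled:
 * the last DAI in a group to receive a trigger command fires this callback
 * for every group member, so all grouped DAIs start or stop atomically.
 */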
static void dai_atomic_trigger(void *arg, enum notify_id type, void *data)
{
	struct comp_dev *dev = arg;
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_group *group = dd->group;

	/* Atomic context set by the last DAI to receive trigger command */
	group->trigger_ret = dai_comp_trigger_internal(dev, group->trigger_cmd);
}

/* Assign DAI to a group */
int dai_assign_group(struct comp_dev *dev, uint32_t group_id)
{
	struct dai_data *dd = comp_get_drvdata(dev);

	if (dd->group) {
		if (dd->group->group_id != group_id) {
			comp_err(dev, "dai_assign_group(), DAI already in group %d, requested %d",
				 dd->group->group_id, group_id);
			return -EINVAL;
		}

		/* No need to re-assign to the same group, do nothing */
		return 0;
	}

	dd->group = dai_group_get(group_id, DAI_CREAT);
	if (!dd->group) {
		comp_err(dev, "dai_assign_group(), failed to assign group %d",
			 group_id);
		return -EINVAL;
	}

	comp_dbg(dev, "dai_assign_group(), group %d num %d",
		 group_id, dd->group->num_dais);

	/* Register for the atomic trigger event */
	notifier_register(dev, dd->group, NOTIFIER_ID_DAI_TRIGGER,
			  dai_atomic_trigger, 0);

	return 0;
}
#endif

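/*
 * Map SOF component trigger commands onto the Zephyr DAI trigger API. Note
 * that both START and RELEASE map to DAI_TRIGGER_START, and both PRE_START
 * and PRE_RELEASE map to DAI_TRIGGER_PRE_START.
 */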
static int dai_trigger_op(struct dai *dai, int cmd, int direction)
{
	const struct device *dev = dai->dev;
	enum dai_trigger_cmd zephyr_cmd;

	switch (cmd) {
	case COMP_TRIGGER_STOP:
		zephyr_cmd = DAI_TRIGGER_STOP;
		break;
	case COMP_TRIGGER_START:
	case COMP_TRIGGER_RELEASE:
		zephyr_cmd = DAI_TRIGGER_START;
		break;
	case COMP_TRIGGER_PAUSE:
		zephyr_cmd = DAI_TRIGGER_PAUSE;
		break;
	case COMP_TRIGGER_PRE_START:
	case COMP_TRIGGER_PRE_RELEASE:
		zephyr_cmd = DAI_TRIGGER_PRE_START;
		break;
	default:
		return -EINVAL;
	}

	return dai_trigger(dev, direction, zephyr_cmd);
}

/* called from src/ipc/ipc3/handler.c and src/ipc/ipc4/dai.c */
int dai_set_config(struct dai *dai, struct ipc_config_dai *common_config,
		   const void *spec_config)
{
	const struct device *dev = dai->dev;
	const struct sof_ipc_dai_config *sof_cfg = spec_config;
	struct dai_config cfg;
	const void *cfg_params;
	bool is_blob;

	cfg.dai_index = common_config->dai_index;
	is_blob = common_config->is_config_blob;
	cfg.format = sof_cfg->format;
	cfg.options = sof_cfg->flags;
	cfg.rate = common_config->sampling_frequency;

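	/*
	 * For a config blob the whole blob is handed to the driver unchanged
	 * (the *_NHLT variants below); otherwise only the type-specific
	 * member of struct sof_ipc_dai_config is passed.
	 */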
	switch (common_config->type) {
	case SOF_DAI_INTEL_SSP:
		cfg.type = is_blob ? DAI_INTEL_SSP_NHLT : DAI_INTEL_SSP;
		cfg_params = is_blob ? spec_config : &sof_cfg->ssp;
		break;
	case SOF_DAI_INTEL_ALH:
		cfg.type = is_blob ? DAI_INTEL_ALH_NHLT : DAI_INTEL_ALH;
		cfg_params = is_blob ? spec_config : &sof_cfg->alh;
		break;
	case SOF_DAI_INTEL_DMIC:
		cfg.type = is_blob ? DAI_INTEL_DMIC_NHLT : DAI_INTEL_DMIC;
		cfg_params = is_blob ? spec_config : &sof_cfg->dmic;
		break;
	case SOF_DAI_INTEL_HDA:
		cfg.type = is_blob ? DAI_INTEL_HDA_NHLT : DAI_INTEL_HDA;
		cfg_params = is_blob ? spec_config : &sof_cfg->hda;
		break;
	default:
		return -EINVAL;
	}

	return dai_config_set(dev, &cfg, cfg_params);
}

/* called from ipc/ipc3/dai.c */
int dai_get_handshake(struct dai *dai, int direction, int stream_id)
{
	k_spinlock_key_t key = k_spin_lock(&dai->lock);
	const struct dai_properties *props = dai_get_properties(dai->dev, direction,
								stream_id);
	int hs_id = props->dma_hs_id;

	k_spin_unlock(&dai->lock, key);

	return hs_id;
}

/* called from ipc/ipc3/dai.c and ipc/ipc4/dai.c */
int dai_get_fifo_depth(struct dai *dai, int direction)
{
	const struct dai_properties *props;
	k_spinlock_key_t key;
	int fifo_depth;

	if (!dai)
		return 0;

	key = k_spin_lock(&dai->lock);
	props = dai_get_properties(dai->dev, direction, 0);
	fifo_depth = props->fifo_depth;
	k_spin_unlock(&dai->lock, key);

	return fifo_depth;
}

int dai_get_stream_id(struct dai *dai, int direction)
{
	k_spinlock_key_t key = k_spin_lock(&dai->lock);
	const struct dai_properties *props = dai_get_properties(dai->dev, direction, 0);
	int stream_id = props->stream_id;

	k_spin_unlock(&dai->lock, key);

	return stream_id;
}

static int dai_get_fifo(struct dai *dai, int direction, int stream_id)
{
	k_spinlock_key_t key = k_spin_lock(&dai->lock);
	const struct dai_properties *props = dai_get_properties(dai->dev, direction,
								stream_id);
	int fifo_address = props->fifo_address;

	k_spin_unlock(&dai->lock, key);

	return fifo_address;
}

/* this is called by the DMA driver every time a descriptor has completed */
static enum dma_cb_status dai_dma_cb(struct comp_dev *dev, uint32_t bytes)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct comp_buffer __sparse_cache *local_buf, *dma_buf;
	enum dma_cb_status dma_status = DMA_CB_STATUS_RELOAD;
	void *buffer_ptr;
	int ret;

	comp_dbg(dev, "dai_dma_cb()");

	/* stop dma copy for pause/stop/xrun */
	if (dev->state != COMP_STATE_ACTIVE || dd->xrun) {
		/* stop the DAI */
		dai_trigger_op(dd->dai, COMP_TRIGGER_STOP, dev->direction);

		/* tell DMA not to reload */
		dma_status = DMA_CB_STATUS_END;
	}

	dma_buf = buffer_acquire(dd->dma_buffer);

	/* is our pipeline handling an XRUN ? */
	if (dd->xrun) {
		/* make sure we only playback silence during an XRUN */
		if (dev->direction == SOF_IPC_STREAM_PLAYBACK)
			/* fill buffer with silence */
			buffer_zero(dma_buf);
		buffer_release(dma_buf);

		return dma_status;
	}

	local_buf = buffer_acquire(dd->local_buffer);

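	/*
	 * For playback, copy from the local (pipeline) buffer into the DMA
	 * buffer that feeds the DAI; for capture, drain the DMA buffer into
	 * the local buffer.
	 */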
	if (dev->direction == SOF_IPC_STREAM_PLAYBACK) {
		ret = dma_buffer_copy_to(local_buf, dma_buf,
					 dd->process, bytes);

		buffer_ptr = local_buf->stream.r_ptr;
	} else {
		ret = dma_buffer_copy_from(dma_buf, local_buf,
					   dd->process, bytes);

		buffer_ptr = local_buf->stream.w_ptr;
	}

	/* check that the DMA buffer copy succeeded */
	if (ret < 0) {
		struct comp_buffer __sparse_cache *source_c, *sink_c;

		source_c = dev->direction == SOF_IPC_STREAM_PLAYBACK ?
					local_buf : dma_buf;
		sink_c = dev->direction == SOF_IPC_STREAM_PLAYBACK ?
					dma_buf : local_buf;
		comp_err(dev, "dai_dma_cb() dma buffer copy failed, dir %d bytes %d avail %d free %d",
			 dev->direction, bytes,
			 audio_stream_get_avail_samples(&source_c->stream) *
				audio_stream_frame_bytes(&source_c->stream),
			 audio_stream_get_free_samples(&sink_c->stream) *
				audio_stream_frame_bytes(&sink_c->stream));
	} else {
		/* update host position (in bytes offset) for drivers */
		dd->total_data_processed += bytes;
	}

	buffer_release(local_buf);
	buffer_release(dma_buf);

	return dma_status;
}

static struct comp_dev *dai_new(const struct comp_driver *drv,
				const struct comp_ipc_config *config,
				const void *spec)
{
	struct comp_dev *dev;
	const struct ipc_config_dai *dai_cfg = spec;
	struct dai_data *dd;
	uint32_t dir;

	comp_cl_dbg(&comp_dai, "dai_new()");

	dev = comp_alloc(drv, sizeof(*dev));
	if (!dev)
		return NULL;
	dev->ipc_config = *config;

	dd = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dd));
	if (!dd) {
		rfree(dev);
		return NULL;
	}

	comp_set_drvdata(dev, dd);

	dd->dai = dai_get(dai_cfg->type, dai_cfg->dai_index, DAI_CREAT);
	if (!dd->dai) {
		comp_cl_err(&comp_dai, "dai_new(): dai_get() failed to create DAI.");
		goto error;
	}

	dd->ipc_config = *dai_cfg;

	/* request GP LP DMA with shared access privilege */
	dir = dai_cfg->direction == SOF_IPC_STREAM_PLAYBACK ?
		DMA_DIR_MEM_TO_DEV : DMA_DIR_DEV_TO_MEM;

	dd->dma = dma_get(dir, dd->dai->dma_caps, dd->dai->dma_dev, DMA_ACCESS_SHARED);
	if (!dd->dma) {
		comp_cl_err(&comp_dai, "dai_new(): dma_get() failed to get shared access to DMA.");
		goto error;
	}

	k_spinlock_init(&dd->dai->lock);

	dma_sg_init(&dd->config.elem_array);
	dd->xrun = 0;
	dd->chan = NULL;

	dev->state = COMP_STATE_READY;
	return dev;

error:
	rfree(dd);
	rfree(dev);
	return NULL;
}

static void dai_free(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);

	if (dd->group) {
		notifier_unregister(dev, dd->group, NOTIFIER_ID_DAI_TRIGGER);
		dai_group_put(dd->group);
	}

	if (dd->chan) {
		dma_release_channel(dd->dma->z_dev, dd->chan->index);
		dd->chan->dev_data = NULL;
	}

	dma_put(dd->dma);

	dai_release_llp_slot(dev);

	dai_put(dd->dai);

	rfree(dd->dai_spec_config);
	rfree(dd);
	rfree(dev);
}

static int dai_comp_get_hw_params(struct comp_dev *dev,
				  struct sof_ipc_stream_params *params,
				  int dir)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_config cfg;
	int ret;

	comp_dbg(dev, "dai_hw_params()");

	ret = dai_config_get(dd->dai->dev, &cfg, dir);
	if (ret)
		return ret;

	params->rate = cfg.rate;
	params->buffer_fmt = 0;
	params->channels = cfg.channels;

	/* dai_comp_get_hw_params() fetches the hardware DAI parameters, which
	 * are then propagated back through the pipeline so that any component
	 * can adapt specific stream parameters. Here we overwrite the
	 * frame_fmt hardware parameter, as the DAI component is able to
	 * convert streams between different frame_fmt's (using the pcm
	 * converter).
	 */
	params->frame_fmt = dev->ipc_config.frame_fmt;

	return ret;
}

static int dai_verify_params(struct comp_dev *dev, struct sof_ipc_stream_params *params)
{
	struct sof_ipc_stream_params hw_params;
	int ret;

	ret = dai_comp_get_hw_params(dev, &hw_params, params->direction);
	if (ret < 0) {
		comp_err(dev, "dai_verify_params(): dai_comp_get_hw_params() failed, ret %d", ret);
		return ret;
	}

	/* Check whether the PCM parameters match the hardware DAI parameters
	 * set during dai_set_config(). A hardware parameter equal to 0 means
	 * that it can vary, so any value is acceptable. The format parameter
	 * is not checked, because the DAI is able to change formats using the
	 * pcm_converter functions.
	 */
	if (hw_params.rate && hw_params.rate != params->rate) {
		comp_err(dev, "dai_verify_params(): pcm rate parameter %d does not match hardware rate %d",
			 params->rate, hw_params.rate);
		return -EINVAL;
	}

	if (hw_params.channels && hw_params.channels != params->channels) {
		comp_err(dev, "dai_verify_params(): pcm channels parameter %d does not match hardware channels %d",
			 params->channels, hw_params.channels);
		return -EINVAL;
	}

	/* set component period frames */
	component_set_nearest_period_frames(dev, params->rate);

	return 0;
}

/* set component audio SSP and DMA configuration */
static int dai_playback_params(struct comp_dev *dev, uint32_t period_bytes,
			       uint32_t period_count)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dma_sg_config *config = &dd->config;
	struct dma_config *dma_cfg;
	struct dma_block_config *dma_block_cfg;
	struct dma_block_config *prev = NULL;
	struct comp_buffer __sparse_cache *dma_buf = buffer_acquire(dd->dma_buffer),
		*local_buf = buffer_acquire(dd->local_buffer);
	uint32_t local_fmt = local_buf->stream.frame_fmt;
	uint32_t dma_fmt = dma_buf->stream.frame_fmt;
	uint32_t fifo, max_block_count, buf_size;
	int i, err = 0;

	buffer_release(local_buf);

	/* set processing function */
	dd->process = pcm_get_conversion_function(local_fmt, dma_fmt);

	if (!dd->process) {
		comp_err(dev, "dai_playback_params(): converter function NULL: local fmt %d dma fmt %d\n",
			 local_fmt, dma_fmt);
		err = -EINVAL;
		goto out;
	}

	/* set up DMA configuration */
	config->direction = DMA_DIR_MEM_TO_DEV;
	config->src_width = get_sample_bytes(dma_fmt);
	config->dest_width = config->src_width;
	config->cyclic = 1;
	config->irq_disabled = pipeline_is_timer_driven(dev->pipeline);
	config->dest_dev = dai_get_handshake(dd->dai, dev->direction, dd->stream_id);
	config->is_scheduling_source = comp_is_scheduling_source(dev);
	config->period = dev->pipeline->period;

	comp_info(dev, "dai_playback_params() dest_dev = %d stream_id = %d src_width = %d dest_width = %d",
		  config->dest_dev, dd->stream_id,
		  config->src_width, config->dest_width);

	if (!config->elem_array.elems) {
		fifo = dai_get_fifo(dd->dai, dev->direction,
				    dd->stream_id);

		comp_dbg(dev, "dai_playback_params() fifo 0x%x", fifo);

		err = dma_get_attribute(dd->dma->z_dev, DMA_ATTR_MAX_BLOCK_COUNT,
					&max_block_count);
		if (err < 0) {
			comp_err(dev, "dai_playback_params(): could not get dma attr max block count, err = %d", err);
			goto out;
		}

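		/*
		 * If the DMA supports fewer blocks than we have periods,
		 * shrink the block count while keeping the total buffer size
		 * constant. Worked example with hypothetical numbers:
		 * period_count = 8, period_bytes = 384 and max_block_count = 4
		 * give buf_size = 3072, which is a multiple of 4, so 4 blocks
		 * of 3072 / 4 = 768 bytes each are used.
		 */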
		if (max_block_count < period_count) {
			comp_dbg(dev, "dai_playback_params(): block count = %d not supported by DMA", period_count);
			buf_size = period_count * period_bytes;
			do {
				if (IS_ALIGNED(buf_size, max_block_count)) {
					period_count = max_block_count;
					period_bytes = buf_size / period_count;
					break;
				} else {
					comp_warn(dev, "dai_playback_params(): alignment error for buffer = %d, block count = %d", buf_size, max_block_count);
				}
			} while (--max_block_count > 0);
		}

		err = dma_sg_alloc(&config->elem_array, SOF_MEM_ZONE_RUNTIME,
				   config->direction,
				   period_count,
				   period_bytes,
				   (uintptr_t)(dma_buf->stream.addr),
				   fifo);
		if (err < 0) {
			comp_err(dev, "dai_playback_params(): dma_sg_alloc() for period_count %d period_bytes %d failed with err = %d",
				 period_count, period_bytes, err);
			goto out;
		}
	}

	dma_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
			  SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
			  sizeof(struct dma_config));
	if (!dma_cfg) {
		comp_err(dev, "dai_playback_params(): dma_cfg allocation failed");
		err = -ENOMEM;
		goto free;
	}

	dma_cfg->channel_direction = MEMORY_TO_PERIPHERAL;
	dma_cfg->source_data_size = config->src_width;
	dma_cfg->dest_data_size = config->dest_width;

	if (config->burst_elems)
		dma_cfg->source_burst_length = config->burst_elems;
	else
		dma_cfg->source_burst_length = 8;

	dma_cfg->dest_burst_length = dma_cfg->source_burst_length;
	dma_cfg->cyclic = config->cyclic;
	dma_cfg->user_data = NULL;
	dma_cfg->dma_callback = NULL;
	dma_cfg->block_count = config->elem_array.count;
	dma_cfg->dma_slot = config->dest_dev;

	dma_block_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
				SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
				sizeof(struct dma_block_config) * dma_cfg->block_count);
	if (!dma_block_cfg) {
		rfree(dma_cfg);
		comp_err(dev, "dai_playback_params(): dma_block_config allocation failed");
		err = -ENOMEM;
		goto free;
	}

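	/*
	 * Chain the per-period block descriptors into a ring: each block
	 * points at the next one and the last block links back to the head,
	 * matching the cyclic DMA configuration above.
	 */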
	dma_cfg->head_block = dma_block_cfg;
	for (i = 0; i < dma_cfg->block_count; i++) {
		dma_block_cfg->dest_scatter_en = config->scatter;
		dma_block_cfg->block_size = config->elem_array.elems[i].size;
		dma_block_cfg->source_address = config->elem_array.elems[i].src;
		dma_block_cfg->dest_address = config->elem_array.elems[i].dest;
		prev = dma_block_cfg;
		prev->next_block = ++dma_block_cfg;
	}
	if (prev)
		prev->next_block = dma_cfg->head_block;
	dd->z_config = dma_cfg;

free:
	if (err < 0)
		dma_sg_free(&config->elem_array);
out:
	buffer_release(dma_buf);

	return err;
}

static int dai_capture_params(struct comp_dev *dev, uint32_t period_bytes,
			      uint32_t period_count)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dma_sg_config *config = &dd->config;
	struct dma_config *dma_cfg;
	struct dma_block_config *dma_block_cfg;
	struct dma_block_config *prev = NULL;
	struct comp_buffer __sparse_cache *dma_buf = buffer_acquire(dd->dma_buffer),
		*local_buf = buffer_acquire(dd->local_buffer);
	uint32_t local_fmt = local_buf->stream.frame_fmt;
	uint32_t dma_fmt = dma_buf->stream.frame_fmt;
	uint32_t fifo, max_block_count, buf_size;
	int i, err = 0;

	buffer_release(local_buf);

	/* set processing function */
	dd->process = pcm_get_conversion_function(dma_fmt, local_fmt);

	if (!dd->process) {
		comp_err(dev, "dai_capture_params(): converter function NULL: local fmt %d dma fmt %d\n",
			 local_fmt, dma_fmt);
		err = -EINVAL;
		goto out;
	}

	/* set up DMA configuration */
	config->direction = DMA_DIR_DEV_TO_MEM;
	config->cyclic = 1;
	config->irq_disabled = pipeline_is_timer_driven(dev->pipeline);
	config->src_dev = dai_get_handshake(dd->dai, dev->direction,
					    dd->stream_id);
	config->is_scheduling_source = comp_is_scheduling_source(dev);
	config->period = dev->pipeline->period;

	/* TODO: Make this code platform-specific or move it to a driver callback */
	if (dd->dai->type == SOF_DAI_INTEL_DMIC) {
		/* For DMIC the DMA src and dest widths should always be 4 bytes
		 * due to the 32 bit FIFO packer. Setting the width to 2 bytes for
		 * a 16 bit format would result in recording at double rate.
		 */
		config->src_width = 4;
		config->dest_width = 4;
	} else {
		config->src_width = get_sample_bytes(dma_fmt);
		config->dest_width = config->src_width;
	}

	comp_info(dev, "dai_capture_params() src_dev = %d stream_id = %d src_width = %d dest_width = %d",
		  config->src_dev, dd->stream_id,
		  config->src_width, config->dest_width);

	if (!config->elem_array.elems) {
		fifo = dai_get_fifo(dd->dai, dev->direction,
				    dd->stream_id);

		comp_dbg(dev, "dai_capture_params() fifo 0x%x", fifo);

		err = dma_get_attribute(dd->dma->z_dev, DMA_ATTR_MAX_BLOCK_COUNT,
					&max_block_count);
		if (err < 0) {
			comp_err(dev, "dai_capture_params(): could not get dma attr max block count, err = %d", err);
			goto out;
		}

		if (!max_block_count) {
			comp_err(dev, "dai_capture_params(): invalid max-block-count of zero");
			err = -EINVAL;
			goto out;
		}

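		/* same block-count reduction as in dai_playback_params() */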
		if (max_block_count < period_count) {
			comp_dbg(dev, "dai_capture_params(): block count = %d not supported by DMA", period_count);
			buf_size = period_count * period_bytes;
			do {
				if (IS_ALIGNED(buf_size, max_block_count)) {
					period_count = max_block_count;
					period_bytes = buf_size / period_count;
					break;
				} else {
					comp_warn(dev, "dai_capture_params(): alignment error for buffer = %d, block count = %d", buf_size, max_block_count);
				}
			} while (--max_block_count > 0);
		}

		err = dma_sg_alloc(&config->elem_array, SOF_MEM_ZONE_RUNTIME,
				   config->direction,
				   period_count,
				   period_bytes,
				   (uintptr_t)(dma_buf->stream.addr),
				   fifo);
		if (err < 0) {
			comp_err(dev, "dai_capture_params(): dma_sg_alloc() for period_count %d period_bytes %d failed with err = %d",
				 period_count, period_bytes, err);
			goto out;
		}
	}

	dma_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
			  SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
			  sizeof(struct dma_config));
	if (!dma_cfg) {
		comp_err(dev, "dai_capture_params(): dma_cfg allocation failed");
		err = -ENOMEM;
		goto free;
	}

	dma_cfg->channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg->source_data_size = config->src_width;
	dma_cfg->dest_data_size = config->dest_width;

	if (config->burst_elems)
		dma_cfg->source_burst_length = config->burst_elems;
	else
		dma_cfg->source_burst_length = 8;

	dma_cfg->dest_burst_length = dma_cfg->source_burst_length;
	dma_cfg->cyclic = config->cyclic;
	dma_cfg->user_data = NULL;
	dma_cfg->dma_callback = NULL;
	dma_cfg->block_count = config->elem_array.count;
	dma_cfg->dma_slot = config->src_dev;

	dma_block_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
				SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
				sizeof(struct dma_block_config) * dma_cfg->block_count);
	if (!dma_block_cfg) {
		rfree(dma_cfg);
		comp_err(dev, "dai_capture_params(): dma_block_config allocation failed");
		err = -ENOMEM;
		goto free;
	}

	dma_cfg->head_block = dma_block_cfg;
	for (i = 0; i < dma_cfg->block_count; i++) {
		dma_block_cfg->dest_scatter_en = config->scatter;
		dma_block_cfg->block_size = config->elem_array.elems[i].size;
		dma_block_cfg->source_address = config->elem_array.elems[i].src;
		dma_block_cfg->dest_address = config->elem_array.elems[i].dest;
		prev = dma_block_cfg;
		prev->next_block = ++dma_block_cfg;
	}
	if (prev)
		prev->next_block = dma_cfg->head_block;
	dd->z_config = dma_cfg;

free:
	if (err < 0)
		dma_sg_free(&config->elem_array);
out:
	buffer_release(dma_buf);

	return err;
}

static int dai_params(struct comp_dev *dev, struct sof_ipc_stream_params *params)
{
	struct sof_ipc_stream_params hw_params = *params;
	struct dai_data *dd = comp_get_drvdata(dev);
	struct comp_buffer __sparse_cache *buffer_c;
	uint32_t frame_size;
	uint32_t period_count;
	uint32_t period_bytes;
	uint32_t buffer_size;
	uint32_t addr_align;
	uint32_t align;
	int err;

	comp_dbg(dev, "dai_params()");

	/* configure dai_data first */
	err = ipc_dai_data_config(dev);
	if (err < 0)
		return err;

	err = dai_verify_params(dev, params);
	if (err < 0) {
		comp_err(dev, "dai_params(): pcm params verification failed.");
		return -EINVAL;
	}

	if (dev->direction == SOF_IPC_STREAM_PLAYBACK)
		dd->local_buffer = list_first_item(&dev->bsource_list,
						   struct comp_buffer,
						   sink_list);
	else
		dd->local_buffer = list_first_item(&dev->bsink_list,
						   struct comp_buffer,
						   source_list);

	/* check if already configured */
	if (dev->state == COMP_STATE_PREPARE) {
		comp_info(dev, "dai_params() component has been already configured.");
		return 0;
	}

	/* params can only be set in the READY (init) state */
	if (dev->state != COMP_STATE_READY) {
		comp_err(dev, "dai_params(): Component is in state %d, expected COMP_STATE_READY.",
			 dev->state);
		return -EINVAL;
	}

	err = dma_get_attribute(dd->dma->z_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
				&addr_align);
	if (err < 0) {
		comp_err(dev, "dai_params(): could not get dma buffer address alignment, err = %d",
			 err);
		return err;
	}

	err = dma_get_attribute(dd->dma->z_dev, DMA_ATTR_BUFFER_SIZE_ALIGNMENT, &align);
	if (err < 0 || !align) {
		comp_err(dev, "dai_params(): no valid dma buffer alignment, err = %d, align = %u",
			 err, align);
		return -EINVAL;
	}

	period_count = dd->dma->plat_data.period_count;
	if (!period_count) {
		comp_err(dev, "dai_params(): no valid dma buffer period count");
		return -EINVAL;
	}

	buffer_c = buffer_acquire(dd->local_buffer);

	/* calculate frame size */
	frame_size = get_frame_bytes(dev->ipc_config.frame_fmt,
				     buffer_c->stream.channels);

	buffer_release(buffer_c);

	/* calculate period size */
	period_bytes = dev->frames * frame_size;
	if (!period_bytes) {
		comp_err(dev, "dai_params(): invalid period_bytes.");
		return -EINVAL;
	}

	dd->period_bytes = period_bytes;

	/* calculate DMA buffer size */
	period_count = MAX(period_count,
			   SOF_DIV_ROUND_UP(dd->ipc_config.dma_buffer_size, period_bytes));
	buffer_size = ALIGN_UP(period_count * period_bytes, align);
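	/*
	 * Worked example with hypothetical numbers: plat_data.period_count = 2,
	 * period_bytes = 384 and dma_buffer_size = 1536 give period_count =
	 * MAX(2, DIV_ROUND_UP(1536, 384)) = 4 and buffer_size =
	 * ALIGN_UP(4 * 384, align) = 1536 for a typical align of 32.
	 */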

	/* alloc DMA buffer or change its size if it exists */
	if (dd->dma_buffer) {
		buffer_c = buffer_acquire(dd->dma_buffer);
		err = buffer_set_size(buffer_c, buffer_size);
		buffer_release(buffer_c);

		if (err < 0) {
			comp_err(dev, "dai_params(): buffer_set_size() failed, buffer_size = %u",
				 buffer_size);
			return err;
		}
	} else {
		dd->dma_buffer = buffer_alloc(buffer_size, SOF_MEM_CAPS_DMA,
					      addr_align);
		if (!dd->dma_buffer) {
			comp_err(dev, "dai_params(): failed to alloc dma buffer");
			return -ENOMEM;
		}

		/*
		 * The dma_buffer should refer to the hardware DAI parameters.
		 * Here we overwrite the frame_fmt hardware parameter, as the
		 * DAI component is able to convert streams between different
		 * frame_fmt's (using the pcm converter).
		 */
		hw_params.frame_fmt = dev->ipc_config.frame_fmt;
		buffer_c = buffer_acquire(dd->dma_buffer);
		buffer_set_params(buffer_c, &hw_params,
				  BUFFER_UPDATE_FORCE);
		buffer_release(buffer_c);
	}

	return dev->direction == SOF_IPC_STREAM_PLAYBACK ?
		dai_playback_params(dev, period_bytes, period_count) :
		dai_capture_params(dev, period_bytes, period_count);
}

static int dai_config_prepare(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	int channel;

	/* cannot configure DAI while active */
	if (dev->state == COMP_STATE_ACTIVE) {
		comp_info(dev, "dai_config_prepare(): Component is in active state.");
		return 0;
	}

	if (!dd->dai_spec_config) {
		comp_err(dev, "dai specific config is not set yet!");
		return -EINVAL;
	}

	if (dd->chan) {
		comp_info(dev, "dai_config_prepare(): dma channel index %d already configured",
			  dd->chan->index);
		return 0;
	}

	channel = dai_config_dma_channel(dev, dd->dai_spec_config);
	comp_dbg(dev, "dai_config_prepare(), channel = %d", channel);

	/* an invalid channel means the DAI config has not arrived yet */
	if (channel == DMA_CHAN_INVALID) {
		comp_err(dev, "dai_config is not set yet!");
		return -EINVAL;
	}

	/* get DMA channel */
	channel = dma_request_channel(dd->dma->z_dev, &channel);
	if (channel < 0) {
		comp_err(dev, "dai_config_prepare(): dma_request_channel() failed");
		dd->chan = NULL;
		return -EIO;
	}

	dd->chan = &dd->dma->chan[channel];
	dd->chan->dev_data = dd;

	comp_dbg(dev, "dai_config_prepare(): new configured dma channel index %d",
		 dd->chan->index);

	return 0;
}

static int dai_prepare(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct comp_buffer __sparse_cache *buffer_c;
	int ret;

	comp_dbg(dev, "dai_prepare()");

	ret = dai_config_prepare(dev);
	if (ret < 0)
		return ret;

	ret = comp_set_state(dev, COMP_TRIGGER_PREPARE);
	if (ret < 0)
		return ret;

	if (ret == COMP_STATUS_STATE_ALREADY_SET)
		return PPL_STATUS_PATH_STOP;

	dd->total_data_processed = 0;

	if (!dd->chan) {
		comp_err(dev, "dai_prepare(): Missing dd->chan.");
		comp_set_state(dev, COMP_TRIGGER_RESET);
		return -EINVAL;
	}

	if (!dd->config.elem_array.elems) {
		comp_err(dev, "dai_prepare(): Missing dd->config.elem_array.elems.");
		comp_set_state(dev, COMP_TRIGGER_RESET);
		return -EINVAL;
	}

	/* clear dma buffer to avoid pop noise */
	buffer_c = buffer_acquire(dd->dma_buffer);
	buffer_zero(buffer_c);
	buffer_release(buffer_c);

	/* dma reconfig not required if XRUN handling */
	if (dd->xrun) {
		/* after prepare, we have recovered from xrun */
		dd->xrun = 0;
		return ret;
	}

	ret = dma_config(dd->chan->dma->z_dev, dd->chan->index, dd->z_config);
	if (ret < 0)
		comp_set_state(dev, COMP_TRIGGER_RESET);

	return ret;
}

static int dai_reset(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dma_sg_config *config = &dd->config;

	comp_dbg(dev, "dai_reset()");

	/*
	 * DMA channel release should be skipped now for DAIs that support the
	 * two-step stop option. It will be done when the host sends the
	 * DAI_CONFIG IPC during hw_free.
	 */
	if (!dd->delayed_dma_stop)
		dai_dma_release(dev);

	dma_sg_free(&config->elem_array);
	if (dd->z_config) {
		rfree(dd->z_config->head_block);
		rfree(dd->z_config);
		dd->z_config = NULL;
	}

	if (dd->dma_buffer) {
		buffer_free(dd->dma_buffer);
		dd->dma_buffer = NULL;
	}

	dd->wallclock = 0;
	dd->total_data_processed = 0;
	dd->xrun = 0;
	comp_set_state(dev, COMP_TRIGGER_RESET);

	return 0;
}

static void dai_update_start_position(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);

	/* update starting wallclock */
	platform_dai_wallclock(dev, &dd->wallclock);
}

/* used to pass standard and bespoke commands (with data) to a component */
static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	int prev_state = dev->state;
	int ret;

	comp_dbg(dev, "dai_comp_trigger_internal(), command = %u", cmd);

	ret = comp_set_state(dev, cmd);
	if (ret < 0)
		return ret;

	if (ret == COMP_STATUS_STATE_ALREADY_SET)
		return PPL_STATUS_PATH_STOP;

	switch (cmd) {
	case COMP_TRIGGER_START:
		comp_dbg(dev, "dai_comp_trigger_internal(), START");

		/* only start the DAI if we are not XRUN handling */
		if (dd->xrun == 0) {
			ret = dma_start(dd->chan->dma->z_dev, dd->chan->index);
			if (ret < 0)
				return ret;

			/* start the DAI */
			dai_trigger_op(dd->dai, cmd, dev->direction);
		} else {
			dd->xrun = 0;
		}

		dai_update_start_position(dev);
		break;
	case COMP_TRIGGER_RELEASE:
		/* Before release, clear the buffer so that no stale history
		 * data is sent out after release. This is only supported in
		 * capture mode.
		 */
		if (dev->direction == SOF_IPC_STREAM_CAPTURE) {
			struct comp_buffer __sparse_cache *buffer_c =
				buffer_acquire(dd->dma_buffer);

			buffer_zero(buffer_c);
			buffer_release(buffer_c);
		}

		/* only start the DAI if we are not XRUN handling */
		if (dd->xrun == 0) {
			/* recover valid start position */
			if (dev->state == COMP_STATE_ACTIVE) {
				ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index);
				if (ret < 0)
					return ret;
			}

			/* dma_config needed after stop */
			ret = dma_config(dd->chan->dma->z_dev, dd->chan->index, dd->z_config);
			if (ret < 0)
				return ret;

			ret = dma_start(dd->chan->dma->z_dev, dd->chan->index);
			if (ret < 0)
				return ret;

			/* start the DAI */
			dai_trigger_op(dd->dai, cmd, dev->direction);
		} else {
			dd->xrun = 0;
		}

		dai_update_start_position(dev);
		break;
	case COMP_TRIGGER_XRUN:
		comp_info(dev, "dai_comp_trigger_internal(), XRUN");
		dd->xrun = 1;

		COMPILER_FALLTHROUGH;
	case COMP_TRIGGER_STOP:
		comp_dbg(dev, "dai_comp_trigger_internal(), STOP");
		/*
		 * Some platforms cannot simply disable the DMA channel during
		 * a transfer, because doing so can hang the whole DMA
		 * controller. Therefore, stop the DMA first and let the DAI
		 * drain the FIFO in order to stop the channel as soon as
		 * possible.
		 */
#if CONFIG_COMP_DAI_TRIGGER_ORDER_REVERSE
		ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index);
		dai_trigger_op(dd->dai, cmd, dev->direction);
#else
		dai_trigger_op(dd->dai, cmd, dev->direction);
		ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index);
		if (ret) {
			comp_warn(dev, "dma was stopped earlier");
			ret = 0;
		}
#endif
		break;
	case COMP_TRIGGER_PAUSE:
		comp_dbg(dev, "dai_comp_trigger_internal(), PAUSE");
#if CONFIG_COMP_DAI_TRIGGER_ORDER_REVERSE
		if (prev_state == COMP_STATE_ACTIVE) {
			ret = dma_suspend(dd->chan->dma->z_dev, dd->chan->index);
		} else {
			comp_warn(dev, "dma was stopped earlier");
			ret = 0;
		}
		dai_trigger_op(dd->dai, cmd, dev->direction);
#else
		dai_trigger_op(dd->dai, cmd, dev->direction);
		if (prev_state == COMP_STATE_ACTIVE) {
			ret = dma_suspend(dd->chan->dma->z_dev, dd->chan->index);
		} else {
			comp_warn(dev, "dma was stopped earlier");
			ret = 0;
		}
#endif
		break;
	case COMP_TRIGGER_PRE_START:
	case COMP_TRIGGER_PRE_RELEASE:
		/* only start the DAI if we are not XRUN handling */
		if (dd->xrun)
			dd->xrun = 0;
		else
			dai_trigger_op(dd->dai, cmd, dev->direction);
		break;
	}

	return ret;
}

static int dai_comp_trigger(struct comp_dev *dev, int cmd)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_group *group = dd->group;
	uint32_t irq_flags;
	int ret = 0;

	/* DAI not in a group, use normal trigger */
	if (!group) {
		comp_dbg(dev, "dai_comp_trigger(), non-atomic trigger");
		return dai_comp_trigger_internal(dev, cmd);
	}

	/* DAI is grouped, so only trigger when the entire group is ready */

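	/*
	 * Example with a hypothetical group of three DAIs: the first trigger
	 * sets trigger_counter = 2, the second decrements it to 1, and the
	 * third decrements it to 0 and fires the atomic trigger for every
	 * group member via the notifier below.
	 */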
	if (!group->trigger_counter) {
		/* First DAI to receive the trigger command,
		 * prepare for atomic trigger
		 */
		comp_dbg(dev, "dai_comp_trigger(), begin atomic trigger for group %d",
			 group->group_id);
		group->trigger_cmd = cmd;
		group->trigger_counter = group->num_dais - 1;
	} else if (group->trigger_cmd != cmd) {
		/* Already processing a different trigger command */
		comp_err(dev, "dai_comp_trigger(), already processing atomic trigger");
		ret = -EAGAIN;
	} else {
		/* Count down the number of remaining DAIs required
		 * to receive the trigger command before atomic trigger
		 * takes place
		 */
		group->trigger_counter--;
		comp_dbg(dev, "dai_comp_trigger(), trigger counter %d, group %d",
			 group->trigger_counter, group->group_id);

		if (!group->trigger_counter) {
			/* The counter has reached 0, which means
			 * all DAIs have received the same trigger command
			 * and we may begin the actual trigger process
			 * synchronously.
			 */

			irq_local_disable(irq_flags);
			notifier_event(group, NOTIFIER_ID_DAI_TRIGGER,
				       BIT(cpu_get_id()), NULL, 0);
			irq_local_enable(irq_flags);

			/* return error of last trigger */
			ret = group->trigger_ret;
		}
	}

	return ret;
}

/* report xrun occurrence */
static void dai_report_xrun(struct comp_dev *dev, uint32_t bytes)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct comp_buffer __sparse_cache *buf_c = buffer_acquire(dd->local_buffer);

	if (dev->direction == SOF_IPC_STREAM_PLAYBACK) {
		comp_err(dev, "dai_report_xrun(): underrun due to no data available");
		comp_underrun(dev, buf_c, bytes);
	} else {
		comp_err(dev, "dai_report_xrun(): overrun due to no space available");
		comp_overrun(dev, buf_c, bytes);
	}

	buffer_release(buf_c);
}

/* copy and process stream data from source to sink buffers */
static int dai_copy(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	uint32_t dma_fmt;
	uint32_t sampling;
	struct comp_buffer __sparse_cache *buf_c;
	struct dma_status stat;
	uint32_t avail_bytes = 0;
	uint32_t free_bytes = 0;
	uint32_t copy_bytes = 0;
	uint32_t src_samples;
	uint32_t sink_samples;
	uint32_t samples;
	int ret;

	comp_dbg(dev, "dai_copy()");

	/* get data sizes from DMA */
	ret = dma_get_status(dd->chan->dma->z_dev, dd->chan->index, &stat);
	switch (ret) {
	case 0:
		break;
	case -EPIPE:
		/* dma_get_status() returns -EPIPE but still valid status
		 * content if an xrun occurs
		 */
		if (dev->direction == SOF_IPC_STREAM_PLAYBACK)
			comp_dbg(dev, "dai_copy(): dma_get_status() underrun occurred, ret = %u",
				 ret);
		else
			comp_dbg(dev, "dai_copy(): dma_get_status() overrun occurred, ret = %u",
				 ret);
		break;
	default:
		return ret;
	}

	avail_bytes = stat.pending_length;
	free_bytes = stat.free;

	buf_c = buffer_acquire(dd->dma_buffer);

	dma_fmt = buf_c->stream.frame_fmt;
	sampling = get_sample_bytes(dma_fmt);

	buffer_release(buf_c);

	buf_c = buffer_acquire(dd->local_buffer);

	/* calculate minimum size to copy */
	if (dev->direction == SOF_IPC_STREAM_PLAYBACK) {
		src_samples = audio_stream_get_avail_samples(&buf_c->stream);
		sink_samples = free_bytes / sampling;
		samples = MIN(src_samples, sink_samples);
	} else {
		src_samples = avail_bytes / sampling;
		sink_samples = audio_stream_get_free_samples(&buf_c->stream);
		samples = MIN(src_samples, sink_samples);
	}

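	/*
	 * Example with hypothetical numbers: playback with 480 samples
	 * available in the local buffer, free_bytes = 1024 and 4-byte samples
	 * gives sink_samples = 1024 / 4 = 256, so samples = MIN(480, 256) and
	 * copy_bytes = 256 * 4 = 1024.
	 */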
	/* Limit bytes per copy to one period for the whole pipeline in order
	 * to avoid a high load spike; if FAST_MODE is enabled, the one-period
	 * limitation is omitted.
	 */
	if (!(dd->ipc_config.feature_mask & BIT(IPC4_COPIER_FAST_MODE)))
		samples = MIN(samples, dd->period_bytes / sampling);

	copy_bytes = samples * sampling;

	comp_dbg(dev, "dai_copy(), dir: %d copy_bytes= 0x%x, frames= %d",
		 dev->direction, copy_bytes,
		 samples / buf_c->stream.channels);

	buffer_release(buf_c);

	/* check the possibility of glitch occurrence */
	if (dev->direction == SOF_IPC_STREAM_PLAYBACK &&
	    copy_bytes + avail_bytes < dd->period_bytes)
		comp_warn(dev, "dai_copy(): Copy_bytes %d + avail bytes %d < period bytes %d, possible glitch",
			  copy_bytes, avail_bytes, dd->period_bytes);
	else if (dev->direction == SOF_IPC_STREAM_CAPTURE &&
		 copy_bytes + free_bytes < dd->period_bytes)
		comp_warn(dev, "dai_copy(): Copy_bytes %d + free bytes %d < period bytes %d, possible glitch",
			  copy_bytes, free_bytes, dd->period_bytes);

	/* return if nothing to copy */
	if (!copy_bytes) {
		comp_warn(dev, "dai_copy(): nothing to copy");
		return 0;
	}

	/* trigger the optional DAI_TRIGGER_COPY, which prepares the dai to copy */
	ret = dai_trigger(dd->dai->dev, dev->direction, DAI_TRIGGER_COPY);
	if (ret < 0)
		comp_warn(dev, "dai_copy(): dai trigger copy failed");

	if (dai_dma_cb(dev, copy_bytes) == DMA_CB_STATUS_END)
		dma_stop(dd->chan->dma->z_dev, dd->chan->index);

	ret = dma_reload(dd->chan->dma->z_dev, dd->chan->index, 0, 0, copy_bytes);
	if (ret < 0) {
		dai_report_xrun(dev, copy_bytes);
		return ret;
	}

	dai_dma_position_update(dev);

	return ret;
}

/**
 * \brief Get DAI parameters and configure timestamping
 * \param[in, out] dev DAI device.
 * \return Error code.
 *
 * This function retrieves various DAI parameters such as type, direction, index and DMA
 * controller information that are needed when configuring HW timestamping. Note that the
 * DAI must be prepared before this function is used (for the DMA information); if not, an
 * error is returned.
 */
static int dai_ts_config_op(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc_config_dai *dai = &dd->ipc_config;
	struct dai_ts_cfg cfg;

	comp_dbg(dev, "dai_ts_config()");
	if (!dd->chan) {
		comp_err(dev, "dai_ts_config(), No DMA channel information");
		return -EINVAL;
	}

	switch (dai->type) {
	case SOF_DAI_INTEL_SSP:
		cfg.type = DAI_INTEL_SSP;
		break;
	case SOF_DAI_INTEL_ALH:
		cfg.type = DAI_INTEL_ALH;
		break;
	case SOF_DAI_INTEL_DMIC:
		cfg.type = DAI_INTEL_DMIC;
		break;
	default:
		comp_err(dev, "dai_ts_config(), unsupported DAI type");
		return -EINVAL;
	}

	cfg.direction = dai->direction;
	cfg.index = dd->dai->index;
	cfg.dma_id = dd->dma->plat_data.id;
	cfg.dma_chan_index = dd->chan->index;
	cfg.dma_chan_count = dd->dma->plat_data.channels;

	return dai_ts_config(dd->dai->dev, &cfg);
}

static int dai_ts_start_op(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_ts_cfg cfg;

	comp_dbg(dev, "dai_ts_start()");

	return dai_ts_start(dd->dai->dev, &cfg);
}

static int dai_ts_get_op(struct comp_dev *dev, struct timestamp_data *tsd)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_ts_data tsdata;
	struct dai_ts_cfg cfg;
	int ret;

	comp_dbg(dev, "dai_ts_get()");

	ret = dai_ts_get(dd->dai->dev, &cfg, &tsdata);

	if (ret < 0)
		return ret;

	/* TODO: convert tsdata to struct timestamp_data */

	return ret;
}

static int dai_ts_stop_op(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dai_ts_cfg cfg;

	comp_dbg(dev, "dai_ts_stop()");

	return dai_ts_stop(dd->dai->dev, &cfg);
}

uint32_t dai_get_init_delay_ms(struct dai *dai)
{
	const struct dai_properties *props;
	k_spinlock_key_t key;
	uint32_t init_delay;

	if (!dai)
		return 0;

	key = k_spin_lock(&dai->lock);
	props = dai_get_properties(dai->dev, 0, 0);
	init_delay = props->reg_init_delay;
	k_spin_unlock(&dai->lock, key);

	return init_delay;
}

static uint64_t dai_get_processed_data(struct comp_dev *dev, uint32_t stream_no, bool input)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	uint64_t ret = 0;
	bool source = dev->direction == SOF_IPC_STREAM_CAPTURE;

	/* Return the value only if the direction and stream number match.
	 * The DAI supports only one stream.
	 */
	if (stream_no == 0 && source == input)
		ret = dd->total_data_processed;

	return ret;
}

static const struct comp_driver comp_dai = {
	.type = SOF_COMP_DAI,
	.uid = SOF_RT_UUID(dai_comp_uuid),
	.tctx = &dai_comp_tr,
	.ops = {
		.create = dai_new,
		.free = dai_free,
		.params = dai_params,
		.dai_get_hw_params = dai_comp_get_hw_params,
		.trigger = dai_comp_trigger,
		.copy = dai_copy,
		.prepare = dai_prepare,
		.reset = dai_reset,
		.position = dai_position,
		.dai_config = dai_config,
		.dai_ts_config = dai_ts_config_op,
		.dai_ts_start = dai_ts_start_op,
		.dai_ts_stop = dai_ts_stop_op,
		.dai_ts_get = dai_ts_get_op,
		.get_total_data_processed = dai_get_processed_data,
	},
};

static SHARED_DATA struct comp_driver_info comp_dai_info = {
	.drv = &comp_dai,
};

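/* register the DAI component driver with the component framework at init */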
UT_STATIC void sys_comp_dai_init(void)
{
	comp_register(platform_shared_get(&comp_dai_info, sizeof(comp_dai_info)));
}

DECLARE_MODULE(sys_comp_dai_init);
SOF_MODULE_INIT(dai, sys_comp_dai_init);
