// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2021 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
// Author: Rander Wang <rander.wang@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/ipc-config.h>
#include <sof/common.h>
#include <sof/drivers/alh.h>
#include <rtos/idc.h>
#include <rtos/alloc.h>
#include <sof/lib/dai.h>
#include <sof/lib/notifier.h>
#include <sof/platform.h>
#include <rtos/sof.h>
#include <ipc4/gateway.h>
#include <ipc/header.h>
#include <ipc4/alh.h>
#include <ipc4/ssp.h>
#include <ipc4/copier.h>
#include <ipc4/fw_reg.h>
#include <ipc/dai.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_DECLARE(ipc, CONFIG_SOF_LOG_LEVEL);

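/* Pick the DMA channel for a DAI gateway based on its type: HDA takes the
 * channel from the gateway node id in the copier config; SSP and DMIC use
 * channel 0, and ALH also reports 0 because its channel is assigned at
 * runtime. Unknown types return DMA_CHAN_INVALID.
 */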
int dai_config_dma_channel(struct comp_dev *dev, const void *spec_config)
{
	const struct ipc4_copier_module_cfg *copier_cfg = spec_config;
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc_config_dai *dai = &dd->ipc_config;
	int channel;

	switch (dai->type) {
	case SOF_DAI_INTEL_SSP:
		COMPILER_FALLTHROUGH;
	case SOF_DAI_INTEL_DMIC:
		channel = 0;
		break;
	case SOF_DAI_INTEL_HDA:
		channel = copier_cfg->gtw_cfg.node_id.f.v_index;
		break;
	case SOF_DAI_INTEL_ALH:
		/* As with HDA, the DMA channel is assigned at runtime,
		 * not during topology parsing.
		 */
		channel = 0;
		break;
	default:
		/* other types of DAIs are not handled for now */
		comp_err(dev, "dai_config_dma_channel(): Unknown dai type %d", dai->type);
		channel = DMA_CHAN_INVALID;
		break;
	}

	return channel;
}

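/* Set up the DAI-type specific parts of the DMA configuration: the DMA burst
 * size for SSP/DMIC/ALH, the stream id and 32-bit frame format for ALH, and
 * the DMA buffer size taken from the copier gateway configuration.
 */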
int ipc_dai_data_config(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc_config_dai *dai = &dd->ipc_config;
	struct ipc4_copier_module_cfg *copier_cfg = dd->dai_spec_config;
	struct dai *dai_p = dd->dai;
#ifndef CONFIG_ZEPHYR_NATIVE_DRIVERS
	struct alh_pdata *alh;
#endif

	if (!dai) {
		comp_err(dev, "dai_data_config(): no dai!\n");
		return -EINVAL;
	}

	comp_dbg(dev, "dai_data_config() dai type = %d index = %d dd %p",
		 dai->type, dai->dai_index, dd);

	/* cannot configure DAI while active */
	if (dev->state == COMP_STATE_ACTIVE) {
		comp_info(dev, "dai_data_config(): Component is in active state.");
		return 0;
	}

	switch (dai->type) {
	case SOF_DAI_INTEL_SSP:
		/* set the DMA burst elems to the channel (slot) count */
		dd->config.burst_elems = copier_cfg->base.audio_fmt.channels_count;
		break;
	case SOF_DAI_INTEL_DMIC:
		/* The FIFO depth is provided by the DMIC driver, which retrieves it
		 * from the configuration blob.
		 */
		dd->config.burst_elems = dai_get_fifo_depth(dd->dai, dai->direction);
		comp_dbg(dev, "dai_data_config() burst_elems = %d", dd->config.burst_elems);
		break;
	case SOF_DAI_INTEL_HDA:
		break;
	case SOF_DAI_INTEL_ALH:
#ifdef CONFIG_ZEPHYR_NATIVE_DRIVERS
		dd->stream_id = dai_get_stream_id(dai_p, dai->direction);
#else
		alh = dai_get_drvdata(dai_p);
		/* As with HDA, the DMA channel is assigned at runtime,
		 * not during topology parsing.
		 */
		dd->stream_id = alh->params.stream_id;
#endif
		/* The SoundWire HW FIFO always requires 32-bit MSB-aligned sample
		 * data for all formats (8/16/24/32 bit).
		 */
		dev->ipc_config.frame_fmt = SOF_IPC_FRAME_S32_LE;

		dd->config.burst_elems = dai_get_fifo_depth(dd->dai, dai->direction);

		comp_dbg(dev, "dai_data_config() SOF_DAI_INTEL_ALH dev->ipc_config.frame_fmt: %d, stream_id: %d",
			 dev->ipc_config.frame_fmt, dd->stream_id);

		break;
	default:
		/* other types of DAIs are not handled for now */
		comp_warn(dev, "dai_data_config(): Unknown dai type %d", dai->type);
		return -EINVAL;
	}

	dai->dma_buffer_size = copier_cfg->gtw_cfg.dma_buffer_size;

	/* some DAIs may not need extra config */
	return 0;
}

/* In IPC4 the DAI config is not sent as a separate IPC message, so this is a stub */
int ipc_comp_dai_config(struct ipc *ipc, struct ipc_config_dai *common_config,
			void *spec_config)
{
	return 0;
}

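/* Release the DMA channel owned by the DAI: clear its LLP slot in the memory
 * window (if one was assigned), stop the DMA if needed, unregister the copy
 * notifier and give the channel back to the DMA driver.
 */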
void dai_dma_release(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);

	/* cannot release the DMA channel while the DAI is active */
	if (dev->state == COMP_STATE_ACTIVE) {
		comp_info(dev, "dai_config(): Component is in active state. Ignore resetting");
		return;
	}

	/* release the allocated DMA channel first */
	if (dd->chan) {
		struct ipc4_llp_reading_slot slot;

		if (dd->slot_info.node_id) {
			k_spinlock_key_t key;

			/* reset the LLP position in the memory window to 0 for the reset state */
			memset_s(&slot, sizeof(slot), 0, sizeof(slot));
			slot.node_id = dd->slot_info.node_id;

			key = k_spin_lock(&sof_get()->fw_reg_lock);
			mailbox_sw_regs_write(dd->slot_info.reg_offset, &slot, sizeof(slot));
			k_spin_unlock(&sof_get()->fw_reg_lock, key);
		}

		/* The host driver's stop sequence is pause followed by reset. The DMA
		 * channel is released on reset, so its state must be moved from pause
		 * to stop here.
		 * TODO: refine power management when the stream is paused
		 */
#if CONFIG_ZEPHYR_NATIVE_DRIVERS
		/* if reset follows pause, the DMA has already been stopped */
		if (dev->state != COMP_STATE_PAUSED)
			dma_stop(dd->chan->dma->z_dev, dd->chan->index);

		/* remove callback */
		notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY);
		dma_release_channel(dd->chan->dma->z_dev, dd->chan->index);
#else
		dma_stop_legacy(dd->chan);

		/* remove callback */
		notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY);
		dma_channel_put_legacy(dd->chan);
#endif
		dd->chan->dev_data = NULL;
		dd->chan = NULL;
	}
}

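/* Free the LLP reading slot used by this DAI: zero the slot in the memory
 * window under the fw_reg lock and clear the cached slot info.
 */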
void dai_release_llp_slot(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc4_llp_reading_slot slot;
	k_spinlock_key_t key;

	if (!dd->slot_info.node_id)
		return;

	memset_s(&slot, sizeof(slot), 0, sizeof(slot));

	/* clear node id for released llp slot */
	key = k_spin_lock(&sof_get()->fw_reg_lock);
	mailbox_sw_regs_write(dd->slot_info.reg_offset, &slot, sizeof(slot));
	k_spin_unlock(&sof_get()->fw_reg_lock, key);

	dd->slot_info.reg_offset = 0;
	dd->slot_info.node_id = 0;
}

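/* Find a free LLP reading slot in the memory window (node_id == 0), claim it
 * by writing the gateway node id, and return its register offset, or -EINVAL
 * if all slots are in use.
 */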
static int dai_get_unused_llp_slot(struct comp_dev *dev,
				   union ipc4_connector_node_id *node)
{
	struct ipc4_llp_reading_slot slot;
	k_spinlock_key_t key;
	uint32_t max_slot;
	uint32_t offset;
	int i;

	/* SoundWire with multiple gateways uses the SNDW reading slots */
	if (node->f.dma_type == ipc4_alh_link_output_class && is_multi_gateway(*node)) {
		offset = SRAM_REG_LLP_SNDW_READING_SLOTS;
		max_slot = IPC4_MAX_LLP_SNDW_READING_SLOTS - 1;
	} else {
		offset = SRAM_REG_LLP_GPDMA_READING_SLOTS;
		max_slot = IPC4_MAX_LLP_GPDMA_READING_SLOTS;
	}

	key = k_spin_lock(&sof_get()->fw_reg_lock);

	/* find an unused LLP slot, i.e. one whose node_id is zero */
	for (i = 0; i < max_slot; i++, offset += sizeof(slot)) {
		uint32_t node_id;

		node_id = mailbox_sw_reg_read(offset);
		if (!node_id)
			break;
	}

	if (i >= max_slot) {
		comp_err(dev, "can't find free slot");
		k_spin_unlock(&sof_get()->fw_reg_lock, key);
		return -EINVAL;
	}

	memset_s(&slot, sizeof(slot), 0, sizeof(slot));
	slot.node_id = node->dw & IPC4_NODE_ID_MASK;
	mailbox_sw_regs_write(offset, &slot, sizeof(slot));

	k_spin_unlock(&sof_get()->fw_reg_lock, key);

	return offset;
}

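/* Reserve an LLP reading slot for this DAI gateway (HDA needs none) and store
 * the slot's node id and register offset in the DAI data for later position
 * updates.
 */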
static int dai_init_llp_info(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc4_copier_module_cfg *copier_cfg;
	union ipc4_connector_node_id node;
	int ret;

	copier_cfg = dd->dai_spec_config;
	node = copier_cfg->gtw_cfg.node_id;

	/* HDA doesn't use an LLP slot */
	if (dd->ipc_config.type == SOF_DAI_INTEL_HDA)
		return 0;

	/* other gateway types, e.g. EVAD, are not supported */
	if (node.f.dma_type >= ipc4_max_connector_node_id_type) {
		comp_err(dev, "unsupported gateway %d", (int)node.f.dma_type);
		return -EINVAL;
	}

	ret = dai_get_unused_llp_slot(dev, &node);
	if (ret < 0)
		return ret;

	dd->slot_info.node_id = node.dw & IPC4_NODE_ID_MASK;
	dd->slot_info.reg_offset = ret;

	return 0;
}

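/* Apply the copier gateway configuration to the DAI: check that the message
 * targets this DAI and that it is not active or already configured, cache a
 * copy of the copier config, set up the LLP slot and hand the blob to the
 * DAI driver via dai_set_config().
 */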
int dai_config(struct comp_dev *dev, struct ipc_config_dai *common_config,
	       const void *spec_config)
{
	const struct ipc4_copier_module_cfg *copier_cfg = spec_config;
	struct dai_data *dd = comp_get_drvdata(dev);
	int size;
	int ret;

	/* ignore if the message is not for this DAI id/type */
	if (dd->ipc_config.dai_index != common_config->dai_index ||
	    dd->ipc_config.type != common_config->type)
		return 0;

	comp_info(dev, "dai_config() dai type = %d index = %d dd %p",
		  common_config->type, common_config->dai_index, dd);

	/* cannot configure DAI while active */
	if (dev->state == COMP_STATE_ACTIVE) {
		comp_info(dev, "dai_config(): Component is in active state. Ignore config");
		return 0;
	}

	if (dd->chan) {
		comp_info(dev, "dai_config(): Configured. dma channel index %d, ignore...",
			  dd->chan->index);
		return 0;
	}

#if CONFIG_COMP_DAI_GROUP
	if (common_config->group_id) {
		ret = dai_assign_group(dev, common_config->group_id);

		if (ret)
			return ret;
	}
#endif
	/* for compatibility, do nothing when asked to free the channel */
	if (dai_config_dma_channel(dev, spec_config) == DMA_CHAN_INVALID)
		return 0;

	dd->dai_dev = dev;

	/* allocate the dai_spec_config copy if not done yet */
	if (!dd->dai_spec_config) {
		size = sizeof(*copier_cfg);
		dd->dai_spec_config = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, size);
		if (!dd->dai_spec_config) {
			comp_err(dev, "dai_config(): No memory for dai_config size %d", size);
			return -ENOMEM;
		}

		ret = memcpy_s(dd->dai_spec_config, size, copier_cfg, size);
		if (ret < 0) {
			rfree(dd->dai_spec_config);
			dd->dai_spec_config = NULL;
			return -EINVAL;
		}
	}

	ret = dai_init_llp_info(dev);
	if (ret < 0)
		return ret;

	return dai_set_config(dd->dai, common_config, copier_cfg->gtw_cfg.config_data);
}

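/* Stream position reporting: the Zephyr build reads the transfer count from
 * the Zephyr DMA API, the legacy build from the legacy DMA driver interface.
 */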
#if CONFIG_ZEPHYR_NATIVE_DRIVERS
int dai_position(struct comp_dev *dev, struct sof_ipc_stream_posn *posn)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dma_status status;
	int ret;

	/* total processed bytes count */
	posn->dai_posn = dd->total_data_processed;

	platform_dai_wallclock(dev, &dd->wallclock);
	posn->wallclock = dd->wallclock;

	ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status);
	if (ret < 0)
		return ret;

	posn->comp_posn = status.total_copied;

	return 0;
}

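/* Publish the current link position: write the DMA transfer count (LLP) and
 * the wallclock into this DAI's LLP reading slot in the memory window so the
 * host can read it.
 */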
void dai_dma_position_update(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc4_llp_reading_slot slot;
	struct dma_status status;
	int ret;

	if (!dd->slot_info.node_id)
		return;

	ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status);
	if (ret < 0)
		return;

	platform_dai_wallclock(dev, &dd->wallclock);

	slot.node_id = dd->slot_info.node_id;
	slot.reading.llp_l = (uint32_t)status.total_copied;
	slot.reading.llp_u = (uint32_t)(status.total_copied >> 32);
	slot.reading.wclk_l = (uint32_t)dd->wallclock;
	slot.reading.wclk_u = (uint32_t)(dd->wallclock >> 32);

	mailbox_sw_regs_write(dd->slot_info.reg_offset, &slot, sizeof(slot));
}
#else
int dai_position(struct comp_dev *dev, struct sof_ipc_stream_posn *posn)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct dma_chan_status status;

	/* total processed bytes count */
	posn->dai_posn = dd->total_data_processed;

	platform_dai_wallclock(dev, &dd->wallclock);
	posn->wallclock = dd->wallclock;

	status.ipc_posn_data = &posn->comp_posn;
	dma_status_legacy(dd->chan, &status, dev->direction);

	return 0;
}

void dai_dma_position_update(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);
	struct ipc4_llp_reading_slot slot;
	struct dma_chan_status status;
	uint32_t llp_data[2];

	if (!dd->slot_info.node_id)
		return;

	status.ipc_posn_data = llp_data;
	dma_status_legacy(dd->chan, &status, dev->direction);

	platform_dai_wallclock(dev, &dd->wallclock);

	slot.node_id = dd->slot_info.node_id;
	slot.reading.llp_l = llp_data[0];
	slot.reading.llp_u = llp_data[1];
	slot.reading.wclk_l = (uint32_t)dd->wallclock;
	slot.reading.wclk_u = (uint32_t)(dd->wallclock >> 32);

	mailbox_sw_regs_write(dd->slot_info.reg_offset, &slot, sizeof(slot));
}
#endif