/*
 * Copyright (c) 2021, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_lpc_i2s

#include <string.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_i2s.h>
#include <fsl_dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/pinctrl.h>

LOG_MODULE_REGISTER(i2s_mcux_flexcomm);

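/*
 * The RX path keeps two DMA descriptors chained in a ping-pong
 * arrangement, so reception continues into the second buffer while the
 * first one is delivered to the application.
 */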
#define NUM_RX_DMA_BLOCKS	2

/* Device constant configuration parameters */
struct i2s_mcux_config {
	I2S_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config)(const struct device *dev);
	const struct pinctrl_dev_config *pincfg;
};

struct stream {
	int32_t state;
	const struct device *dev_dma;
	uint32_t channel; /* stores the channel for dma */
	struct i2s_config cfg;
	struct dma_config dma_cfg;
	bool last_block;
	struct k_msgq in_queue;
	struct k_msgq out_queue;
};

struct i2s_txq_entry {
	void *mem_block;
	size_t size;
};

struct i2s_mcux_data {
	struct stream rx;
	void *rx_in_msgs[CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT];
	void *rx_out_msgs[CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT];
	struct dma_block_config rx_dma_blocks[NUM_RX_DMA_BLOCKS];

	struct stream tx;
	/* For TX, the in_queue holds requests generated by the
	 * i2s_write() API call, whose size must be tracked
	 * separately from the buffer size.
	 * The out_queue tracks buffers that should be freed once
	 * the DMA has finished transferring them.
	 */
	struct i2s_txq_entry tx_in_msgs[CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT];
	void *tx_out_msgs[CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT];
	struct dma_block_config tx_dma_block;
};

static int i2s_mcux_flexcomm_cfg_convert(uint32_t base_frequency,
					 enum i2s_dir dir,
					 const struct i2s_config *i2s_cfg,
					 i2s_config_t *fsl_cfg)
{
	if (dir == I2S_DIR_RX) {
		I2S_RxGetDefaultConfig(fsl_cfg);
	} else if (dir == I2S_DIR_TX) {
		I2S_TxGetDefaultConfig(fsl_cfg);
	}

	fsl_cfg->dataLength = i2s_cfg->word_size;
	if ((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) ==
	    I2S_FMT_DATA_FORMAT_I2S) {
		/* Classic I2S. We always use 2 channels */
		fsl_cfg->frameLength = 2 * i2s_cfg->word_size;
	} else {
		fsl_cfg->frameLength = i2s_cfg->channels * i2s_cfg->word_size;
	}

	if (fsl_cfg->dataLength < 4 || fsl_cfg->dataLength > 32) {
		LOG_ERR("Unsupported data length");
		return -EINVAL;
	}

	if (fsl_cfg->frameLength < 4 || fsl_cfg->frameLength > 2048) {
		LOG_ERR("Unsupported frame length");
		return -EINVAL;
	}

	/* Set master/slave configuration */
	switch (i2s_cfg->options & (I2S_OPT_BIT_CLK_SLAVE |
				    I2S_OPT_FRAME_CLK_SLAVE)) {
	case I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_MASTER:
		fsl_cfg->masterSlave = kI2S_MasterSlaveNormalMaster;
		break;
	case I2S_OPT_BIT_CLK_SLAVE | I2S_OPT_FRAME_CLK_SLAVE:
		fsl_cfg->masterSlave = kI2S_MasterSlaveNormalSlave;
		break;
	case I2S_OPT_BIT_CLK_SLAVE | I2S_OPT_FRAME_CLK_MASTER:
		/* Master using external CLK */
		fsl_cfg->masterSlave = kI2S_MasterSlaveExtSckMaster;
		break;
	case I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_SLAVE:
		/* WS synchronized master */
		fsl_cfg->masterSlave = kI2S_MasterSlaveWsSyncMaster;
		break;
	}

	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		fsl_cfg->mode = kI2S_ModeI2sClassic;
		break;
	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		fsl_cfg->mode = kI2S_ModeDspWsShort;
		fsl_cfg->wsPol = true;
		break;
	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		fsl_cfg->mode = kI2S_ModeDspWsLong;
		fsl_cfg->wsPol = true;
		break;
	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		fsl_cfg->mode = kI2S_ModeDspWs50;
		fsl_cfg->wsPol = true;
		break;
	default:
		LOG_ERR("Unsupported I2S data format");
		return -EINVAL;
	}

	if (fsl_cfg->masterSlave == kI2S_MasterSlaveNormalMaster ||
	    fsl_cfg->masterSlave == kI2S_MasterSlaveWsSyncMaster) {
		fsl_cfg->divider = base_frequency /
				   i2s_cfg->frame_clk_freq /
				   fsl_cfg->frameLength;
	}
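	/*
	 * Worked example (clock values are illustrative): with a
	 * 24.576 MHz base clock, a 48 kHz frame clock and a 32-bit frame
	 * (2 x 16-bit words), the divider is 24576000 / 48000 / 32 = 16.
	 */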

	/*
	 * Set frame and bit clock polarity according to
	 * inversion flags.
	 */
	switch (i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) {
	case I2S_FMT_CLK_NF_NB:
		break;
	case I2S_FMT_CLK_NF_IB:
		fsl_cfg->sckPol = !fsl_cfg->sckPol;
		break;
	case I2S_FMT_CLK_IF_NB:
		fsl_cfg->wsPol = !fsl_cfg->wsPol;
		break;
	case I2S_FMT_CLK_IF_IB:
		fsl_cfg->sckPol = !fsl_cfg->sckPol;
		fsl_cfg->wsPol = !fsl_cfg->wsPol;
		break;
	default:
		LOG_ERR("Unsupported clock polarity");
		return -EINVAL;
	}

	return 0;
}

static const struct i2s_config *i2s_mcux_config_get(const struct device *dev,
						    enum i2s_dir dir)
{
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else {
		stream = &dev_data->tx;
	}

	if (stream->state == I2S_STATE_NOT_READY) {
		return NULL;
	}

	return &stream->cfg;
}

static int i2s_mcux_configure(const struct device *dev, enum i2s_dir dir,
			      const struct i2s_config *i2s_cfg)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream;
	uint32_t base_frequency;
	i2s_config_t fsl_cfg;
	int result;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state");
		return -EINVAL;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		stream->state = I2S_STATE_NOT_READY;
		return 0;
	}

	/*
	 * The memory blocks passed by the user to the i2s_write function are
	 * tightly packed next to each other.
	 * However, for an 8-bit word_size the I2S hardware expects each word
	 * to occupy 2 bytes, which does not match what is passed by the user.
	 * This will be addressed in a separate PR once the Zephyr API committee
	 * finalizes an I2S API for the user to probe hardware variations.
	 */
	if (i2s_cfg->word_size <= 8) {
		return -ENOTSUP;
	}

	if (!device_is_ready(cfg->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* Figure out function base clock */
	if (clock_control_get_rate(cfg->clock_dev,
				   cfg->clock_subsys, &base_frequency)) {
		return -EINVAL;
	}

	/*
	 * Validate the configuration by converting it to SDK
	 * format.
	 */
	result = i2s_mcux_flexcomm_cfg_convert(base_frequency, dir, i2s_cfg,
					       &fsl_cfg);
	if (result != 0) {
		return result;
	}

	/* Apply the configuration */
	if (dir == I2S_DIR_RX) {
		I2S_RxInit(cfg->base, &fsl_cfg);
	} else {
		I2S_TxInit(cfg->base, &fsl_cfg);
	}

	if ((i2s_cfg->channels > 2) &&
	    (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) !=
	    I2S_FMT_DATA_FORMAT_I2S) {
		/*
		 * More than 2 channels are enabled, so we need to enable
		 * secondary channel pairs.
		 */
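		/*
		 * Worked example: with 4 channels of 16-bit words, the loop
		 * below runs once (slot = 1) and enables secondary pair 0 at
		 * bit position 16 * 2 * 1 = 32, right after the primary pair.
		 */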
#if (defined(FSL_FEATURE_I2S_SUPPORT_SECONDARY_CHANNEL) && \
	FSL_FEATURE_I2S_SUPPORT_SECONDARY_CHANNEL)
		for (uint32_t slot = 1; slot < i2s_cfg->channels / 2; slot++) {
			/* Position must be set so that data does not overlap
			 * with previous channel pair. Each channel pair
			 * will occupy slots of "word_size" bits.
			 */
			I2S_EnableSecondaryChannel(cfg->base, slot - 1, false,
						   i2s_cfg->word_size * 2 * slot);
		}
#else
		/* No support */
		return -ENOTSUP;
#endif
	}

	/*
	 * The I2S API definition specifies that a "16 bit word will occupy
	 * 2 bytes, a 24 or 32 bit word will occupy 4 bytes". Therefore, we
	 * will assume that "odd" word sizes will be aligned to 16 or 32 bit
	 * boundaries.
	 *
	 * FIFO depth is controlled by the number of bits per word (DATALEN).
	 * Per the RM:
	 * If the data length is 4-16, the FIFO should be filled
	 * with two 16 bit values (one for left, one for right channel)
	 *
	 * If the data length is 17-24, the FIFO should be filled with two
	 * 24 bit values (one for left, one for right channel). We can just
	 * transfer 4 bytes, since the I2S API specifies 24 bit values would
	 * be aligned to a 32 bit boundary.
	 *
	 * If the data length is 25-32, the FIFO should be filled
	 * with one 32 bit value. First value is left channel, second is right.
	 *
	 * All this is to say that we can always use 4 byte transfer widths
	 * with the DMA engine, regardless of the data length.
	 */
	stream->dma_cfg.dest_data_size = 4U;
	stream->dma_cfg.source_data_size = 4U;

	/* Save configuration for get_config */
	memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config));

	stream->state = I2S_STATE_READY;
	return 0;
}

static inline void i2s_purge_stream_buffers(struct stream *stream,
					    struct k_mem_slab *mem_slab,
					    bool tx)
{
	void *buffer;

	if (tx) {
		struct i2s_txq_entry queue_entry;

		while (k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, queue_entry.mem_block);
		}
	} else {
		while (k_msgq_get(&stream->in_queue, &buffer, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, buffer);
		}
	}
	while (k_msgq_get(&stream->out_queue, &buffer, K_NO_WAIT) == 0) {
		k_mem_slab_free(mem_slab, buffer);
	}
}

static void i2s_mcux_tx_stream_disable(const struct device *dev, bool drop)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	I2S_Type *base = cfg->base;

	LOG_DBG("Stopping DMA channel %u for TX stream", stream->channel);
	dma_stop(stream->dev_dma, stream->channel);

	/* Clear TX error interrupt flag */
	base->FIFOSTAT = I2S_FIFOSTAT_TXERR(1U);
	I2S_DisableInterrupts(base, (uint32_t)kI2S_TxErrorFlag);

	if (base->CFG1 & I2S_CFG1_MAINENABLE_MASK) {
		/* Wait until all transmitted data get out of FIFO */
		while ((base->FIFOSTAT & I2S_FIFOSTAT_TXEMPTY_MASK) == 0U) {
		}
		/*
		 * The last piece of valid data may still be shifting out of
		 * the I2S at this moment.
		 */
		/* Write additional data to FIFO */
		base->FIFOWR = 0U;
		while ((base->FIFOSTAT & I2S_FIFOSTAT_TXEMPTY_MASK) == 0U) {
		}

		/* At this moment the additional data is out of FIFO, we can stop I2S */
		/* Disable TX DMA */
		base->FIFOCFG &= (~I2S_FIFOCFG_DMATX_MASK);
		base->FIFOCFG |= I2S_FIFOCFG_EMPTYTX_MASK;

		I2S_Disable(base);
	}

	/* purge buffers queued in the stream */
	if (drop) {
		i2s_purge_stream_buffers(stream, stream->cfg.mem_slab, true);
	}
}

static void i2s_mcux_rx_stream_disable(const struct device *dev, bool drop)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	I2S_Type *base = cfg->base;

	LOG_DBG("Stopping DMA channel %u for RX stream", stream->channel);
	dma_stop(stream->dev_dma, stream->channel);

	/* Clear RX error interrupt flag */
	base->FIFOSTAT = I2S_FIFOSTAT_RXERR(1U);
	I2S_DisableInterrupts(base, (uint32_t)kI2S_RxErrorFlag);

	/* stop transfer */
	/* Disable Rx DMA */
	base->FIFOCFG &= (~I2S_FIFOCFG_DMARX_MASK);
	base->FIFOCFG |= I2S_FIFOCFG_EMPTYRX_MASK;

	I2S_Disable(base);

	/* purge buffers queued in the stream */
	if (drop) {
		i2s_purge_stream_buffers(stream, stream->cfg.mem_slab, false);
	}
}

static void i2s_mcux_config_dma_blocks(const struct device *dev,
				       enum i2s_dir dir, uint32_t *buffer,
				       size_t block_size)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	I2S_Type *base = cfg->base;
	struct dma_block_config *blk_cfg;
	struct stream *stream;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
		blk_cfg = &dev_data->rx_dma_blocks[0];
		memset(blk_cfg, 0, sizeof(dev_data->rx_dma_blocks));
	} else {
		stream = &dev_data->tx;
		blk_cfg = &dev_data->tx_dma_block;
		memset(blk_cfg, 0, sizeof(dev_data->tx_dma_block));
	}
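	/*
	 * RX pre-programs both DMA blocks as a linked chain (with a reload
	 * enabled on the first block), so reception rolls over into the
	 * second buffer without software intervention. TX only needs a
	 * single block per i2s_write() request.
	 */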

	stream->dma_cfg.head_block = blk_cfg;

	if (dir == I2S_DIR_RX) {
		blk_cfg->source_address = (uint32_t)&base->FIFORD;
		blk_cfg->dest_address = (uint32_t)buffer[0];
		blk_cfg->block_size = block_size;
		blk_cfg->next_block = &dev_data->rx_dma_blocks[1];
		blk_cfg->dest_reload_en = 1;

		blk_cfg = &dev_data->rx_dma_blocks[1];
		blk_cfg->source_address = (uint32_t)&base->FIFORD;
		blk_cfg->dest_address = (uint32_t)buffer[1];
		blk_cfg->block_size = block_size;
	} else {
		blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
		blk_cfg->source_address = (uint32_t)buffer;
		blk_cfg->block_size = block_size;
	}

	stream->dma_cfg.user_data = (void *)dev;

	dma_config(stream->dev_dma, stream->channel, &stream->dma_cfg);

	LOG_DBG("dma_slot is %d", stream->dma_cfg.dma_slot);
	LOG_DBG("channel_direction is %d", stream->dma_cfg.channel_direction);
	LOG_DBG("complete_callback_en is %d",
		stream->dma_cfg.complete_callback_en);
	LOG_DBG("error_callback_dis is %d", stream->dma_cfg.error_callback_dis);
	LOG_DBG("source_handshake is %d", stream->dma_cfg.source_handshake);
	LOG_DBG("dest_handshake is %d", stream->dma_cfg.dest_handshake);
	LOG_DBG("channel_priority is %d", stream->dma_cfg.channel_priority);
	LOG_DBG("source_chaining_en is %d", stream->dma_cfg.source_chaining_en);
	LOG_DBG("dest_chaining_en is %d", stream->dma_cfg.dest_chaining_en);
	LOG_DBG("linked_channel is %d", stream->dma_cfg.linked_channel);
	LOG_DBG("source_data_size is %d", stream->dma_cfg.source_data_size);
	LOG_DBG("dest_data_size is %d", stream->dma_cfg.dest_data_size);
	LOG_DBG("source_burst_length is %d", stream->dma_cfg.source_burst_length);
	LOG_DBG("dest_burst_length is %d", stream->dma_cfg.dest_burst_length);
	LOG_DBG("block_count is %d", stream->dma_cfg.block_count);
}

/* This function is executed in the interrupt context */
static void i2s_mcux_dma_tx_callback(const struct device *dma_dev, void *arg,
				     uint32_t channel, int status)
{
	const struct device *dev = (const struct device *)arg;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	struct i2s_txq_entry queue_entry;
	int ret;

	LOG_DBG("tx cb: %d", stream->state);

	ret = k_msgq_get(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT);
	if (ret == 0) {
		/* transmission complete. free the buffer */
		k_mem_slab_free(stream->cfg.mem_slab, queue_entry.mem_block);
	} else {
		LOG_ERR("no buffer in output queue for channel %u", channel);
	}

	/* Received a STOP trigger, terminate TX immediately */
	if (stream->last_block) {
		stream->state = I2S_STATE_READY;
		i2s_mcux_tx_stream_disable(dev, false);
		LOG_DBG("TX STOPPED");
		return;
	}

	switch (stream->state) {
	case I2S_STATE_RUNNING:
	case I2S_STATE_STOPPING:
		/* get the next buffer from queue */
		ret = k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT);
		if (ret == 0) {
			/* config the DMA */
			i2s_mcux_config_dma_blocks(dev, I2S_DIR_TX,
						   (uint32_t *)queue_entry.mem_block,
						   queue_entry.size);
			k_msgq_put(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT);
			dma_start(stream->dev_dma, stream->channel);
		}

		if (ret || status < 0) {
			/*
			 * DMA encountered an error (status < 0)
			 * or
			 * No buffers in input queue
			 */
			LOG_DBG("DMA status %08x channel %u k_msgq_get ret %d",
				status, channel, ret);
			if (stream->state == I2S_STATE_STOPPING) {
				stream->state = I2S_STATE_READY;
			} else {
				stream->state = I2S_STATE_ERROR;
			}
			i2s_mcux_tx_stream_disable(dev, false);
		}
		break;
	case I2S_STATE_ERROR:
		i2s_mcux_tx_stream_disable(dev, true);
		break;
	}
}

static void i2s_mcux_dma_rx_callback(const struct device *dma_dev, void *arg,
				     uint32_t channel, int status)
{
	const struct device *dev = (const struct device *)arg;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	void *buffer;
	int ret;

	LOG_DBG("rx cb: %d", stream->state);

	if (status < 0) {
		stream->state = I2S_STATE_ERROR;
		i2s_mcux_rx_stream_disable(dev, false);
		return;
	}

	switch (stream->state) {
	case I2S_STATE_STOPPING:
	case I2S_STATE_RUNNING:
		/* retrieve buffer from input queue */
		ret = k_msgq_get(&stream->in_queue, &buffer, K_NO_WAIT);
		__ASSERT_NO_MSG(ret == 0);

		/* put buffer to output queue */
		ret = k_msgq_put(&stream->out_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer %p -> out_queue %p err %d", buffer,
				&stream->out_queue, ret);
			i2s_mcux_rx_stream_disable(dev, false);
			stream->state = I2S_STATE_ERROR;
		}
		if (stream->state == I2S_STATE_RUNNING) {
			/* allocate new buffer for next audio frame */
			ret = k_mem_slab_alloc(stream->cfg.mem_slab, &buffer, K_NO_WAIT);
			if (ret != 0) {
				LOG_ERR("buffer alloc from slab %p err %d",
					stream->cfg.mem_slab, ret);
				i2s_mcux_rx_stream_disable(dev, false);
				stream->state = I2S_STATE_ERROR;
			} else {
				const struct i2s_mcux_config *cfg = dev->config;
				I2S_Type *base = cfg->base;

				dma_reload(stream->dev_dma, stream->channel,
					   (uint32_t)&base->FIFORD, (uint32_t)buffer,
					   stream->cfg.block_size);
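				/*
				 * The reload above re-arms the channel with
				 * the new buffer, keeping the RX ping-pong
				 * chain supplied with a destination.
				 */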
				/* put buffer in input queue */
				ret = k_msgq_put(&stream->in_queue, &buffer, K_NO_WAIT);
				if (ret != 0) {
					LOG_ERR("buffer %p -> in_queue %p err %d",
						buffer, &stream->in_queue, ret);
				}
				dma_start(stream->dev_dma, stream->channel);
			}
		} else {
			/* Received a STOP/DRAIN trigger */
			i2s_mcux_rx_stream_disable(dev, true);
			stream->state = I2S_STATE_READY;
		}
		break;
	case I2S_STATE_ERROR:
		i2s_mcux_rx_stream_disable(dev, true);
		break;
	}
}

static int i2s_mcux_tx_stream_start(const struct device *dev)
{
	int ret = 0;
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	I2S_Type *base = cfg->base;
	struct i2s_txq_entry queue_entry;

	/* retrieve buffer from input queue */
	ret = k_msgq_get(&stream->in_queue, &queue_entry, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("No buffer in input queue to start transmission");
		return ret;
	}

	i2s_mcux_config_dma_blocks(dev, I2S_DIR_TX,
				   (uint32_t *)queue_entry.mem_block,
				   queue_entry.size);

	/* put buffer in output queue */
	ret = k_msgq_put(&stream->out_queue, &queue_entry.mem_block, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("failed to put buffer in output queue");
		return ret;
	}

	/* Enable TX DMA */
	base->FIFOCFG |= I2S_FIFOCFG_DMATX_MASK;

	ret = dma_start(stream->dev_dma, stream->channel);
	if (ret < 0) {
		LOG_ERR("dma_start failed (%d)", ret);
		return ret;
	}

	I2S_Enable(base);
	I2S_EnableInterrupts(base, (uint32_t)kI2S_TxErrorFlag);

	return 0;
}

static int i2s_mcux_rx_stream_start(const struct device *dev)
{
	int ret = 0;
	void *buffer[NUM_RX_DMA_BLOCKS];
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	I2S_Type *base = cfg->base;
	uint8_t num_of_bufs;

	num_of_bufs = k_mem_slab_num_free_get(stream->cfg.mem_slab);

	/*
	 * Need at least two buffers on the RX memory slab for
	 * reliable DMA reception.
	 */
	if (num_of_bufs <= 1) {
		return -EINVAL;
	}

	for (int i = 0; i < NUM_RX_DMA_BLOCKS; i++) {
		ret = k_mem_slab_alloc(stream->cfg.mem_slab, &buffer[i],
				       K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer alloc from mem_slab failed (%d)", ret);
			return ret;
		}
	}

	i2s_mcux_config_dma_blocks(dev, I2S_DIR_RX, (uint32_t *)buffer,
				   stream->cfg.block_size);

	/* put buffers in input queue */
	for (int i = 0; i < NUM_RX_DMA_BLOCKS; i++) {
		ret = k_msgq_put(&stream->in_queue, &buffer[i], K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("failed to put buffer in input queue");
			return ret;
		}
	}

	/* Enable RX DMA */
	base->FIFOCFG |= I2S_FIFOCFG_DMARX_MASK;

	ret = dma_start(stream->dev_dma, stream->channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA Ch%d (%d)", stream->channel, ret);
		return ret;
	}

	I2S_Enable(base);
	I2S_EnableInterrupts(base, (uint32_t)kI2S_RxErrorFlag);

	return 0;
}

static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
			    enum i2s_trigger_cmd cmd)
{
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream;
	unsigned int key;
	int ret = 0;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}
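	/*
	 * Trigger handling below follows the Zephyr I2S state machine:
	 * READY -> RUNNING on START, RUNNING -> STOPPING on STOP/DRAIN
	 * (the DMA callbacks complete the transition back to READY),
	 * DROP returns to READY immediately, and PREPARE recovers from
	 * ERROR.
	 */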

	key = irq_lock();

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %d",
				stream->state);
			ret = -EIO;
			break;
		}

		if (dir == I2S_DIR_TX) {
			ret = i2s_mcux_tx_stream_start(dev);
		} else {
			ret = i2s_mcux_rx_stream_start(dev);
		}

		if (ret < 0) {
			LOG_ERR("START trigger failed %d", ret);
			break;
		}

		stream->state = I2S_STATE_RUNNING;
		stream->last_block = false;
		break;

	case I2S_TRIGGER_STOP:
		if (stream->state != I2S_STATE_RUNNING) {
			LOG_ERR("STOP trigger: invalid state %d", stream->state);
			ret = -EIO;
			break;
		}
		stream->state = I2S_STATE_STOPPING;
		stream->last_block = true;
		break;

	case I2S_TRIGGER_DRAIN:
		if (stream->state != I2S_STATE_RUNNING) {
			LOG_ERR("DRAIN trigger: invalid state %d", stream->state);
			ret = -EIO;
			break;
		}
		stream->state = I2S_STATE_STOPPING;
		break;

	case I2S_TRIGGER_DROP:
		if (stream->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP trigger: invalid state %d", stream->state);
			ret = -EIO;
			break;
		}
		stream->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_mcux_tx_stream_disable(dev, true);
		} else {
			i2s_mcux_rx_stream_disable(dev, true);
		}
		break;

	case I2S_TRIGGER_PREPARE:
		if (stream->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE trigger: invalid state %d", stream->state);
			ret = -EIO;
			break;
		}
		stream->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_mcux_tx_stream_disable(dev, true);
		} else {
			i2s_mcux_rx_stream_disable(dev, true);
		}
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		ret = -EINVAL;
	}

	irq_unlock(key);

	return ret;
}

static int i2s_mcux_read(const struct device *dev, void **mem_block,
			 size_t *size)
{
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	void *buffer;
	int ret = 0;

	if (stream->state == I2S_STATE_NOT_READY) {
		LOG_ERR("invalid state %d", stream->state);
		return -EIO;
	}

	ret = k_msgq_get(&stream->out_queue, &buffer,
			 SYS_TIMEOUT_MS(stream->cfg.timeout));

	if (ret != 0) {
		if (stream->state == I2S_STATE_ERROR) {
			return -EIO;
		} else {
			return -EAGAIN;
		}
	}

	*mem_block = buffer;
	*size = stream->cfg.block_size;
	return 0;
}

static int i2s_mcux_write(const struct device *dev, void *mem_block,
			  size_t size)
{
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	int ret;
	struct i2s_txq_entry queue_entry = {
		.mem_block = mem_block,
		.size = size,
	};

	if (stream->state != I2S_STATE_RUNNING &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state (%d)", stream->state);
		return -EIO;
	}

	ret = k_msgq_put(&stream->in_queue, &queue_entry,
			 SYS_TIMEOUT_MS(stream->cfg.timeout));

	if (ret) {
		LOG_ERR("k_msgq_put failed %d", ret);
		return ret;
	}

	return ret;
}

static const struct i2s_driver_api i2s_mcux_driver_api = {
	.configure = i2s_mcux_configure,
	.config_get = i2s_mcux_config_get,
	.read = i2s_mcux_read,
	.write = i2s_mcux_write,
	.trigger = i2s_mcux_trigger,
};
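
/*
 * Applications drive this driver through the generic Zephyr I2S API.
 * A minimal TX sketch ("i2s_dev", "tx_slab", "block" and BLOCK_SIZE are
 * illustrative assumptions; "block" would come from tx_slab via
 * k_mem_slab_alloc()):
 *
 *	struct i2s_config cfg = {
 *		.word_size = 16,
 *		.channels = 2,
 *		.format = I2S_FMT_DATA_FORMAT_I2S,
 *		.options = I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_MASTER,
 *		.frame_clk_freq = 48000,
 *		.mem_slab = &tx_slab,
 *		.block_size = BLOCK_SIZE,
 *		.timeout = 1000,
 *	};
 *	i2s_configure(i2s_dev, I2S_DIR_TX, &cfg);
 *	i2s_write(i2s_dev, block, BLOCK_SIZE);
 *	i2s_trigger(i2s_dev, I2S_DIR_TX, I2S_TRIGGER_START);
 */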

static void i2s_mcux_isr(const struct device *dev)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	I2S_Type *base = cfg->base;
	uint32_t intstat = base->FIFOINTSTAT;

	if ((intstat & I2S_FIFOINTSTAT_TXERR_MASK) != 0UL) {
		/* Clear TX error interrupt flag */
		base->FIFOSTAT = I2S_FIFOSTAT_TXERR(1U);
		stream = &dev_data->tx;
		stream->state = I2S_STATE_ERROR;
	}

	if ((intstat & I2S_FIFOINTSTAT_RXERR_MASK) != 0UL) {
		/* Clear RX error interrupt flag */
		base->FIFOSTAT = I2S_FIFOSTAT_RXERR(1U);
		stream = &dev_data->rx;
		stream->state = I2S_STATE_ERROR;
	}
}

static int i2s_mcux_init(const struct device *dev)
{
	const struct i2s_mcux_config *cfg = dev->config;
	struct i2s_mcux_data *const data = dev->data;
	int err;

	err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	cfg->irq_config(dev);

	/* Initialize the buffer queues */
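	/*
	 * Note: the TX in_queue carries struct i2s_txq_entry elements
	 * (block pointer plus size), while the remaining queues carry
	 * bare block pointers.
	 */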
	k_msgq_init(&data->tx.in_queue, (char *)data->tx_in_msgs,
		    sizeof(struct i2s_txq_entry), CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT);
	k_msgq_init(&data->rx.in_queue, (char *)data->rx_in_msgs,
		    sizeof(void *), CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT);
	k_msgq_init(&data->tx.out_queue, (char *)data->tx_out_msgs,
		    sizeof(void *), CONFIG_I2S_MCUX_FLEXCOMM_TX_BLOCK_COUNT);
	k_msgq_init(&data->rx.out_queue, (char *)data->rx_out_msgs,
		    sizeof(void *), CONFIG_I2S_MCUX_FLEXCOMM_RX_BLOCK_COUNT);

	if (data->tx.dev_dma != NULL) {
		if (!device_is_ready(data->tx.dev_dma)) {
			LOG_ERR("%s device not ready", data->tx.dev_dma->name);
			return -ENODEV;
		}
	}

	if (data->rx.dev_dma != NULL) {
		if (!device_is_ready(data->rx.dev_dma)) {
			LOG_ERR("%s device not ready", data->rx.dev_dma->name);
			return -ENODEV;
		}
	}

	data->tx.state = I2S_STATE_NOT_READY;
	data->rx.state = I2S_STATE_NOT_READY;

	LOG_DBG("Device %s initialized", dev->name);

	return 0;
}

#define I2S_DMA_CHANNELS(id)						\
	.tx = {								\
		.dev_dma = UTIL_AND(					\
			DT_INST_DMAS_HAS_NAME(id, tx),			\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx))), \
		.channel = UTIL_AND(					\
			DT_INST_DMAS_HAS_NAME(id, tx),			\
			DT_INST_DMAS_CELL_BY_NAME(id, tx, channel)),	\
		.dma_cfg = {						\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_callback = i2s_mcux_dma_tx_callback,	\
			.block_count = 1,				\
		}							\
	},								\
	.rx = {								\
		.dev_dma = UTIL_AND(					\
			DT_INST_DMAS_HAS_NAME(id, rx),			\
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx))), \
		.channel = UTIL_AND(					\
			DT_INST_DMAS_HAS_NAME(id, rx),			\
			DT_INST_DMAS_CELL_BY_NAME(id, rx, channel)),	\
		.dma_cfg = {						\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_callback = i2s_mcux_dma_rx_callback,	\
			.complete_callback_en = true,			\
			.block_count = NUM_RX_DMA_BLOCKS,		\
		}							\
	}

#define I2S_MCUX_FLEXCOMM_DEVICE(id)					\
	PINCTRL_DT_INST_DEFINE(id);					\
	static void i2s_mcux_config_func_##id(const struct device *dev); \
	static const struct i2s_mcux_config i2s_mcux_config_##id = {	\
		.base = (I2S_Type *)DT_INST_REG_ADDR(id),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)),	\
		.clock_subsys =						\
			(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name), \
		.irq_config = i2s_mcux_config_func_##id,		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),		\
	};								\
	static struct i2s_mcux_data i2s_mcux_data_##id = {		\
		I2S_DMA_CHANNELS(id)					\
	};								\
	DEVICE_DT_INST_DEFINE(id,					\
			      &i2s_mcux_init,				\
			      NULL,					\
			      &i2s_mcux_data_##id,			\
			      &i2s_mcux_config_##id,			\
			      POST_KERNEL,				\
			      CONFIG_I2S_INIT_PRIORITY,			\
			      &i2s_mcux_driver_api);			\
	static void i2s_mcux_config_func_##id(const struct device *dev) \
	{								\
		IRQ_CONNECT(DT_INST_IRQN(id),				\
			    DT_INST_IRQ(id, priority),			\
			    i2s_mcux_isr,				\
			    DEVICE_DT_INST_GET(id),			\
			    0);						\
		irq_enable(DT_INST_IRQN(id));				\
	}

DT_INST_FOREACH_STATUS_OKAY(I2S_MCUX_FLEXCOMM_DEVICE)