/*
 * Copyright 2021,2023-2024 NXP Semiconductor INC.
 * All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/** @file
 * @brief I2S bus (SAI) driver for NXP i.MX RT series.
 */
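
/*
 * Typical application usage (a sketch via the generic Zephyr I2S API; the
 * slab size, block size, sample rate and timeout below are illustrative
 * only):
 *
 *   K_MEM_SLAB_DEFINE(tx_slab, 1024, CONFIG_I2S_TX_BLOCK_COUNT, 4);
 *
 *   struct i2s_config cfg = {
 *           .word_size = 16,
 *           .channels = 2,
 *           .format = I2S_FMT_DATA_FORMAT_I2S,
 *           .options = I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_MASTER,
 *           .frame_clk_freq = 48000,
 *           .mem_slab = &tx_slab,
 *           .block_size = 1024,
 *           .timeout = 2000,
 *   };
 *
 *   i2s_configure(dev, I2S_DIR_TX, &cfg);
 *   i2s_write(dev, mem_block, 1024);
 *   i2s_trigger(dev, I2S_DIR_TX, I2S_TRIGGER_START);
 */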

#include <errno.h>
#include <string.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/imx_ccm.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>

#include "i2s_mcux_sai.h"

#define LOG_DOMAIN dev_i2s_mcux
#define LOG_LEVEL CONFIG_I2S_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>

LOG_MODULE_REGISTER(LOG_DOMAIN);

#define DT_DRV_COMPAT nxp_mcux_i2s
#define NUM_DMA_BLOCKS_RX_PREP 3
#define MAX_TX_DMA_BLOCKS CONFIG_DMA_TCD_QUEUE_SIZE
#if (NUM_DMA_BLOCKS_RX_PREP >= CONFIG_DMA_TCD_QUEUE_SIZE)
#error NUM_DMA_BLOCKS_RX_PREP must be < CONFIG_DMA_TCD_QUEUE_SIZE
#endif
#if defined(CONFIG_DMA_MCUX_EDMA) && (NUM_DMA_BLOCKS_RX_PREP < 3)
#error NUM_DMA_BLOCKS_RX_PREP must be >= 3 to avoid the eDMA TCD coherency issue
#endif

/*
 * The SAI driver uses the source_gather_en/dest_scatter_en feature of the DMA
 * driver and relies on the DMA driver managing a circular list of DMA blocks.
 * For example, the eDMA driver links Transfer Control Descriptors (TCDs) into
 * a list and manages the tcdpool. Calling dma_reload() adds a new DMA block
 * to an already-configured DMA channel, appending it to the DMA driver's
 * circular list of blocks.
 *
 * in_queue and out_queue are used as follows:
 *
 * transmit stream:
 * An application-provided buffer is queued to in_queue until it is loaded to
 * DMA. When the DMA channel is idle, the buffer is retrieved from in_queue,
 * loaded to DMA, and queued to out_queue. When DMA completes, the buffer is
 * retrieved from out_queue and freed.
 *
 * receive stream:
 * The driver allocates a buffer from the slab, loads it to DMA, and queues it
 * to in_queue. When DMA completes, the buffer is retrieved from in_queue and
 * queued to out_queue. When the application reads, the buffer is taken
 * (optionally blocking) from out_queue and presented to the application.
 */
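
/*
 * Buffer flow at a glance:
 *
 *   TX: i2s_write() -> in_queue -> DMA load -> out_queue -> slab free
 *   RX: slab alloc -> DMA load + in_queue -> out_queue -> i2s_read()
 */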

/* This describes one Tx/Rx stream. */
struct stream {
	int32_t state;
	uint32_t dma_channel;
	uint32_t start_channel;
	void (*irq_call_back)(void);
	struct i2s_config cfg;
	struct dma_config dma_cfg;
	struct dma_block_config dma_block;
	uint8_t free_tx_dma_blocks;
	bool last_block;
	struct k_msgq in_queue;
	struct k_msgq out_queue;
};

struct i2s_mcux_config {
	I2S_Type *base;
	uint32_t clk_src;
	uint32_t clk_pre_div;
	uint32_t clk_src_div;
	uint32_t pll_src;
	uint32_t pll_lp;
	uint32_t pll_pd;
	uint32_t pll_num;
	uint32_t pll_den;
	uint32_t *mclk_control_base;
	uint32_t mclk_pin_mask;
	uint32_t mclk_pin_offset;
	uint32_t tx_channel;
	clock_control_subsys_t clk_sub_sys;
	const struct device *ccm_dev;
	const struct pinctrl_dev_config *pinctrl;
	void (*irq_connect)(const struct device *dev);
	bool rx_sync_mode;
	bool tx_sync_mode;
};

/* Device run time data */
struct i2s_dev_data {
	const struct device *dev_dma;
	struct stream tx;
	void *tx_in_msgs[CONFIG_I2S_TX_BLOCK_COUNT];
	void *tx_out_msgs[CONFIG_I2S_TX_BLOCK_COUNT];
	struct stream rx;
	void *rx_in_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
	void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
};

static void i2s_dma_tx_callback(const struct device *, void *, uint32_t, int);
static void i2s_tx_stream_disable(const struct device *, bool drop);
static void i2s_rx_stream_disable(const struct device *, bool in_drop, bool out_drop);
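
/* Free all buffers queued on a stream's in_queue and/or out_queue. */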
static inline void i2s_purge_stream_buffers(struct stream *strm, struct k_mem_slab *mem_slab,
					    bool in_drop, bool out_drop)
{
	void *buffer;

	if (in_drop) {
		while (k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, buffer);
		}
	}

	if (out_drop) {
		while (k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, buffer);
		}
	}
}
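
/* Stop TX DMA, drain and disable the transmitter, and optionally drop queued buffers. */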
static void i2s_tx_stream_disable(const struct device *dev, bool drop)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;

	LOG_DBG("Stopping DMA channel %u for TX stream", strm->dma_channel);

	/* Disable FIFO DMA request */
	SAI_TxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false);

	dma_stop(dev_dma, strm->dma_channel);

	/* wait for TX FIFO to drain before disabling */
	while ((dev_cfg->base->TCSR & I2S_TCSR_FWF_MASK) == 0) {
		;
	}

	/* Disable the channel FIFO */
	dev_cfg->base->TCR3 &= ~I2S_TCR3_TCE_MASK;

	/* Disable Tx */
	SAI_TxEnable(dev_cfg->base, false);

	/* If Tx is disabled, reset the FIFO pointer and clear error flags */
	if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) {
		dev_cfg->base->TCSR |= (I2S_TCSR_FR_MASK | I2S_TCSR_SR_MASK);
		dev_cfg->base->TCSR &= ~I2S_TCSR_SR_MASK;
	}

	/* purge buffers queued in the stream */
	if (drop) {
		i2s_purge_stream_buffers(strm, dev_data->tx.cfg.mem_slab, true, true);
	}
}
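
/* Stop RX DMA, disable the receiver, and optionally drop queued buffers. */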
static void i2s_rx_stream_disable(const struct device *dev, bool in_drop, bool out_drop)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;

	LOG_DBG("Stopping RX stream & DMA channel %u", strm->dma_channel);
	dma_stop(dev_dma, strm->dma_channel);

	/* Disable the channel FIFO */
	dev_cfg->base->RCR3 &= ~I2S_RCR3_RCE_MASK;

	/* Disable DMA enable bit */
	SAI_RxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false);

	/* Disable Rx */
	SAI_RxEnable(dev_cfg->base, false);

	/* wait for the receiver to disable */
	while (dev_cfg->base->RCSR & I2S_RCSR_RE_MASK) {
		;
	}
	/* reset the FIFO pointer and clear error flags */
	dev_cfg->base->RCSR |= (I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK);
	dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK;

	/* purge buffers queued in the stream */
	if (in_drop || out_drop) {
		i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab, in_drop, out_drop);
	}
}
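
/*
 * Feed as many queued TX buffers as possible into the DMA driver's circular
 * block list. Returns 0 on success and reports how many blocks were queued.
 */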
static int i2s_tx_reload_multiple_dma_blocks(const struct device *dev, uint8_t *blocks_queued)
{
	struct i2s_dev_data *dev_data = dev->data;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct stream *strm = &dev_data->tx;
	void *buffer = NULL;
	int ret = 0;
	unsigned int key;

	*blocks_queued = 0;

	key = irq_lock();

	/* queue additional blocks to DMA if in_queue and DMA have free blocks */
	while (strm->free_tx_dma_blocks) {
		/* get the next buffer from queue */
		ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
		if (ret) {
			/* in_queue is empty, no more blocks to send to DMA */
			ret = 0;
			break;
		}

		/* reload the DMA */
		ret = dma_reload(dev_data->dev_dma, strm->dma_channel, (uint32_t)buffer,
				 (uint32_t)&base->TDR[strm->start_channel], strm->cfg.block_size);
		if (ret != 0) {
			LOG_ERR("dma_reload() failed with error 0x%x", ret);
			break;
		}

		(strm->free_tx_dma_blocks)--;

		ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer %p -> out %p err %d", buffer, &strm->out_queue, ret);
			break;
		}

		(*blocks_queued)++;
	}

	irq_unlock(key);
	return ret;
}

/* This function is executed in the interrupt context */
static void i2s_dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				int status)
{
	const struct device *dev = (struct device *)arg;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	void *buffer = NULL;
	int ret;
	uint8_t blocks_queued;

	LOG_DBG("tx cb");

	ret = k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT);
	if (ret == 0) {
		/* transmission complete. free the buffer */
		k_mem_slab_free(strm->cfg.mem_slab, buffer);
		(strm->free_tx_dma_blocks)++;
	} else {
		LOG_ERR("no buf in out_queue for channel %u", channel);
	}

	if (strm->free_tx_dma_blocks > MAX_TX_DMA_BLOCKS) {
		strm->state = I2S_STATE_ERROR;
		LOG_ERR("free_tx_dma_blocks exceeded maximum, now %d", strm->free_tx_dma_blocks);
		goto disabled_exit_no_drop;
	}

	/* Received a STOP trigger, terminate TX immediately */
	if (strm->last_block) {
		strm->state = I2S_STATE_READY;
		LOG_DBG("TX STOPPED last_block set");
		goto disabled_exit_no_drop;
	}

	if (ret) {
		/* k_msgq_get() returned an error and this was not the last block */
		strm->state = I2S_STATE_ERROR;
		goto disabled_exit_no_drop;
	}

	switch (strm->state) {
	case I2S_STATE_RUNNING:
	case I2S_STATE_STOPPING:
		ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);

		if (ret) {
			strm->state = I2S_STATE_ERROR;
			goto disabled_exit_no_drop;
		}

		if (blocks_queued || (strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) {
			goto enabled_exit;
		} else {
			/* all DMA blocks are free but no blocks were queued */
			if (strm->state == I2S_STATE_STOPPING) {
				/* TX queue has drained */
				strm->state = I2S_STATE_READY;
				LOG_DBG("TX stream has stopped");
			} else {
				strm->state = I2S_STATE_ERROR;
				LOG_ERR("TX Failed to reload DMA");
			}
			goto disabled_exit_no_drop;
		}

	case I2S_STATE_ERROR:
	default:
		goto disabled_exit_drop;
	}

disabled_exit_no_drop:
	i2s_tx_stream_disable(dev, false);
	return;

disabled_exit_drop:
	i2s_tx_stream_disable(dev, true);
	return;

enabled_exit:
	return;
}
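
/* RX DMA completion callback; executed in the interrupt context. */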
static void i2s_dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				int status)
{
	struct device *dev = (struct device *)arg;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	void *buffer;
	int ret;

	LOG_DBG("RX cb");

	switch (strm->state) {
	case I2S_STATE_STOPPING:
	case I2S_STATE_RUNNING:
		/* retrieve buffer from input queue */
		ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
		__ASSERT_NO_MSG(ret == 0);

		/* put buffer to output queue */
		ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer %p -> out_queue %p err %d", buffer, &strm->out_queue, ret);
			i2s_rx_stream_disable(dev, false, false);
			strm->state = I2S_STATE_ERROR;
			return;
		}
		if (strm->state == I2S_STATE_RUNNING) {
			/* allocate new buffer for next audio frame */
			ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
			if (ret != 0) {
				LOG_ERR("buffer alloc from slab %p err %d", strm->cfg.mem_slab,
					ret);
				i2s_rx_stream_disable(dev, false, false);
				strm->state = I2S_STATE_ERROR;
			} else {
				uint32_t data_path = strm->start_channel;

				ret = dma_reload(dev_data->dev_dma, strm->dma_channel,
						 (uint32_t)&base->RDR[data_path], (uint32_t)buffer,
						 strm->cfg.block_size);
				if (ret != 0) {
					LOG_ERR("dma_reload() failed with error 0x%x", ret);
					i2s_rx_stream_disable(dev, false, false);
					strm->state = I2S_STATE_ERROR;
					return;
				}

				/* put buffer in input queue */
				ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
				if (ret != 0) {
					LOG_ERR("%p -> in_queue %p err %d", buffer, &strm->in_queue,
						ret);
				}
			}
		} else {
			i2s_rx_stream_disable(dev, true, false);
			/* Received a STOP/DRAIN trigger */
			strm->state = I2S_STATE_READY;
		}
		break;
	case I2S_STATE_ERROR:
		i2s_rx_stream_disable(dev, true, true);
		break;
	}
}
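
/* Set the SAI MCLK pin direction: true drives MCLK out (master), false receives it. */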
static void enable_mclk_direction(const struct device *dev, bool dir)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	uint32_t offset = dev_cfg->mclk_pin_offset;
	uint32_t mask = dev_cfg->mclk_pin_mask;
	uint32_t *base = (uint32_t *)(dev_cfg->mclk_control_base + offset);

	if (dir) {
		*base |= mask;
	} else {
		*base &= ~mask;
	}
}
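
/* Query the CCM driver for the SAI MCLK rate; reports 0 if the CCM driver is not ready. */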
static void get_mclk_rate(const struct device *dev, uint32_t *mclk)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	const struct device *ccm_dev = dev_cfg->ccm_dev;
	clock_control_subsys_t clk_sub_sys = dev_cfg->clk_sub_sys;
	uint32_t rate = 0;

	if (device_is_ready(ccm_dev)) {
		clock_control_get_rate(ccm_dev, clk_sub_sys, &rate);
	} else {
		LOG_ERR("CCM driver is not installed");
	}

	*mclk = rate;
}
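
/*
 * Configure one direction (TX or RX) of the SAI: validate the i2s_config,
 * derive the SAI transceiver settings, and program clocks and DMA parameters.
 */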
static int i2s_mcux_config(const struct device *dev, enum i2s_dir dir,
			   const struct i2s_config *i2s_cfg)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	sai_transceiver_t config;
	uint32_t mclk;
	/* num_words is the frame size */
	uint8_t num_words = i2s_cfg->channels;
	uint8_t word_size_bits = i2s_cfg->word_size;

	if ((dev_data->tx.state != I2S_STATE_NOT_READY) &&
	    (dev_data->tx.state != I2S_STATE_READY) &&
	    (dev_data->rx.state != I2S_STATE_NOT_READY) &&
	    (dev_data->rx.state != I2S_STATE_READY)) {
		LOG_ERR("invalid state tx(%u) rx(%u)", dev_data->tx.state, dev_data->rx.state);
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		return -EINVAL;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		LOG_ERR("Invalid frame_clk_freq %u", i2s_cfg->frame_clk_freq);
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		/* A frame clock frequency of zero only moves the interface to
		 * the NOT_READY state, so success is returned here.
		 */
		return 0;
	}

	if (word_size_bits < SAI_WORD_SIZE_BITS_MIN || word_size_bits > SAI_WORD_SIZE_BITS_MAX) {
		LOG_ERR("Unsupported I2S word size %u", word_size_bits);
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		return -EINVAL;
	}

	if (num_words < SAI_WORD_PER_FRAME_MIN || num_words > SAI_WORD_PER_FRAME_MAX) {
		LOG_ERR("Unsupported words length %u", num_words);
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		return -EINVAL;
	}

	if ((i2s_cfg->options & I2S_OPT_PINGPONG) == I2S_OPT_PINGPONG) {
		LOG_ERR("Ping-pong mode not supported");
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		return -ENOTSUP;
	}

	memset(&config, 0, sizeof(config));

	const bool is_mclk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE;

	enable_mclk_direction(dev, !is_mclk_slave);

	get_mclk_rate(dev, &mclk);
	LOG_DBG("mclk is %u", mclk);

	/* bit clock source is MCLK */
	config.bitClock.bclkSource = kSAI_BclkSourceMclkDiv;
	/*
	 * additional settings for bclk
	 * read the SDK header file for more details
	 */
	config.bitClock.bclkInputDelay = false;

	/* frame sync default configurations */
#if defined(FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE) && FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE
	config.frameSync.frameSyncGenerateOnDemand = false;
#endif

	/* serial data default configurations */
#if defined(FSL_FEATURE_SAI_HAS_CHANNEL_MODE) && FSL_FEATURE_SAI_HAS_CHANNEL_MODE
	config.serialData.dataMode = kSAI_DataPinStateOutputZero;
#endif

	config.frameSync.frameSyncPolarity = kSAI_PolarityActiveLow;
	config.bitClock.bclkSrcSwap = false;
	/* format */
	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		SAI_GetClassicI2SConfig(&config, word_size_bits, kSAI_Stereo, dev_cfg->tx_channel);
		break;
	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		SAI_GetLeftJustifiedConfig(&config, word_size_bits, kSAI_Stereo,
					   dev_cfg->tx_channel);
		break;
	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		SAI_GetDSPConfig(&config, kSAI_FrameSyncLenOneBitClk, word_size_bits, kSAI_Stereo,
				 dev_cfg->tx_channel);
		/* We need to set the data word count manually, since the HAL
		 * function does not set it.
		 */
		config.serialData.dataWordNum = num_words;
		config.frameSync.frameSyncEarly = true;
		config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge;
		break;
	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		SAI_GetTDMConfig(&config, kSAI_FrameSyncLenPerWordWidth, word_size_bits, num_words,
				 dev_cfg->tx_channel);
		config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge;
		break;
	default:
		LOG_ERR("Unsupported I2S data format");
		if (dir == I2S_DIR_TX) {
			dev_data->tx.state = I2S_STATE_NOT_READY;
		} else {
			dev_data->rx.state = I2S_STATE_NOT_READY;
		}
		return -EINVAL;
	}

	/* sync mode configurations */
	if (dir == I2S_DIR_TX) {
		/* TX */
		if (dev_cfg->tx_sync_mode) {
			config.syncMode = kSAI_ModeSync;
		} else {
			config.syncMode = kSAI_ModeAsync;
		}
	} else {
		/* RX */
		if (dev_cfg->rx_sync_mode) {
			config.syncMode = kSAI_ModeSync;
		} else {
			config.syncMode = kSAI_ModeAsync;
		}
	}

	if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) {
		if (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) {
			config.masterSlave = kSAI_Slave;
		} else {
			config.masterSlave = kSAI_Bclk_Master_FrameSync_Slave;
		}
	} else {
		if (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) {
			config.masterSlave = kSAI_Bclk_Slave_FrameSync_Master;
		} else {
			config.masterSlave = kSAI_Master;
		}
	}

	/* clock signal polarity */
	switch (i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) {
	case I2S_FMT_CLK_NF_NB:
		/* No action required, leave the configuration untouched */
		break;

	case I2S_FMT_CLK_NF_IB:
		/* Swap bclk polarity */
		config.bitClock.bclkPolarity =
			(config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge)
				? kSAI_SampleOnRisingEdge
				: kSAI_SampleOnFallingEdge;
		break;

	case I2S_FMT_CLK_IF_NB:
		/* Swap frame sync polarity */
		config.frameSync.frameSyncPolarity =
			(config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh)
				? kSAI_PolarityActiveLow
				: kSAI_PolarityActiveHigh;
		break;

	case I2S_FMT_CLK_IF_IB:
		/* Swap frame sync and bclk polarity */
		config.frameSync.frameSyncPolarity =
			(config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh)
				? kSAI_PolarityActiveLow
				: kSAI_PolarityActiveHigh;
		config.bitClock.bclkPolarity =
			(config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge)
				? kSAI_SampleOnRisingEdge
				: kSAI_SampleOnFallingEdge;
		break;
	}

	/* PCM short format always requires that WS be one BCLK cycle */
	if ((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) != I2S_FMT_DATA_FORMAT_PCM_SHORT) {
		config.frameSync.frameSyncWidth = (uint8_t)word_size_bits;
	}

	if (dir == I2S_DIR_TX) {
		memcpy(&dev_data->tx.cfg, i2s_cfg, sizeof(struct i2s_config));
		LOG_DBG("tx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list);
		LOG_DBG("tx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks);
		LOG_DBG("tx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size);
		LOG_DBG("tx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer);

		/* set bit clock divider */
		SAI_TxSetConfig(base, &config);
		dev_data->tx.start_channel = config.startChannel;
		/* Disable the channel FIFO */
		base->TCR3 &= ~I2S_TCR3_TCE_MASK;
		SAI_TxSetBitClockRate(base, mclk, i2s_cfg->frame_clk_freq, word_size_bits,
				      i2s_cfg->channels);
		LOG_DBG("tx start_channel = %d", dev_data->tx.start_channel);
		/* set up DMA settings */
		dev_data->tx.dma_cfg.source_data_size = word_size_bits / 8;
		dev_data->tx.dma_cfg.dest_data_size = word_size_bits / 8;
		dev_data->tx.dma_cfg.source_burst_length = i2s_cfg->word_size / 8;
		dev_data->tx.dma_cfg.dest_burst_length = i2s_cfg->word_size / 8;
		dev_data->tx.dma_cfg.user_data = (void *)dev;
		dev_data->tx.state = I2S_STATE_READY;
	} else {
		/* For RX, DMA reads from the FIFO whenever data is present */
		config.fifo.fifoWatermark = 0;

		memcpy(&dev_data->rx.cfg, i2s_cfg, sizeof(struct i2s_config));
		LOG_DBG("rx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list);
		LOG_DBG("rx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks);
		LOG_DBG("rx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size);
		LOG_DBG("rx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer);

		/* set bit clock divider */
		SAI_RxSetConfig(base, &config);
		dev_data->rx.start_channel = config.startChannel;
		SAI_RxSetBitClockRate(base, mclk, i2s_cfg->frame_clk_freq, word_size_bits,
				      i2s_cfg->channels);
		LOG_DBG("rx start_channel = %d", dev_data->rx.start_channel);
		/* set up DMA settings */
		dev_data->rx.dma_cfg.source_data_size = word_size_bits / 8;
		dev_data->rx.dma_cfg.dest_data_size = word_size_bits / 8;
		dev_data->rx.dma_cfg.source_burst_length = i2s_cfg->word_size / 8;
		dev_data->rx.dma_cfg.dest_burst_length = i2s_cfg->word_size / 8;
		dev_data->rx.dma_cfg.user_data = (void *)dev;
		dev_data->rx.state = I2S_STATE_READY;
	}

	return 0;
}
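
/* Return the stored configuration for the requested direction. */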
const struct i2s_config *i2s_mcux_config_get(const struct device *dev, enum i2s_dir dir)
{
	struct i2s_dev_data *dev_data = dev->data;

	if (dir == I2S_DIR_RX) {
		return &dev_data->rx.cfg;
	}

	return &dev_data->tx.cfg;
}
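
/*
 * Start the TX stream: load the first application buffer into DMA, queue as
 * many additional buffers as the TCD queue allows, then enable DMA requests,
 * the channel FIFO and the transmitter.
 */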
static int i2s_tx_stream_start(const struct device *dev)
{
	int ret = 0;
	void *buffer;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	/* retrieve buffer from input queue */
	ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("No buffer in input queue to start");
		return -EIO;
	}

	LOG_DBG("tx stream start");

	/* Driver keeps track of how many DMA blocks can be loaded to the DMA */
	strm->free_tx_dma_blocks = MAX_TX_DMA_BLOCKS;

	/* Configure the DMA with the first TX block */
	struct dma_block_config *blk_cfg = &strm->dma_block;

	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	uint32_t data_path = strm->start_channel;

	blk_cfg->dest_address = (uint32_t)&base->TDR[data_path];
	blk_cfg->source_address = (uint32_t)buffer;
	blk_cfg->block_size = strm->cfg.block_size;
	blk_cfg->dest_scatter_en = 1;

	strm->dma_cfg.block_count = 1;

	strm->dma_cfg.head_block = &strm->dma_block;
	strm->dma_cfg.user_data = (void *)dev;

	(strm->free_tx_dma_blocks)--;
	dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

	/* put buffer in output queue */
	ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("failed to put buffer in output queue");
		return ret;
	}

	uint8_t blocks_queued;

	ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
	if (ret) {
		LOG_ERR("i2s_tx_reload_multiple_dma_blocks() failed (%d)", ret);
		return ret;
	}

	ret = dma_start(dev_dma, strm->dma_channel);
	if (ret < 0) {
		LOG_ERR("dma_start failed (%d)", ret);
		return ret;
	}

	/* Enable DMA enable bit */
	SAI_TxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

	/* Enable the channel FIFO */
	base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel);

	/* Enable SAI Tx clock */
	SAI_TxEnable(base, true);

	return 0;
}
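
/*
 * Start the RX stream: pre-load NUM_DMA_BLOCKS_RX_PREP buffers into the DMA
 * block list, then enable DMA requests, the channel FIFO and the receiver.
 */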
static int i2s_rx_stream_start(const struct device *dev)
{
	int ret = 0;
	void *buffer;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	uint8_t num_of_bufs;

	num_of_bufs = k_mem_slab_num_free_get(strm->cfg.mem_slab);

	/*
	 * Need at least NUM_DMA_BLOCKS_RX_PREP buffers on the RX memory slab
	 * for reliable DMA reception.
	 */
	if (num_of_bufs < NUM_DMA_BLOCKS_RX_PREP) {
		return -EINVAL;
	}

	/* allocate 1st receive buffer from SLAB */
	ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_DBG("buffer alloc from mem_slab failed (%d)", ret);
		return ret;
	}

	/* Configure DMA block */
	struct dma_block_config *blk_cfg = &strm->dma_block;

	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	uint32_t data_path = strm->start_channel;

	blk_cfg->dest_address = (uint32_t)buffer;
	blk_cfg->source_address = (uint32_t)&base->RDR[data_path];
	blk_cfg->block_size = strm->cfg.block_size;

	blk_cfg->source_gather_en = 1;

	strm->dma_cfg.block_count = 1;
	strm->dma_cfg.head_block = &strm->dma_block;
	strm->dma_cfg.user_data = (void *)dev;

	dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

	/* put buffer in input queue */
	ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("failed to put buffer in input queue, ret1 %d", ret);
		return ret;
	}

	/* prep DMA for each of the remaining (NUM_DMA_BLOCKS_RX_PREP - 1) buffers */
	for (int i = 0; i < NUM_DMA_BLOCKS_RX_PREP - 1; i++) {

		/* allocate receive buffer from SLAB */
		ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer alloc from mem_slab failed (%d)", ret);
			return ret;
		}

		ret = dma_reload(dev_dma, strm->dma_channel, (uint32_t)&base->RDR[data_path],
				 (uint32_t)buffer, blk_cfg->block_size);
		if (ret != 0) {
			LOG_ERR("dma_reload() failed with error 0x%x", ret);
			return ret;
		}

		/* put buffer in input queue */
		ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("failed to put buffer in input queue, ret2 %d", ret);
			return ret;
		}
	}

	LOG_DBG("Starting DMA Ch%u", strm->dma_channel);
	ret = dma_start(dev_dma, strm->dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA Ch%d (%d)", strm->dma_channel, ret);
		return ret;
	}

	/* Enable DMA enable bit */
	SAI_RxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

	/* Enable the channel FIFO */
	base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel);

	/* Enable SAI Rx clock */
	SAI_RxEnable(base, true);

	return 0;
}
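
/* Handle I2S_TRIGGER_* commands and drive the stream state machine. */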
static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm;
	unsigned int key;
	int ret = 0;

	if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	}

	strm = (dir == I2S_DIR_TX) ? &dev_data->tx : &dev_data->rx;

	key = irq_lock();
	switch (cmd) {
	case I2S_TRIGGER_START:
		if (strm->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %u", strm->state);
			ret = -EIO;
			break;
		}

		if (dir == I2S_DIR_TX) {
			ret = i2s_tx_stream_start(dev);
		} else {
			ret = i2s_rx_stream_start(dev);
		}

		if (ret < 0) {
			LOG_DBG("START trigger failed %d", ret);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_RUNNING;
		strm->last_block = false;
		break;

	case I2S_TRIGGER_DROP:
		if (strm->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_tx_stream_disable(dev, true);
		} else {
			i2s_rx_stream_disable(dev, true, true);
		}
		break;

	case I2S_TRIGGER_STOP:
		if (strm->state != I2S_STATE_RUNNING) {
			LOG_ERR("STOP trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_STOPPING;
		strm->last_block = true;
		break;

	case I2S_TRIGGER_DRAIN:
		if (strm->state != I2S_STATE_RUNNING) {
			LOG_ERR("DRAIN trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_STOPPING;
		break;

	case I2S_TRIGGER_PREPARE:
		if (strm->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}
		strm->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_tx_stream_disable(dev, true);
		} else {
			i2s_rx_stream_disable(dev, true, true);
		}
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		ret = -EINVAL;
	}

	irq_unlock(key);
	return ret;
}
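
/* Get a filled buffer from the RX out_queue, blocking up to the configured timeout. */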
static int i2s_mcux_read(const struct device *dev, void **mem_block, size_t *size)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	void *buffer;
	int status, ret = 0;

	LOG_DBG("i2s_mcux_read");
	if (strm->state == I2S_STATE_NOT_READY) {
		LOG_ERR("invalid state %d", strm->state);
		return -EIO;
	}

	status = k_msgq_get(&strm->out_queue, &buffer, SYS_TIMEOUT_MS(strm->cfg.timeout));
	if (status != 0) {
		if (strm->state == I2S_STATE_ERROR) {
			ret = -EIO;
		} else {
			LOG_DBG("need retry");
			ret = -EAGAIN;
		}
		return ret;
	}

	*mem_block = buffer;
	*size = strm->cfg.block_size;
	return 0;
}
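
/* Queue an application buffer on the TX in_queue, blocking up to the configured timeout. */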
static int i2s_mcux_write(const struct device *dev, void *mem_block, size_t size)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	int ret;

	LOG_DBG("i2s_mcux_write");
	if (strm->state != I2S_STATE_RUNNING && strm->state != I2S_STATE_READY) {
		LOG_ERR("invalid state (%d)", strm->state);
		return -EIO;
	}

	ret = k_msgq_put(&strm->in_queue, &mem_block, SYS_TIMEOUT_MS(strm->cfg.timeout));
	if (ret) {
		LOG_DBG("k_msgq_put returned code %d", ret);
		return ret;
	}

	return ret;
}
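
/* Clear SAI FIFO error flags and reset the FIFOs so transfers can continue. */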
static void sai_driver_irq(const struct device *dev)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	if ((base->TCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) {
		/* Clear FIFO error flag to continue transfer */
		SAI_TxClearStatusFlags(base, I2S_TCSR_FEF_MASK);

		/* Reset FIFO for safety */
		SAI_TxSoftwareReset(base, kSAI_ResetTypeFIFO);

		LOG_DBG("sai tx error occurred");
	}

	if ((base->RCSR & I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) {
		/* Clear FIFO error flag to continue transfer */
		SAI_RxClearStatusFlags(base, I2S_RCSR_FEF_MASK);

		/* Reset FIFO for safety */
		SAI_RxSoftwareReset(base, kSAI_ResetTypeFIFO);

		LOG_DBG("sai rx error occurred");
	}
}

/* Top-level ISR: dispatch FIFO error handling for both directions */
static void i2s_mcux_isr(void *arg)
{
	struct device *dev = (struct device *)arg;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	if ((base->TCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) {
		sai_driver_irq(dev);
	}

	if ((base->RCSR & I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) {
		sai_driver_irq(dev);
	}
	/*
	 * Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F:
	 * a store immediate overlapping exception return operation
	 * might vector to an incorrect interrupt.
	 */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
	barrier_dsync_fence_full();
#endif
}
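
/* Program the SAI root clock dividers and initialize the audio PLL. */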
static void audio_clock_settings(const struct device *dev)
{
	clock_audio_pll_config_t audioPllConfig;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	uint32_t clock_name = (uint32_t)dev_cfg->clk_sub_sys;

	/* Clock setting for SAI */
	imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src, dev_cfg->clk_pre_div,
				   dev_cfg->clk_src_div);

#ifdef CONFIG_SOC_SERIES_IMXRT11XX
	audioPllConfig.loopDivider = dev_cfg->pll_lp;
	audioPllConfig.postDivider = dev_cfg->pll_pd;
	audioPllConfig.numerator = dev_cfg->pll_num;
	audioPllConfig.denominator = dev_cfg->pll_den;
	audioPllConfig.ssEnable = false;
#elif defined CONFIG_SOC_SERIES_IMXRT10XX
	audioPllConfig.src = dev_cfg->pll_src;
	audioPllConfig.loopDivider = dev_cfg->pll_lp;
	audioPllConfig.postDivider = dev_cfg->pll_pd;
	audioPllConfig.numerator = dev_cfg->pll_num;
	audioPllConfig.denominator = dev_cfg->pll_den;
#else
#error Initialize SOC Series-specific clock_audio_pll_config_t
#endif /* CONFIG_SOC_SERIES */

	CLOCK_InitAudioPll(&audioPllConfig);
}
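
/* Device init: set up the buffer queues, ISR, pins, clocks and the SAI peripheral. */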
static int i2s_mcux_initialize(const struct device *dev)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	uint32_t mclk;
	int err;

	if (!dev_data->dev_dma) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}

	/* Initialize the buffer queues */
	k_msgq_init(&dev_data->tx.in_queue, (char *)dev_data->tx_in_msgs, sizeof(void *),
		    CONFIG_I2S_TX_BLOCK_COUNT);
	k_msgq_init(&dev_data->rx.in_queue, (char *)dev_data->rx_in_msgs, sizeof(void *),
		    CONFIG_I2S_RX_BLOCK_COUNT);
	k_msgq_init(&dev_data->tx.out_queue, (char *)dev_data->tx_out_msgs, sizeof(void *),
		    CONFIG_I2S_TX_BLOCK_COUNT);
	k_msgq_init(&dev_data->rx.out_queue, (char *)dev_data->rx_out_msgs, sizeof(void *),
		    CONFIG_I2S_RX_BLOCK_COUNT);

	/* register ISR */
	dev_cfg->irq_connect(dev);
	/* pinctrl */
	err = pinctrl_apply_state(dev_cfg->pinctrl, PINCTRL_STATE_DEFAULT);
	if (err) {
		LOG_ERR("mclk pinctrl setup failed (%d)", err);
		return err;
	}

	/* clock configuration */
	audio_clock_settings(dev);

	SAI_Init(base);

	dev_data->tx.state = I2S_STATE_NOT_READY;
	dev_data->rx.state = I2S_STATE_NOT_READY;

#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) ||                            \
	(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
	sai_master_clock_t mclkConfig = {
#if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)
		.mclkOutputEnable = true,
#if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && (FSL_FEATURE_SAI_HAS_NO_MCR_MICS))
		.mclkSource = kSAI_MclkSourceSysclk,
#endif
#endif
	};
#endif

	get_mclk_rate(dev, &mclk);
	/* master clock configurations */
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) ||                            \
	(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
#if defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)
	mclkConfig.mclkHz = mclk;
	mclkConfig.mclkSourceClkHz = mclk;
#endif
	SAI_SetMasterClockConfig(base, &mclkConfig);
#endif

	LOG_INF("Device %s initialized", dev->name);

	return 0;
}

static DEVICE_API(i2s, i2s_mcux_driver_api) = {
	.configure = i2s_mcux_config,
	.read = i2s_mcux_read,
	.write = i2s_mcux_write,
	.config_get = i2s_mcux_config_get,
	.trigger = i2s_mcux_trigger,
};
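
/*
 * Per-instance definition: pulls clocks, pin muxing, DMA channels/slots and
 * sync-mode options from the device tree and registers the device.
 */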
#define I2S_MCUX_INIT(i2s_id)                                                                      \
	static void i2s_irq_connect_##i2s_id(const struct device *dev);                            \
                                                                                                   \
	PINCTRL_DT_INST_DEFINE(i2s_id);                                                            \
                                                                                                   \
	static const struct i2s_mcux_config i2s_##i2s_id##_config = {                              \
		.base = (I2S_Type *)DT_INST_REG_ADDR(i2s_id),                                      \
		.clk_src = DT_INST_PROP(i2s_id, clock_mux),                                        \
		.clk_pre_div = DT_INST_PROP(i2s_id, pre_div),                                      \
		.clk_src_div = DT_INST_PROP(i2s_id, podf),                                         \
		.pll_src = DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), pll_clocks, src, value),            \
		.pll_lp = DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), pll_clocks, lp, value),              \
		.pll_pd = DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), pll_clocks, pd, value),              \
		.pll_num = DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), pll_clocks, num, value),            \
		.pll_den = DT_PHA_BY_NAME(DT_DRV_INST(i2s_id), pll_clocks, den, value),            \
		.mclk_control_base =                                                               \
			(uint32_t *)DT_REG_ADDR(DT_PHANDLE(DT_DRV_INST(i2s_id), pinmuxes)),       \
		.mclk_pin_mask = DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), pinmuxes, 0, function),        \
		.mclk_pin_offset = DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), pinmuxes, 0, pin),           \
		.clk_sub_sys =                                                                     \
			(clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_IDX(i2s_id, 0, name),      \
		.ccm_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i2s_id)),                             \
		.irq_connect = i2s_irq_connect_##i2s_id,                                           \
		.pinctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(i2s_id),                                 \
		.tx_sync_mode = DT_INST_PROP(i2s_id, nxp_tx_sync_mode),                            \
		.rx_sync_mode = DT_INST_PROP(i2s_id, nxp_rx_sync_mode),                            \
		.tx_channel = DT_INST_PROP(i2s_id, nxp_tx_channel),                                \
	};                                                                                         \
                                                                                                   \
	static struct i2s_dev_data i2s_##i2s_id##_data = {                                         \
		.dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(i2s_id, rx)),                   \
		.tx =                                                                              \
			{                                                                          \
				.dma_channel = DT_INST_PROP(i2s_id, nxp_tx_dma_channel),           \
				.dma_cfg =                                                         \
					{                                                          \
						.source_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dest_burst_length = CONFIG_I2S_EDMA_BURST_SIZE,   \
						.dma_callback = i2s_dma_tx_callback,               \
						.complete_callback_en = 1,                         \
						.error_callback_dis = 1,                           \
						.block_count = 1,                                  \
						.head_block = &i2s_##i2s_id##_data.tx.dma_block,   \
						.channel_direction = MEMORY_TO_PERIPHERAL,         \
						.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, tx,  \
										      source),     \
						.cyclic = 1,                                       \
					},                                                         \
			},                                                                         \
		.rx =                                                                              \
			{                                                                          \
				.dma_channel = DT_INST_PROP(i2s_id, nxp_rx_dma_channel),           \
				.dma_cfg =                                                         \
					{                                                          \
						.source_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dest_burst_length = CONFIG_I2S_EDMA_BURST_SIZE,   \
						.dma_callback = i2s_dma_rx_callback,               \
						.complete_callback_en = 1,                         \
						.error_callback_dis = 1,                           \
						.block_count = 1,                                  \
						.head_block = &i2s_##i2s_id##_data.rx.dma_block,   \
						.channel_direction = PERIPHERAL_TO_MEMORY,         \
						.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, rx,  \
										      source),     \
						.cyclic = 1,                                       \
					},                                                         \
			},                                                                         \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(i2s_id, &i2s_mcux_initialize, NULL, &i2s_##i2s_id##_data,            \
			      &i2s_##i2s_id##_config, POST_KERNEL, CONFIG_I2S_INIT_PRIORITY,       \
			      &i2s_mcux_driver_api);                                               \
                                                                                                   \
	static void i2s_irq_connect_##i2s_id(const struct device *dev)                             \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(i2s_id, 0, irq),                                    \
			    DT_INST_IRQ_BY_IDX(i2s_id, 0, priority), i2s_mcux_isr,                 \
			    DEVICE_DT_INST_GET(i2s_id), 0);                                        \
		irq_enable(DT_INST_IRQN(i2s_id));                                                  \
	}

DT_INST_FOREACH_STATUS_OKAY(I2S_MCUX_INIT)