/*
 * Copyright 2021,2023-2024 NXP Semiconductor INC.
 * All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/** @file
 * @brief I2S bus (SAI) driver for NXP i.MX RT series.
 */

#define DT_DRV_COMPAT nxp_mcux_i2s

#include <errno.h>
#include <string.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/dt-bindings/clock/imx_ccm.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>

#include <fsl_sai.h>
#include <fsl_edma.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dev_i2s_mcux, CONFIG_I2S_LOG_LEVEL);

#define NUM_DMA_BLOCKS_RX_PREP 3
#if defined(CONFIG_DMA_MCUX_EDMA)
BUILD_ASSERT(NUM_DMA_BLOCKS_RX_PREP >= 3,
	     "eDMA avoids TCD coherency issue if NUM_DMA_BLOCKS_RX_PREP >= 3");
#endif /* CONFIG_DMA_MCUX_EDMA */

#define MAX_TX_DMA_BLOCKS CONFIG_DMA_TCD_QUEUE_SIZE
BUILD_ASSERT(MAX_TX_DMA_BLOCKS > NUM_DMA_BLOCKS_RX_PREP,
	     "NUM_DMA_BLOCKS_RX_PREP must be < CONFIG_DMA_TCD_QUEUE_SIZE");

#define SAI_WORD_SIZE_BITS_MIN 8
#define SAI_WORD_SIZE_BITS_MAX 32

#define SAI_WORD_PER_FRAME_MIN 0
#define SAI_WORD_PER_FRAME_MAX 32

/*
 * The SAI driver uses the source_gather_en/dest_scatter_en feature of the DMA
 * driver and relies on it managing a circular list of DMA blocks; the eDMA
 * driver, for example, links Transfer Control Descriptors (TCDs) in a list
 * and manages the tcdpool. Calling dma_reload() appends a new DMA block to
 * the circular list of an already configured DMA channel.
 *
 * A struct stream represents one Tx or Rx stream.
 *
 * in_queue and out_queue are used as follows:
 *   transmit stream:
 *   An application-provided buffer is queued to in_queue until it is loaded
 *   to DMA. When the DMA channel is idle, the buffer is retrieved from
 *   in_queue, loaded to DMA, and queued to out_queue. When DMA completes,
 *   the buffer is retrieved from out_queue and freed.
 *
 *   receive stream:
 *   The driver allocates a buffer from the slab, loads it to DMA, and queues
 *   it to in_queue. When DMA completes, the buffer is retrieved from in_queue
 *   and queued to out_queue. When the application reads, a buffer is taken
 *   (optionally blocking) from out_queue and presented to the application.
 */
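
/*
 * For context, a minimal application-side TX sequence against the Zephyr I2S
 * API might look as follows (a sketch only: codec_dev, tx_slab, cfg, and
 * fill_samples() are hypothetical names, and error handling is omitted):
 *
 *	void *block;
 *
 *	i2s_configure(codec_dev, I2S_DIR_TX, &cfg);
 *	k_mem_slab_alloc(&tx_slab, &block, K_FOREVER);
 *	fill_samples(block, cfg.block_size);
 *	i2s_write(codec_dev, block, cfg.block_size);   <- queued to in_queue
 *	i2s_trigger(codec_dev, I2S_DIR_TX, I2S_TRIGGER_START);
 */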
struct stream {
	enum i2s_state state;
	uint32_t dma_channel;
	uint32_t start_channel;
	void (*irq_call_back)(void);
	struct i2s_config cfg;
	struct dma_config dma_cfg;
	struct dma_block_config dma_block;
	uint8_t free_tx_dma_blocks;
	bool last_block;
	struct k_msgq in_queue;
	struct k_msgq out_queue;
};

struct i2s_mcux_config {
	I2S_Type *base;
	uint32_t clk_src;
	uint32_t clk_pre_div;
	uint32_t clk_src_div;
	uint32_t pll_src;
	uint32_t pll_lp;
	uint32_t pll_pd;
	uint32_t pll_num;
	uint32_t pll_den;
	uint32_t mclk_control_base;
	uint32_t mclk_pin_mask;
	uint32_t mclk_pin_offset;
	uint32_t tx_channel;
	clock_control_subsys_t clk_sub_sys;
	const struct device *ccm_dev;
	const struct pinctrl_dev_config *pinctrl;
	void (*irq_connect)(const struct device *dev);
	sai_sync_mode_t rx_sync_mode;
	sai_sync_mode_t tx_sync_mode;
};

/* Device run time data */
struct i2s_dev_data {
	const struct device *dev_dma;
	struct stream tx;
	void *tx_in_msgs[CONFIG_I2S_TX_BLOCK_COUNT];
	void *tx_out_msgs[CONFIG_I2S_TX_BLOCK_COUNT];
	struct stream rx;
	void *rx_in_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
	void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
};

static void i2s_purge_stream_buffers(struct stream *strm, struct k_mem_slab *mem_slab,
				     bool in_drop, bool out_drop)
{
	void *buffer;

	if (in_drop) {
		while (k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, buffer);
		}
	}

	if (out_drop) {
		while (k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT) == 0) {
			k_mem_slab_free(mem_slab, buffer);
		}
	}
}

static void i2s_tx_stream_disable(const struct device *dev, bool drop)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;

	LOG_DBG("Stopping DMA channel %u for TX stream", strm->dma_channel);

	/* Disable FIFO DMA request */
	SAI_TxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false);

	dma_stop(dev_dma, strm->dma_channel);

	/* wait for TX FIFO to drain before disabling */
	while ((dev_cfg->base->TCSR & I2S_TCSR_FWF_MASK) == 0) {
		;
	}

	/* Disable the channel FIFO */
	dev_cfg->base->TCR3 &= ~I2S_TCR3_TCE_MASK;

	/* Disable Tx */
	SAI_TxEnable(dev_cfg->base, false);

	/* If Tx is disabled, reset the FIFO pointer and clear error flags */
	if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) {
		dev_cfg->base->TCSR |= (I2S_TCSR_FR_MASK | I2S_TCSR_SR_MASK);
		dev_cfg->base->TCSR &= ~I2S_TCSR_SR_MASK;
	}

	/* purge buffers queued in the stream */
	i2s_purge_stream_buffers(strm, dev_data->tx.cfg.mem_slab, drop, drop);
}

static void i2s_rx_stream_disable(const struct device *dev, bool in_drop, bool out_drop)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;

	LOG_DBG("Stopping RX stream & DMA channel %u", strm->dma_channel);
	dma_stop(dev_dma, strm->dma_channel);

	/* Disable the channel FIFO */
	dev_cfg->base->RCR3 &= ~I2S_RCR3_RCE_MASK;

	/* Disable DMA enable bit */
	SAI_RxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, false);

	/* Disable Rx */
	SAI_RxEnable(dev_cfg->base, false);

	/* wait for Receiver to disable */
	while (dev_cfg->base->RCSR & I2S_RCSR_RE_MASK) {
		;
	}
	/* reset the FIFO pointer and clear error flags */
	dev_cfg->base->RCSR |= (I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK);
	dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK;

	/* purge buffers queued in the stream */
	i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab, in_drop, out_drop);
}

static int i2s_tx_reload_multiple_dma_blocks(const struct device *dev, uint8_t *blocks_queued)
{
	struct i2s_dev_data *dev_data = dev->data;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct stream *strm = &dev_data->tx;
	void *buffer = NULL;
	int ret = 0;
	unsigned int key;

	*blocks_queued = 0;

	key = irq_lock();

	/* queue additional blocks to DMA while in_queue has buffers and DMA has free blocks */
	while (strm->free_tx_dma_blocks) {
		/* get the next buffer from queue */
		ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
		if (ret) {
			/* in_queue is empty, no more blocks to send to DMA */
			ret = 0;
			break;
		}

		/* reload the DMA */
		ret = dma_reload(dev_data->dev_dma, strm->dma_channel, (uint32_t)buffer,
				 (uint32_t)&base->TDR[strm->start_channel], strm->cfg.block_size);
		if (ret != 0) {
			LOG_ERR("dma_reload() failed with error 0x%x", ret);
			break;
		}

		(strm->free_tx_dma_blocks)--;

		ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer %p -> out %p err %d", buffer, &strm->out_queue, ret);
			break;
		}

		(*blocks_queued)++;
	}

	irq_unlock(key);
	return ret;
}

/* This function is executed in the interrupt context */
static void i2s_dma_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				int status)
{
	const struct device *dev = (struct device *)arg;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	uint8_t blocks_queued;
	void *buffer = NULL;
	int ret;

	LOG_DBG("tx cb");

	ret = k_msgq_get(&strm->out_queue, &buffer, K_NO_WAIT);
	if (ret == 0) {
		/* transmission complete. free the buffer */
		k_mem_slab_free(strm->cfg.mem_slab, buffer);
		(strm->free_tx_dma_blocks)++;
	} else {
		LOG_ERR("no buf in out_queue for channel %u", channel);
	}

	if (strm->free_tx_dma_blocks > MAX_TX_DMA_BLOCKS) {
		strm->state = I2S_STATE_ERROR;
		LOG_ERR("free_tx_dma_blocks exceeded maximum, now %d", strm->free_tx_dma_blocks);
		goto disabled_exit_no_drop;
	}

	/* Received a STOP trigger, terminate TX immediately */
	if (strm->last_block) {
		strm->state = I2S_STATE_READY;
		LOG_DBG("TX STOPPED last_block set");
		goto disabled_exit_no_drop;
	}

	if (ret) {
		/* k_msgq_get() returned an error, and it was not the last block */
		strm->state = I2S_STATE_ERROR;
		goto disabled_exit_no_drop;
	}

	if (strm->state != I2S_STATE_RUNNING && strm->state != I2S_STATE_STOPPING) {
		goto disabled_exit_drop;
	}

	ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
	if (ret) {
		strm->state = I2S_STATE_ERROR;
		goto disabled_exit_no_drop;
	}

	if (blocks_queued || (strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) {
		goto enabled_exit;
	}

	/* all DMA blocks are free but no blocks were queued */
	if (strm->state == I2S_STATE_STOPPING) {
		/* TX queue has drained */
		strm->state = I2S_STATE_READY;
		LOG_DBG("TX stream has stopped");
		goto disabled_exit_no_drop;
	}

	LOG_WRN("TX input queue empty!");
	if (strm->free_tx_dma_blocks >= MAX_TX_DMA_BLOCKS) {
		/* In the running state with no TX blocks left to transfer,
		 * stop TX. (This disables the bit clock to avoid dummy bits
		 * being received on the RX side.)
		 */
		const struct i2s_mcux_config *dev_cfg = dev->config;
		I2S_Type *base = (I2S_Type *)dev_cfg->base;

		SAI_TxEnable(base, false);
		LOG_WRN("TX is paused.");
	}
	goto enabled_exit;

disabled_exit_no_drop:
	i2s_tx_stream_disable(dev, false);
	return;

disabled_exit_drop:
	i2s_tx_stream_disable(dev, true);
	return;

enabled_exit:
	return;
}

static void i2s_dma_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				int status)
{
	struct device *dev = (struct device *)arg;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	void *buffer;
	int ret;

	LOG_DBG("RX cb");

	if (strm->state == I2S_STATE_ERROR) {
		i2s_rx_stream_disable(dev, true, true);
	}

	if (strm->state != I2S_STATE_STOPPING && strm->state != I2S_STATE_RUNNING) {
		return;
	}

	/* retrieve buffer from input queue */
	ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
	__ASSERT_NO_MSG(ret == 0);

	/* put buffer to output queue */
	ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("buffer %p -> out_queue %p err %d", buffer, &strm->out_queue, ret);
		goto error;
	}

	if (strm->state == I2S_STATE_STOPPING) {
		i2s_rx_stream_disable(dev, true, false);
		/* Received a STOP/DRAIN trigger */
		strm->state = I2S_STATE_READY;
		return;
	}

	/* Now the only possible case is the running state */

	/* allocate new buffer for next audio frame */
	ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("buffer alloc from slab %p err %d", strm->cfg.mem_slab, ret);
		goto error;
	}

	uint32_t data_path = strm->start_channel;

	ret = dma_reload(dev_data->dev_dma, strm->dma_channel,
			 (uint32_t)&base->RDR[data_path], (uint32_t)buffer,
			 strm->cfg.block_size);
	if (ret != 0) {
		LOG_ERR("dma_reload() failed with error 0x%x", ret);
		goto error;
	}

	/* put buffer in input queue */
	ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("%p -> in_queue %p err %d", buffer, &strm->in_queue, ret);
	}

	return;

error:
	i2s_rx_stream_disable(dev, false, false);
	strm->state = I2S_STATE_ERROR;
}

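/*
 * Drive the SoC pin-control register that selects the SAI MCLK pin
 * direction: output when this SAI is bit clock master (dir == true), input
 * when it is slave. The register base address, bit mask, and offset come
 * from the pinmuxes devicetree property.
 */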
static void enable_mclk_direction(const struct device *dev, bool dir)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	uint32_t control_base = dev_cfg->mclk_control_base;
	uint32_t offset = dev_cfg->mclk_pin_offset;
	uint32_t mask = dev_cfg->mclk_pin_mask;
	uint32_t *base = (uint32_t *)(control_base + offset);

	if (dir) {
		*base |= mask;
	} else {
		*base &= ~mask;
	}
}

static void get_mclk_rate(const struct device *dev, uint32_t *mclk)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	const struct device *ccm_dev = dev_cfg->ccm_dev;
	clock_control_subsys_t clk_sub_sys = dev_cfg->clk_sub_sys;
	uint32_t rate = 0;

	if (device_is_ready(ccm_dev)) {
		clock_control_get_rate(ccm_dev, clk_sub_sys, &rate);
	} else {
		LOG_ERR("CCM driver is not installed");
	}

	*mclk = rate;
}

static int i2s_mcux_config(const struct device *dev, enum i2s_dir dir,
			   const struct i2s_config *i2s_cfg)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	enum i2s_state *tx_state = &(dev_data->tx.state);
	enum i2s_state *rx_state = &(dev_data->rx.state);
	uint8_t word_size_bits = i2s_cfg->word_size;
	uint8_t word_size_bytes = word_size_bits / 8;
	uint8_t num_words = i2s_cfg->channels;
	sai_transceiver_t config;
	int ret = -EINVAL;
	uint32_t mclk;

	if ((dev_data->tx.state != I2S_STATE_NOT_READY) &&
	    (dev_data->tx.state != I2S_STATE_READY) &&
	    (dev_data->rx.state != I2S_STATE_NOT_READY) &&
	    (dev_data->rx.state != I2S_STATE_READY)) {
		LOG_ERR("invalid state tx(%u) rx(%u)", dev_data->tx.state, dev_data->rx.state);
		goto invalid_config;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		LOG_ERR("Invalid frame_clk_freq %u", i2s_cfg->frame_clk_freq);
		goto invalid_config;
	}

	if (word_size_bits < SAI_WORD_SIZE_BITS_MIN || word_size_bits > SAI_WORD_SIZE_BITS_MAX) {
		LOG_ERR("Unsupported I2S word size %u", word_size_bits);
		goto invalid_config;
	}

	if (num_words < SAI_WORD_PER_FRAME_MIN || num_words > SAI_WORD_PER_FRAME_MAX) {
		LOG_ERR("Unsupported number of words per frame %u", num_words);
		goto invalid_config;
	}

	if ((i2s_cfg->options & I2S_OPT_PINGPONG) == I2S_OPT_PINGPONG) {
		LOG_ERR("Ping-pong mode not supported");
		ret = -ENOTSUP;
		goto invalid_config;
	}

	memset(&config, 0, sizeof(config));

	const bool is_mclk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE;

	enable_mclk_direction(dev, !is_mclk_slave);

	get_mclk_rate(dev, &mclk);
	LOG_DBG("mclk is %u", mclk);

	/* bit clock source is MCLK */
	config.bitClock.bclkSource = kSAI_BclkSourceMclkDiv;
	/*
	 * additional settings for bclk;
	 * read the SDK header file for more details
	 */
	config.bitClock.bclkInputDelay = false;

	/* frame sync default configurations */
#if defined(FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE) && FSL_FEATURE_SAI_HAS_ON_DEMAND_MODE
	config.frameSync.frameSyncGenerateOnDemand = false;
#endif

	/* serial data default configurations */
#if defined(FSL_FEATURE_SAI_HAS_CHANNEL_MODE) && FSL_FEATURE_SAI_HAS_CHANNEL_MODE
	config.serialData.dataMode = kSAI_DataPinStateOutputZero;
#endif

	config.frameSync.frameSyncPolarity = kSAI_PolarityActiveLow;
	config.bitClock.bclkSrcSwap = false;
	/* format */
	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		SAI_GetClassicI2SConfig(&config, word_size_bits, kSAI_Stereo, dev_cfg->tx_channel);
		break;
	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		SAI_GetLeftJustifiedConfig(&config, word_size_bits, kSAI_Stereo,
					   dev_cfg->tx_channel);
		break;
	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		SAI_GetDSPConfig(&config, kSAI_FrameSyncLenOneBitClk, word_size_bits, kSAI_Stereo,
				 dev_cfg->tx_channel);
		/* We need to set the data word count manually, since the HAL
		 * function does not set it.
		 */
		config.serialData.dataWordNum = num_words;
		config.frameSync.frameSyncEarly = true;
		config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge;
		break;
	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		SAI_GetTDMConfig(&config, kSAI_FrameSyncLenPerWordWidth, word_size_bits, num_words,
				 dev_cfg->tx_channel);
		config.bitClock.bclkPolarity = kSAI_SampleOnFallingEdge;
		break;
	default:
		LOG_ERR("Unsupported I2S data format");
		ret = -EINVAL;
		goto invalid_config;
	}

	/* sync mode configurations */
	if (dir == I2S_DIR_TX) {
		config.syncMode = dev_cfg->tx_sync_mode;
	} else if (dir == I2S_DIR_RX) {
		config.syncMode = dev_cfg->rx_sync_mode;
	}

	bool frame_clk_slave = i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE;
	bool bit_clk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE;

	if (frame_clk_slave && bit_clk_slave) {
		config.masterSlave = kSAI_Slave;
	} else if (frame_clk_slave && !bit_clk_slave) {
		config.masterSlave = kSAI_Bclk_Master_FrameSync_Slave;
	} else if (!frame_clk_slave && bit_clk_slave) {
		config.masterSlave = kSAI_Bclk_Slave_FrameSync_Master;
	} else {
		config.masterSlave = kSAI_Master;
	}

	/* clock signal polarity */
	switch (i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) {
	case I2S_FMT_CLK_NF_NB:
		/* No action required, leave the configuration untouched */
		break;

	case I2S_FMT_CLK_NF_IB:
		/* Swap bclk polarity */
		config.bitClock.bclkPolarity =
			(config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge)
				? kSAI_SampleOnRisingEdge
				: kSAI_SampleOnFallingEdge;
		break;
	case I2S_FMT_CLK_IF_NB:
		/* Swap frame sync polarity */
		config.frameSync.frameSyncPolarity =
			(config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh)
				? kSAI_PolarityActiveLow
				: kSAI_PolarityActiveHigh;
		break;
	case I2S_FMT_CLK_IF_IB:
		/* Swap frame sync and bclk polarity */
		config.frameSync.frameSyncPolarity =
			(config.frameSync.frameSyncPolarity == kSAI_PolarityActiveHigh)
				? kSAI_PolarityActiveLow
				: kSAI_PolarityActiveHigh;
		config.bitClock.bclkPolarity =
			(config.bitClock.bclkPolarity == kSAI_SampleOnFallingEdge)
				? kSAI_SampleOnRisingEdge
				: kSAI_SampleOnFallingEdge;
		break;
	}

	/* PCM short format always requires that WS be one BCLK cycle */
	if ((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) != I2S_FMT_DATA_FORMAT_PCM_SHORT) {
		config.frameSync.frameSyncWidth = (uint8_t)word_size_bits;
	}

	if (dir == I2S_DIR_TX) {
		memcpy(&dev_data->tx.cfg, i2s_cfg, sizeof(struct i2s_config));
		LOG_DBG("tx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list);
		LOG_DBG("tx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks);
		LOG_DBG("tx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size);
		LOG_DBG("tx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer);

		config.fifo.fifoWatermark = (uint32_t)FSL_FEATURE_SAI_FIFO_COUNTn(base) - 1;
		/* apply the transceiver configuration */
		SAI_TxSetConfig(base, &config);
		dev_data->tx.start_channel = config.startChannel;
		/* Disable the channel FIFO */
		base->TCR3 &= ~I2S_TCR3_TCE_MASK;
		/* set the bit clock divider */
		SAI_TxSetBitClockRate(base, mclk, i2s_cfg->frame_clk_freq, word_size_bits,
				      i2s_cfg->channels);
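		/*
		 * SAI_TxSetBitClockRate() derives the divider from
		 * bclk = frame_clk_freq * word_size_bits * channels. As an
		 * illustrative worked example (values assumed, not from this
		 * driver): mclk = 24.576 MHz, 48 kHz frame clock, 32-bit
		 * words, 2 channels gives bclk = 48000 * 32 * 2 = 3.072 MHz,
		 * i.e. a divide-by-8 from MCLK.
		 */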
		LOG_DBG("tx start_channel = %d", dev_data->tx.start_channel);
		/* set up DMA settings */
		dev_data->tx.dma_cfg.source_data_size = word_size_bytes;
		dev_data->tx.dma_cfg.dest_data_size = word_size_bytes;
		dev_data->tx.dma_cfg.source_burst_length = word_size_bytes;
		dev_data->tx.dma_cfg.dest_burst_length = word_size_bytes;
		dev_data->tx.dma_cfg.user_data = (void *)dev;
		dev_data->tx.state = I2S_STATE_READY;
	} else {
		/* For RX, DMA reads from the FIFO whenever data is present */
		config.fifo.fifoWatermark = 0;

		memcpy(&dev_data->rx.cfg, i2s_cfg, sizeof(struct i2s_config));
		LOG_DBG("rx slab free_list = 0x%x", (uint32_t)i2s_cfg->mem_slab->free_list);
		LOG_DBG("rx slab num_blocks = %d", (uint32_t)i2s_cfg->mem_slab->info.num_blocks);
		LOG_DBG("rx slab block_size = %d", (uint32_t)i2s_cfg->mem_slab->info.block_size);
		LOG_DBG("rx slab buffer = 0x%x", (uint32_t)i2s_cfg->mem_slab->buffer);

		/* apply the transceiver configuration */
		SAI_RxSetConfig(base, &config);
		dev_data->rx.start_channel = config.startChannel;
		/* set the bit clock divider */
		SAI_RxSetBitClockRate(base, mclk, i2s_cfg->frame_clk_freq, word_size_bits,
				      i2s_cfg->channels);
		LOG_DBG("rx start_channel = %d", dev_data->rx.start_channel);
		/* set up DMA settings */
		dev_data->rx.dma_cfg.source_data_size = word_size_bytes;
		dev_data->rx.dma_cfg.dest_data_size = word_size_bytes;
		dev_data->rx.dma_cfg.source_burst_length = word_size_bytes;
		dev_data->rx.dma_cfg.dest_burst_length = word_size_bytes;
		dev_data->rx.dma_cfg.user_data = (void *)dev;
		dev_data->rx.state = I2S_STATE_READY;
	}

	return 0;

invalid_config:
	if (dir == I2S_DIR_TX) {
		*tx_state = I2S_STATE_NOT_READY;
	} else if (dir == I2S_DIR_RX) {
		*rx_state = I2S_STATE_NOT_READY;
	}
	return ret;
}

const struct i2s_config *i2s_mcux_config_get(const struct device *dev, enum i2s_dir dir)
{
	struct i2s_dev_data *dev_data = dev->data;

	if (dir == I2S_DIR_RX) {
		return &dev_data->rx.cfg;
	}

	return &dev_data->tx.cfg;
}

static int i2s_tx_stream_start(const struct device *dev)
{
	int ret = 0;
	void *buffer;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	/* retrieve buffer from input queue */
	ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("No buffer in input queue to start");
		return -EIO;
	}

	LOG_DBG("tx stream start");

	/* Driver keeps track of how many DMA blocks can be loaded to the DMA */
	strm->free_tx_dma_blocks = MAX_TX_DMA_BLOCKS;

	/* Configure the DMA with the first TX block */
	struct dma_block_config *blk_cfg = &strm->dma_block;

	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	uint32_t data_path = strm->start_channel;

	blk_cfg->dest_address = (uint32_t)&base->TDR[data_path];
	blk_cfg->source_address = (uint32_t)buffer;
	blk_cfg->block_size = strm->cfg.block_size;
	blk_cfg->dest_scatter_en = 1;

	strm->dma_cfg.block_count = 1;

	strm->dma_cfg.head_block = &strm->dma_block;
	strm->dma_cfg.user_data = (void *)dev;

	(strm->free_tx_dma_blocks)--;
	dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

	/* put buffer in output queue */
	ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("failed to put buffer in output queue");
		return ret;
	}

	uint8_t blocks_queued;

	ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
	if (ret) {
		LOG_ERR("i2s_tx_reload_multiple_dma_blocks() failed (%d)", ret);
		return ret;
	}

	ret = dma_start(dev_dma, strm->dma_channel);
	if (ret < 0) {
		LOG_ERR("dma_start failed (%d)", ret);
		return ret;
	}

	/* Enable DMA enable bit */
	SAI_TxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

	/* Enable the channel FIFO */
	base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel);

	/* Enable SAI Tx clock */
	SAI_TxEnable(base, true);

	return 0;
}

static int i2s_rx_stream_start(const struct device *dev)
{
	int ret = 0;
	void *buffer;
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	const struct device *dev_dma = dev_data->dev_dma;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	uint8_t num_of_bufs;

	num_of_bufs = k_mem_slab_num_free_get(strm->cfg.mem_slab);

	/*
	 * Need at least NUM_DMA_BLOCKS_RX_PREP buffers on the RX memory slab
	 * for reliable DMA reception.
	 */
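	/*
	 * With NUM_DMA_BLOCKS_RX_PREP defined as 3 above, for example, the RX
	 * mem_slab supplied through i2s_configure() must still hold at least
	 * three free blocks when I2S_TRIGGER_START is issued.
	 */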
	if (num_of_bufs < NUM_DMA_BLOCKS_RX_PREP) {
		return -EINVAL;
	}

	/* allocate the first receive buffer from the SLAB */
	ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_DBG("buffer alloc from mem_slab failed (%d)", ret);
		return ret;
	}

	/* Configure DMA block */
	struct dma_block_config *blk_cfg = &strm->dma_block;

	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	uint32_t data_path = strm->start_channel;

	blk_cfg->dest_address = (uint32_t)buffer;
	blk_cfg->source_address = (uint32_t)&base->RDR[data_path];
	blk_cfg->block_size = strm->cfg.block_size;

	blk_cfg->source_gather_en = 1;

	strm->dma_cfg.block_count = 1;
	strm->dma_cfg.head_block = &strm->dma_block;
	strm->dma_cfg.user_data = (void *)dev;

	dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

	/* put buffer in input queue */
	ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
	if (ret != 0) {
		LOG_ERR("failed to put buffer in input queue, ret1 %d", ret);
		return ret;
	}

	/* prep DMA for each of the remaining (NUM_DMA_BLOCKS_RX_PREP - 1) buffers */
	for (int i = 0; i < NUM_DMA_BLOCKS_RX_PREP - 1; i++) {

		/* allocate receive buffer from SLAB */
		ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("buffer alloc from mem_slab failed (%d)", ret);
			return ret;
		}

		ret = dma_reload(dev_dma, strm->dma_channel, (uint32_t)&base->RDR[data_path],
				 (uint32_t)buffer, blk_cfg->block_size);
		if (ret != 0) {
			LOG_ERR("dma_reload() failed with error 0x%x", ret);
			return ret;
		}

		/* put buffer in input queue */
		ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
		if (ret != 0) {
			LOG_ERR("failed to put buffer in input queue, ret2 %d", ret);
			return ret;
		}
	}

	LOG_DBG("Starting DMA Ch%u", strm->dma_channel);
	ret = dma_start(dev_dma, strm->dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA Ch%d (%d)", strm->dma_channel, ret);
		return ret;
	}

	/* Enable DMA enable bit */
	SAI_RxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

	/* Enable the channel FIFO */
	base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel);

	/* Enable SAI Rx clock */
	SAI_RxEnable(base, true);

	return 0;
}

static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm;
	unsigned int key;
	int ret = 0;

	if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	}

	strm = (dir == I2S_DIR_TX) ? &dev_data->tx : &dev_data->rx;

	key = irq_lock();
	switch (cmd) {
	case I2S_TRIGGER_START:
		if (strm->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %u", strm->state);
			ret = -EIO;
			break;
		}

		if (dir == I2S_DIR_TX) {
			ret = i2s_tx_stream_start(dev);
		} else {
			ret = i2s_rx_stream_start(dev);
		}

		if (ret < 0) {
			LOG_DBG("START trigger failed %d", ret);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_RUNNING;
		strm->last_block = false;
		break;

	case I2S_TRIGGER_DROP:
		if (strm->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_tx_stream_disable(dev, true);
		} else {
			i2s_rx_stream_disable(dev, true, true);
		}
		break;

	case I2S_TRIGGER_STOP:
		if (strm->state != I2S_STATE_RUNNING) {
			LOG_ERR("STOP trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_STOPPING;
		strm->last_block = true;
		break;

	case I2S_TRIGGER_DRAIN:
		if (strm->state != I2S_STATE_RUNNING) {
			LOG_ERR("DRAIN trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}

		strm->state = I2S_STATE_STOPPING;
		break;

	case I2S_TRIGGER_PREPARE:
		if (strm->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE trigger: invalid state %d", strm->state);
			ret = -EIO;
			break;
		}
		strm->state = I2S_STATE_READY;
		if (dir == I2S_DIR_TX) {
			i2s_tx_stream_disable(dev, true);
		} else {
			i2s_rx_stream_disable(dev, true, true);
		}
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		ret = -EINVAL;
	}

	irq_unlock(key);
	return ret;
}

static int i2s_mcux_read(const struct device *dev, void **mem_block, size_t *size)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->rx;
	void *buffer;
	int status, ret = 0;

	LOG_DBG("i2s_mcux_read");
	if (strm->state == I2S_STATE_NOT_READY) {
		LOG_ERR("invalid state %d", strm->state);
		return -EIO;
	}

	status = k_msgq_get(&strm->out_queue, &buffer, SYS_TIMEOUT_MS(strm->cfg.timeout));
	if (status != 0) {
		if (strm->state == I2S_STATE_ERROR) {
			ret = -EIO;
		} else {
			LOG_DBG("need retry");
			ret = -EAGAIN;
		}
		return ret;
	}

	*mem_block = buffer;
	*size = strm->cfg.block_size;
	return 0;
}

static int i2s_mcux_write(const struct device *dev, void *mem_block, size_t size)
{
	struct i2s_dev_data *dev_data = dev->data;
	struct stream *strm = &dev_data->tx;
	int ret;

	LOG_DBG("i2s_mcux_write");
	if (strm->state != I2S_STATE_RUNNING && strm->state != I2S_STATE_READY) {
		LOG_ERR("invalid state (%d)", strm->state);
		return -EIO;
	}

	ret = k_msgq_put(&strm->in_queue, &mem_block, SYS_TIMEOUT_MS(strm->cfg.timeout));
	if (ret) {
		LOG_DBG("k_msgq_put returned code %d", ret);
		return ret;
	}

	if (strm->state == I2S_STATE_RUNNING && strm->free_tx_dma_blocks >= MAX_TX_DMA_BLOCKS) {
		uint8_t blocks_queued = 0;
		const struct i2s_mcux_config *dev_cfg = dev->config;
		I2S_Type *base = (I2S_Type *)dev_cfg->base;
		/* DMA may have been stopped by a reload failure in the TX
		 * callback, so this is a good place to reload it and resume TX.
		 */
		ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
		if (ret == 0 && blocks_queued > 0) {
			SAI_TxEnable(base, true);
			LOG_WRN("TX is resumed");
		} else {
			LOG_ERR("TX block reload err, TX is not resumed");
			return ret;
		}
	}

	return ret;
}

static void sai_driver_irq(const struct device *dev)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	if ((base->TCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) {
		/* Clear FIFO error flag to continue transfer */
		SAI_TxClearStatusFlags(base, I2S_TCSR_FEF_MASK);

		/* Reset FIFO for safety */
		SAI_TxSoftwareReset(base, kSAI_ResetTypeFIFO);

		LOG_DBG("sai tx error occurred");
	}

	if ((base->RCSR & I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) {
		/* Clear FIFO error flag to continue transfer */
		SAI_RxClearStatusFlags(base, I2S_RCSR_FEF_MASK);

		/* Reset FIFO for safety */
		SAI_RxSoftwareReset(base, kSAI_ResetTypeFIFO);

		LOG_DBG("sai rx error occurred");
	}
}

/* clear pending IRQ sources */
static void i2s_mcux_isr(void *arg)
{
	struct device *dev = (struct device *)arg;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;

	if ((base->RCSR & I2S_RCSR_FEF_MASK) == I2S_RCSR_FEF_MASK) {
		sai_driver_irq(dev);
	}

	if ((base->TCSR & I2S_TCSR_FEF_MASK) == I2S_TCSR_FEF_MASK) {
		sai_driver_irq(dev);
	}
	/*
	 * Added for ARM errata 838869: on Cortex-M4/Cortex-M4F, a store
	 * immediate overlapping an exception return operation might vector
	 * to an incorrect interrupt.
	 */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
	barrier_dsync_fence_full();
#endif
}

static void audio_clock_settings(const struct device *dev)
{
#ifdef CONFIG_I2S_HAS_PLL_SETTING
	clock_audio_pll_config_t audioPllConfig;
	const struct i2s_mcux_config *dev_cfg = dev->config;
	uint32_t clock_name = (uint32_t)dev_cfg->clk_sub_sys;

	/* Clock setting for SAI */
	imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src, dev_cfg->clk_pre_div,
				   dev_cfg->clk_src_div);

#ifdef CONFIG_SOC_SERIES_IMXRT11XX
	audioPllConfig.loopDivider = dev_cfg->pll_lp;
	audioPllConfig.postDivider = dev_cfg->pll_pd;
	audioPllConfig.numerator = dev_cfg->pll_num;
	audioPllConfig.denominator = dev_cfg->pll_den;
	audioPllConfig.ssEnable = false;
#elif defined CONFIG_SOC_SERIES_IMXRT10XX
	audioPllConfig.src = dev_cfg->pll_src;
	audioPllConfig.loopDivider = dev_cfg->pll_lp;
	audioPllConfig.postDivider = dev_cfg->pll_pd;
	audioPllConfig.numerator = dev_cfg->pll_num;
	audioPllConfig.denominator = dev_cfg->pll_den;
#else
#error Initialize SOC Series-specific clock_audio_pll_config_t
#endif /* CONFIG_SOC_SERIES */

	CLOCK_InitAudioPll(&audioPllConfig);
#endif
}

static int i2s_mcux_initialize(const struct device *dev)
{
	const struct i2s_mcux_config *dev_cfg = dev->config;
	I2S_Type *base = (I2S_Type *)dev_cfg->base;
	struct i2s_dev_data *dev_data = dev->data;
	uint32_t mclk;
	int err;

	if (!dev_data->dev_dma) {
		LOG_ERR("DMA device not found");
		return -ENODEV;
	}

	/* Initialize the buffer queues */
	k_msgq_init(&dev_data->tx.in_queue, (char *)dev_data->tx_in_msgs, sizeof(void *),
		    CONFIG_I2S_TX_BLOCK_COUNT);
	k_msgq_init(&dev_data->rx.in_queue, (char *)dev_data->rx_in_msgs, sizeof(void *),
		    CONFIG_I2S_RX_BLOCK_COUNT);
	k_msgq_init(&dev_data->tx.out_queue, (char *)dev_data->tx_out_msgs, sizeof(void *),
		    CONFIG_I2S_TX_BLOCK_COUNT);
	k_msgq_init(&dev_data->rx.out_queue, (char *)dev_data->rx_out_msgs, sizeof(void *),
		    CONFIG_I2S_RX_BLOCK_COUNT);

	/* register ISR */
	dev_cfg->irq_connect(dev);
	/* pinctrl */
	err = pinctrl_apply_state(dev_cfg->pinctrl, PINCTRL_STATE_DEFAULT);
	if (err) {
		LOG_ERR("pinctrl setup failed (%d)", err);
		return err;
	}

	/* clock configuration */
	audio_clock_settings(dev);

	SAI_Init(base);

	dev_data->tx.state = I2S_STATE_NOT_READY;
	dev_data->rx.state = I2S_STATE_NOT_READY;

#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
	(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
	sai_master_clock_t mclkConfig = {
#if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)
		.mclkOutputEnable = true,
#if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && (FSL_FEATURE_SAI_HAS_NO_MCR_MICS))
		.mclkSource = kSAI_MclkSourceSysclk,
#endif
#endif
	};
#endif

	get_mclk_rate(dev, &mclk);
	/* master clock configurations */
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
	(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
#if ((defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)) || \
	(defined(FSL_FEATURE_SAI_HAS_MCR_MCLK_POST_DIV) && (FSL_FEATURE_SAI_HAS_MCR_MCLK_POST_DIV)))
	mclkConfig.mclkHz = mclk;
	mclkConfig.mclkSourceClkHz = mclk;
#endif
	SAI_SetMasterClockConfig(base, &mclkConfig);
#endif

	LOG_INF("Device %s initialized", dev->name);

	return 0;
}

static DEVICE_API(i2s, i2s_mcux_driver_api) = {
	.configure = i2s_mcux_config,
	.read = i2s_mcux_read,
	.write = i2s_mcux_write,
	.config_get = i2s_mcux_config_get,
	.trigger = i2s_mcux_trigger,
};

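/*
 * Instantiation is driven by the devicetree. An illustrative, not
 * board-accurate, node for the nxp,mcux-i2s binding, showing the
 * driver-specific properties consumed by the macro below (cell values are
 * placeholders; dmas, clocks, pll-clocks, and pinmuxes are normally set at
 * the SoC level):
 *
 *	&sai1 {
 *		nxp,tx-dma-channel = <0>;
 *		nxp,rx-dma-channel = <1>;
 *		nxp,tx-channel = <0>;
 *		nxp,tx-sync-mode = <0>;
 *		nxp,rx-sync-mode = <0>;
 *		pinctrl-0 = <&pinmux_sai1>;
 *		pinctrl-names = "default";
 *		status = "okay";
 *	};
 */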
#define I2S_MCUX_INIT(i2s_id) \
	static void i2s_irq_connect_##i2s_id(const struct device *dev); \
 \
	PINCTRL_DT_INST_DEFINE(i2s_id); \
 \
	static const struct i2s_mcux_config i2s_##i2s_id##_config = { \
		.base = (I2S_Type *)DT_INST_REG_ADDR(i2s_id), \
		.clk_src = DT_INST_PROP_OR(i2s_id, clock_mux, 0), \
		.clk_pre_div = DT_INST_PROP_OR(i2s_id, pre_div, 0), \
		.clk_src_div = DT_INST_PROP_OR(i2s_id, podf, 0), \
		.pll_src = DT_PHA_BY_NAME_OR(DT_DRV_INST(i2s_id), pll_clocks, src, value, 0), \
		.pll_lp = DT_PHA_BY_NAME_OR(DT_DRV_INST(i2s_id), pll_clocks, lp, value, 0), \
		.pll_pd = DT_PHA_BY_NAME_OR(DT_DRV_INST(i2s_id), pll_clocks, pd, value, 0), \
		.pll_num = DT_PHA_BY_NAME_OR(DT_DRV_INST(i2s_id), pll_clocks, num, value, 0), \
		.pll_den = DT_PHA_BY_NAME_OR(DT_DRV_INST(i2s_id), pll_clocks, den, value, 0), \
		.mclk_control_base = DT_REG_ADDR(DT_PHANDLE(DT_DRV_INST(i2s_id), pinmuxes)), \
		.mclk_pin_mask = DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), pinmuxes, 0, mask), \
		.mclk_pin_offset = DT_PHA_BY_IDX(DT_DRV_INST(i2s_id), pinmuxes, 0, offset), \
		.clk_sub_sys = \
			(clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_IDX(i2s_id, 0, name), \
		.ccm_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i2s_id)), \
		.irq_connect = i2s_irq_connect_##i2s_id, \
		.pinctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(i2s_id), \
		.tx_sync_mode = \
			DT_INST_PROP(i2s_id, nxp_tx_sync_mode) ? kSAI_ModeSync : kSAI_ModeAsync, \
		.rx_sync_mode = \
			DT_INST_PROP(i2s_id, nxp_rx_sync_mode) ? kSAI_ModeSync : kSAI_ModeAsync, \
		.tx_channel = DT_INST_PROP(i2s_id, nxp_tx_channel), \
	}; \
 \
	static struct i2s_dev_data i2s_##i2s_id##_data = { \
		.dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(i2s_id, rx)), \
		.tx = \
			{ \
				.dma_channel = DT_INST_PROP(i2s_id, nxp_tx_dma_channel), \
				.dma_cfg = \
					{ \
						.source_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dest_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dma_callback = i2s_dma_tx_callback, \
						.complete_callback_en = 1, \
						.error_callback_dis = 1, \
						.block_count = 1, \
						.head_block = &i2s_##i2s_id##_data.tx.dma_block, \
						.channel_direction = MEMORY_TO_PERIPHERAL, \
						.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, tx, \
										      source), \
						.cyclic = 1, \
					}, \
			}, \
		.rx = \
			{ \
				.dma_channel = DT_INST_PROP(i2s_id, nxp_rx_dma_channel), \
				.dma_cfg = \
					{ \
						.source_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dest_burst_length = CONFIG_I2S_EDMA_BURST_SIZE, \
						.dma_callback = i2s_dma_rx_callback, \
						.complete_callback_en = 1, \
						.error_callback_dis = 1, \
						.block_count = 1, \
						.head_block = &i2s_##i2s_id##_data.rx.dma_block, \
						.channel_direction = PERIPHERAL_TO_MEMORY, \
						.dma_slot = DT_INST_DMAS_CELL_BY_NAME(i2s_id, rx, \
										      source), \
						.cyclic = 1, \
					}, \
			}, \
	}; \
 \
	DEVICE_DT_INST_DEFINE(i2s_id, &i2s_mcux_initialize, NULL, &i2s_##i2s_id##_data, \
			      &i2s_##i2s_id##_config, POST_KERNEL, CONFIG_I2S_INIT_PRIORITY, \
			      &i2s_mcux_driver_api); \
 \
	static void i2s_irq_connect_##i2s_id(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(i2s_id, 0, irq), \
			    DT_INST_IRQ_BY_IDX(i2s_id, 0, priority), i2s_mcux_isr, \
			    DEVICE_DT_INST_GET(i2s_id), 0); \
		irq_enable(DT_INST_IRQN(i2s_id)); \
	}

DT_INST_FOREACH_STATUS_OKAY(I2S_MCUX_INIT)