/*
 * Copyright (c) 2018 STMicroelectronics
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT st_stm32_i2s

#include <string.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/dma/dma_stm32.h>
#include <soc.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_spi.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/cache.h>

#include "i2s_ll_stm32.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(i2s_ll_stm32);

static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor)
{
	return (dividend + (divisor / 2U)) / divisor;
}
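
/*
 * Example: div_round_closest(7, 2) == 4, whereas plain integer division
 * 7 / 2 truncates to 3.
 */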

static bool queue_is_empty(struct k_msgq *q)
{
	return k_msgq_num_used_get(q) == 0;
}

/*
 * Get data from the queue
 */
static int queue_get(struct k_msgq *q, void **mem_block, size_t *size, int32_t timeout)
{
	struct queue_item item;
	int result = k_msgq_get(q, &item, SYS_TIMEOUT_MS(timeout));

	if (result == 0) {
		*mem_block = item.mem_block;
		*size = item.size;
	}
	return result;
}

/*
 * Put data in the queue
 */
static int queue_put(struct k_msgq *q, void *mem_block, size_t size, int32_t timeout)
{
	struct queue_item item = {.mem_block = mem_block, .size = size};

	return k_msgq_put(q, &item, SYS_TIMEOUT_MS(timeout));
}
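
/*
 * Note: both helpers take the timeout in milliseconds, following the
 * i2s_config convention; SYS_TIMEOUT_MS() converts it to a k_timeout_t
 * and maps SYS_FOREVER_MS to K_FOREVER.
 */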

static void stream_queue_drop(struct stream *s)
{
	size_t size;
	void *mem_block;

	while (queue_get(s->msgq, &mem_block, &size, 0) == 0) {
		k_mem_slab_free(s->cfg.mem_slab, mem_block);
	}
}

static int i2s_stm32_enable_clock(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	const struct device *clk;
	int ret;

	clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(clk)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken[0]);
	if (ret != 0) {
		LOG_ERR("Could not enable I2S clock");
		return -EIO;
	}

	if (cfg->pclk_len > 1) {
		/* Enable I2S clock source */
		ret = clock_control_configure(clk,
					      (clock_control_subsys_t)&cfg->pclken[1],
					      NULL);
		if (ret < 0) {
			LOG_ERR("Could not configure I2S domain clock");
			return -EIO;
		}
	}

	return 0;
}
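
/*
 * Note: pclken[0] is the peripheral (bus) gating clock; when a second entry
 * is present in the devicetree 'clocks' property it selects the kernel clock
 * source, whose rate i2s_stm32_set_clock() uses to derive the bit clock
 * divider.
 */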

static int i2s_stm32_set_clock(const struct device *dev,
			       uint32_t bit_clk_freq)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	uint32_t freq_in = 0U;
	uint8_t i2s_div, i2s_odd;

	if (cfg->pclk_len > 1) {
		/* Handle multiple clock sources */
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t)&cfg->pclken[1],
					   &freq_in) < 0) {
			LOG_ERR("Failed to get clock rate for pclken[1]");
			return -EIO;
		}
	} else {
		/* Handle single clock source */
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t)&cfg->pclken[0],
					   &freq_in) < 0) {
			LOG_ERR("Failed to get clock rate for pclken[0]");
			return -EIO;
		}
	}
	/*
	 * The ratio between the input clock (I2SxClk) and the output
	 * clock on the pad (I2S_CK) is obtained using the
	 * following formula:
	 * (i2s_div * 2) + i2s_odd
	 */
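	/*
	 * Example: with freq_in = 12.288 MHz and bit_clk_freq = 1.536 MHz
	 * (48 kHz x 32-bit frame), div_round_closest() yields 8, giving
	 * i2s_div = 4 and i2s_odd = 0, i.e. a pad clock ratio of
	 * (4 * 2) + 0 = 8.
	 */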
	i2s_div = div_round_closest(freq_in, bit_clk_freq);
	i2s_odd = (i2s_div & 0x1) ? 1 : 0;
	i2s_div >>= 1;

	/* i2s_div == 0 || i2s_div == 1 are forbidden */
	if (i2s_div < 2U) {
		LOG_ERR("The linear prescaler value is unsupported");
		return -EINVAL;
	}

	LOG_DBG("i2s_div: %d - i2s_odd: %d", i2s_div, i2s_odd);

	LL_I2S_SetPrescalerLinear(cfg->i2s, i2s_div);
	LL_I2S_SetPrescalerParity(cfg->i2s, i2s_odd);

	return 0;
}

static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir,
			       const struct i2s_config *i2s_cfg)
{
	const struct i2s_stm32_cfg *const cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	/* For words greater than 16-bit the channel length is considered 32-bit */
	const uint32_t channel_length = i2s_cfg->word_size > 16U ? 32U : 16U;
	/*
	 * comply with the i2s_config driver remark:
	 * When I2S data format is selected parameter channels is ignored,
	 * number of words in a frame is always 2.
	 */
	const uint32_t num_channels =
		((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) == I2S_FMT_DATA_FORMAT_I2S)
			? 2U
			: i2s_cfg->channels;
	struct stream *stream;
	uint32_t bit_clk_freq;
	bool enable_mck;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state");
		return -EINVAL;
	}

	stream->master = true;
	if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE ||
	    i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) {
		stream->master = false;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		stream_queue_drop(stream);
		memset(&stream->cfg, 0, sizeof(struct i2s_config));
		stream->state = I2S_STATE_NOT_READY;
		return 0;
	}

	memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config));

	/* conditions to enable master clock output */
	enable_mck = stream->master && cfg->master_clk_sel;

	/* set I2S bitclock */
	bit_clk_freq = i2s_cfg->frame_clk_freq *
		       channel_length * num_channels;

	if (enable_mck) {
		/*
		 * Compensate for the master clock dividers.
		 * MCK = N * CK, where N:
		 * 8 when the channel frame is 16-bit wide
		 * 4 when the channel frame is 32-bit wide
		 */
		bit_clk_freq *= channel_length == 16U ? 4U * 2U : 4U;
	}
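	/*
	 * Example: a 48 kHz frame with two 16-bit channels requires
	 * bit_clk_freq = 48000 * 16 * 2 = 1.536 MHz; with MCK enabled the
	 * frequency requested from the prescaler is scaled by 8 to
	 * 12.288 MHz, i.e. 256 * fs.
	 */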

	ret = i2s_stm32_set_clock(dev, bit_clk_freq);
	if (ret < 0) {
		return ret;
	}

	/* set I2S Master Clock output on the MCK pin, if enabled in the DT */
	if (enable_mck) {
		LL_I2S_EnableMasterClock(cfg->i2s);
	} else {
		LL_I2S_DisableMasterClock(cfg->i2s);
	}

	/*
	 * set I2S Data Format
	 * (16-bit data extended on a 32-bit channel length is not supported)
	 */
	if (i2s_cfg->word_size == 16U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_16B);
	} else if (i2s_cfg->word_size == 24U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_24B);
	} else if (i2s_cfg->word_size == 32U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_32B);
	} else {
		LOG_ERR("invalid word size");
		return -EINVAL;
	}

	/* set I2S Standard */
	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PHILIPS);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_SHORT);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_LONG);
		break;

	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_MSB);
		break;

	case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_LSB);
		break;

	default:
		LOG_ERR("Unsupported I2S data format");
		return -EINVAL;
	}

	/* set I2S clock polarity */
	if ((i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) == I2S_FMT_BIT_CLK_INV) {
		LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_HIGH);
	} else {
		LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_LOW);
	}

	stream->state = I2S_STATE_READY;
	return 0;
}

static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
			     enum i2s_trigger_cmd cmd)
{
	struct i2s_stm32_data *const dev_data = dev->data;
	const struct i2s_stm32_cfg *const cfg = dev->config;
	struct stream *stream;
	unsigned int key;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %d",
				stream->state);
			return -EIO;
		}

		__ASSERT_NO_MSG(stream->mem_block == NULL);

		ret = stream->stream_start(stream, dev);
		if (ret < 0) {
			LOG_ERR("START trigger failed %d", ret);
			return ret;
		}

		stream->state = I2S_STATE_RUNNING;
		stream->last_block = false;
		break;

	case I2S_TRIGGER_STOP:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("STOP trigger: invalid state");
			return -EIO;
		}
do_trigger_stop:
		if (ll_func_i2s_dma_busy(cfg->i2s)) {
			stream->state = I2S_STATE_STOPPING;
			/*
			 * Indicate that the transition to I2S_STATE_STOPPING
			 * was triggered by the STOP command
			 */
			stream->tx_stop_for_drain = false;
		} else {
			stream->stream_disable(stream, dev);
			stream->state = I2S_STATE_READY;
			stream->last_block = true;
		}
		irq_unlock(key);
		break;

	case I2S_TRIGGER_DRAIN:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("DRAIN trigger: invalid state");
			return -EIO;
		}

		if (dir == I2S_DIR_TX) {
			if (!queue_is_empty(stream->msgq) ||
			    ll_func_i2s_dma_busy(cfg->i2s)) {
				stream->state = I2S_STATE_STOPPING;
				/*
				 * Indicate that the transition to I2S_STATE_STOPPING
				 * was triggered by the DRAIN command
				 */
				stream->tx_stop_for_drain = true;
			} else {
				stream->stream_disable(stream, dev);
				stream->state = I2S_STATE_READY;
			}
		} else if (dir == I2S_DIR_RX) {
			goto do_trigger_stop;
		} else {
			LOG_ERR("Invalid direction");
			return -EINVAL;
		}
		irq_unlock(key);
		break;

	case I2S_TRIGGER_DROP:
		if (stream->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP trigger: invalid state");
			return -EIO;
		}
		stream->stream_disable(stream, dev);
		stream_queue_drop(stream);
		stream->state = I2S_STATE_READY;
		break;

	case I2S_TRIGGER_PREPARE:
		if (stream->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE trigger: invalid state");
			return -EIO;
		}
		stream->state = I2S_STATE_READY;
		stream_queue_drop(stream);
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		return -EINVAL;
	}

	return 0;
}

static int i2s_stm32_read(const struct device *dev, void **mem_block,
			  size_t *size)
{
	struct i2s_stm32_data *const dev_data = dev->data;
	int ret;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	/* Get data from the beginning of the RX queue */
	ret = queue_get(dev_data->rx.msgq, mem_block, size, dev_data->rx.cfg.timeout);
	if (ret < 0) {
		return -EIO;
	}

	return 0;
}

static int i2s_stm32_write(const struct device *dev, void *mem_block,
			   size_t size)
{
	struct i2s_stm32_data *const dev_data = dev->data;

	if (dev_data->tx.state != I2S_STATE_RUNNING &&
	    dev_data->tx.state != I2S_STATE_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	/* Add data to the end of the TX queue */
	return queue_put(dev_data->tx.msgq, mem_block, size, dev_data->tx.cfg.timeout);
}

static DEVICE_API(i2s, i2s_stm32_driver_api) = {
	.configure = i2s_stm32_configure,
	.read = i2s_stm32_read,
	.write = i2s_stm32_write,
	.trigger = i2s_stm32_trigger,
};
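
/*
 * Typical application-side TX usage (illustrative sketch only; 'tx_slab',
 * 'BLOCK_SIZE' and the configuration values are application-defined
 * assumptions, not part of this driver):
 *
 *	struct i2s_config cfg = {
 *		.word_size = 16,
 *		.channels = 2,
 *		.format = I2S_FMT_DATA_FORMAT_I2S,
 *		.options = I2S_OPT_FRAME_CLK_MASTER | I2S_OPT_BIT_CLK_MASTER,
 *		.frame_clk_freq = 48000,
 *		.mem_slab = &tx_slab,
 *		.block_size = BLOCK_SIZE,
 *		.timeout = 1000,
 *	};
 *	void *block;
 *
 *	i2s_configure(dev, I2S_DIR_TX, &cfg);
 *	k_mem_slab_alloc(&tx_slab, &block, K_FOREVER);
 *	// ... fill block with BLOCK_SIZE bytes of audio ...
 *	i2s_write(dev, block, BLOCK_SIZE);
 *	i2s_trigger(dev, I2S_DIR_TX, I2S_TRIGGER_START);
 *	i2s_trigger(dev, I2S_DIR_TX, I2S_TRIGGER_DRAIN);
 */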

#define STM32_DMA_NUM_CHANNELS 8
static const struct device *active_dma_rx_channel[STM32_DMA_NUM_CHANNELS];
static const struct device *active_dma_tx_channel[STM32_DMA_NUM_CHANNELS];

static int reload_dma(const struct device *dev_dma, uint32_t channel,
		      struct dma_config *dcfg, void *src, void *dst,
		      uint32_t blk_size)
{
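	/*
	 * Note: dcfg is accepted for symmetry with start_dma() but is not
	 * referenced here; dma_reload() reuses the channel configuration
	 * installed by the previous dma_config() call and only updates the
	 * addresses and size.
	 */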
	int ret;

	ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, blk_size);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

static int start_dma(const struct device *dev_dma, uint32_t channel,
		     struct dma_config *dcfg, void *src,
		     bool src_addr_increment, void *dst,
		     bool dst_addr_increment, uint8_t fifo_threshold,
		     uint32_t blk_size)
{
	struct dma_block_config blk_cfg;
	int ret;

	memset(&blk_cfg, 0, sizeof(blk_cfg));
	blk_cfg.block_size = blk_size;
	blk_cfg.source_address = (uint32_t)src;
	blk_cfg.dest_address = (uint32_t)dst;
	if (src_addr_increment) {
		blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}
	if (dst_addr_increment) {
		blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}
	blk_cfg.fifo_mode_control = fifo_threshold;

	dcfg->head_block = &blk_cfg;

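	/*
	 * Note: blk_cfg lives on this function's stack. This assumes
	 * dma_config() consumes the block list before returning rather than
	 * retaining the pointer, which holds for the STM32 DMA drivers.
	 */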
	ret = dma_config(dev_dma, channel, dcfg);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel);
static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel);
static void rx_stream_disable(struct stream *stream, const struct device *dev);
static void tx_stream_disable(struct stream *stream, const struct device *dev);

/* This function is executed in the interrupt context */
static void dma_rx_callback(const struct device *dma_dev, void *arg,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_rx_dma_channel(channel);
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	void *mblk_tmp;
	int ret;

	if (status < 0) {
		ret = -EIO;
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* Stop reception if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		goto rx_disable;
	}

	mblk_tmp = stream->mem_block;

	/* Prepare to receive the next data block */
	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	ret = reload_dma(stream->dev_dma, stream->dma_channel,
			 &stream->dma_cfg,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			 (void *)LL_SPI_DMA_GetRxRegAddr(cfg->i2s),
#else
			 (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			 stream->mem_block,
			 stream->cfg.block_size);
	if (ret < 0) {
		LOG_DBG("Failed to start RX DMA transfer: %d", ret);
		goto rx_disable;
	}

	/* Ensure cache coherency after the DMA write operation */
	sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size);

	/* All block data received */
	ret = queue_put(stream->msgq, mblk_tmp,
			stream->cfg.block_size, 0);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	/* Stop reception if it was requested */
	if (stream->state == I2S_STATE_STOPPING) {
		stream->state = I2S_STATE_READY;
		goto rx_disable;
	}

	return;

rx_disable:
	rx_stream_disable(stream, dev);
}

static void dma_tx_callback(const struct device *dma_dev, void *arg,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_tx_dma_channel(channel);
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	size_t mem_block_size;
	int ret;

	if (status < 0) {
		ret = -EIO;
		stream->state = I2S_STATE_ERROR;
		goto tx_disable;
	}

	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* All block data sent */
	k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
	stream->mem_block = NULL;

	/* Stop transmission if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		LOG_ERR("TX error detected");
		goto tx_disable;
	}

	/* Check if we finished transferring one block and stopping is requested */
	if ((stream->state == I2S_STATE_STOPPING) && (status == DMA_STATUS_COMPLETE)) {
		/*
		 * Check whether all TX samples have been completely handled.
		 * As stated in the Zephyr I2S specification, the DRAIN command
		 * sends all data remaining in the transmit queue before
		 * stopping the transmission.
		 */
		if (queue_is_empty(stream->msgq)) {
			stream_queue_drop(stream);
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		} else if (stream->tx_stop_for_drain == false) {
			/*
			 * In case of the STOP command, just stop the
			 * transmission at the current block boundary;
			 * the transmission can be resumed later.
			 */
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		}
		/* else: DRAIN trigger -> continue TX normally until the queue is empty */
	}

	/* Stop transmission if it was requested */
	if (stream->last_block) {
		stream->state = I2S_STATE_READY;
		goto tx_disable;
	}

	/* Prepare to send the next data block */
	ret = queue_get(stream->msgq, &stream->mem_block,
			&mem_block_size, 0);
	if (ret < 0) {
		if (stream->state == I2S_STATE_STOPPING) {
			stream->state = I2S_STATE_READY;
		} else {
			stream->state = I2S_STATE_ERROR;
		}
		goto tx_disable;
	}

	/* Ensure cache coherency before the DMA read operation */
	sys_cache_data_flush_range(stream->mem_block, mem_block_size);

	ret = reload_dma(stream->dev_dma, stream->dma_channel,
			 &stream->dma_cfg,
			 stream->mem_block,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			 (void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s),
#else
			 (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			 mem_block_size);
	if (ret < 0) {
		LOG_DBG("Failed to start TX DMA transfer: %d", ret);
		goto tx_disable;
	}

	return;

tx_disable:
	tx_stream_disable(stream, dev);
}

static uint32_t i2s_stm32_irq_count;
static uint32_t i2s_stm32_irq_ovr_count;
static uint32_t i2s_stm32_irq_udr_count;

static void i2s_stm32_isr(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	/* OVR error must be explicitly cleared */
	if (LL_I2S_IsActiveFlag_OVR(cfg->i2s)) {
		i2s_stm32_irq_ovr_count++;
		LL_I2S_ClearFlag_OVR(cfg->i2s);
	}

	/* NOTE: UDR error must be explicitly cleared on STM32H7 */
	if (LL_I2S_IsActiveFlag_UDR(cfg->i2s)) {
		i2s_stm32_irq_udr_count++;
		LL_I2S_ClearFlag_UDR(cfg->i2s);
	}

	i2s_stm32_irq_count++;
}
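
/*
 * The counters above are lightweight diagnostics (e.g. readable from a
 * debugger); clearing OVR/UDR in the ISR is what actually prevents the
 * error interrupt from retriggering.
 */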

static int i2s_stm32_initialize(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	int ret, i;

	/* Initialize the variable used to handle the TX drain/stop logic */
	stream->tx_stop_for_drain = false;

	/* Enable I2S clock propagation */
	ret = i2s_stm32_enable_clock(dev);
	if (ret < 0) {
		LOG_ERR("%s: clock enabling failed: %d", __func__, ret);
		return -EIO;
	}

	/* Configure DT-provided device signals when available */
	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("I2S pinctrl setup failed (%d)", ret);
		return ret;
	}

	cfg->irq_config(dev);

	for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) {
		active_dma_rx_channel[i] = NULL;
		active_dma_tx_channel[i] = NULL;
	}

	/* Get the binding to the DMA device */
	if (!device_is_ready(dev_data->tx.dev_dma)) {
		LOG_ERR("%s device not ready", dev_data->tx.dev_dma->name);
		return -ENODEV;
	}
	if (!device_is_ready(dev_data->rx.dev_dma)) {
		LOG_ERR("%s device not ready", dev_data->rx.dev_dma->name);
		return -ENODEV;
	}

	LOG_INF("%s initialized", dev->name);

	return 0;
}

static int rx_stream_start(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	int ret;

	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}

	if (stream->master) {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_RX);
	} else {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_RX);
	}

	/* remember active RX DMA channel (used in callback) */
	active_dma_rx_channel[stream->dma_channel] = dev;

	ret = start_dma(stream->dev_dma, stream->dma_channel,
			&stream->dma_cfg,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			(void *)LL_SPI_DMA_GetRxRegAddr(cfg->i2s),
#else
			(void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			stream->src_addr_increment, stream->mem_block,
			stream->dst_addr_increment, stream->fifo_threshold,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start RX DMA transfer: %d", ret);
		return ret;
	}

	LL_I2S_EnableDMAReq_RX(cfg->i2s);

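	/*
	 * Note: on the stm32h7-class IP the transfer is started explicitly
	 * with LL_SPI_StartMasterTransfer() after the peripheral is enabled,
	 * whereas older IP versions start as soon as the peripheral is
	 * enabled.
	 */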
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_EnableIT_OVR(cfg->i2s);
	LL_I2S_EnableIT_UDR(cfg->i2s);
	LL_I2S_EnableIT_FRE(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
	LL_SPI_StartMasterTransfer(cfg->i2s);
#else
	LL_I2S_EnableIT_ERR(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
#endif

	return 0;
}

static int tx_stream_start(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	size_t mem_block_size;
	int ret;

	ret = queue_get(stream->msgq, &stream->mem_block,
			&mem_block_size, 0);
	if (ret < 0) {
		return ret;
	}

	/* Ensure cache coherency before the DMA read operation */
	sys_cache_data_flush_range(stream->mem_block, mem_block_size);

	if (stream->master) {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_TX);
	} else {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_TX);
	}

	/* remember active TX DMA channel (used in callback) */
	active_dma_tx_channel[stream->dma_channel] = dev;

	ret = start_dma(stream->dev_dma, stream->dma_channel,
			&stream->dma_cfg,
			stream->mem_block, stream->src_addr_increment,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			(void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s),
#else
			(void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			stream->dst_addr_increment, stream->fifo_threshold,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start TX DMA transfer: %d", ret);
		return ret;
	}

	LL_I2S_EnableDMAReq_TX(cfg->i2s);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_EnableIT_OVR(cfg->i2s);
	LL_I2S_EnableIT_UDR(cfg->i2s);
	LL_I2S_EnableIT_FRE(cfg->i2s);

	LL_I2S_Enable(cfg->i2s);
	LL_SPI_StartMasterTransfer(cfg->i2s);
#else
	LL_I2S_EnableIT_ERR(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
#endif

	return 0;
}

static void rx_stream_disable(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	LL_I2S_DisableDMAReq_RX(cfg->i2s);
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_DisableIT_OVR(cfg->i2s);
	LL_I2S_DisableIT_UDR(cfg->i2s);
	LL_I2S_DisableIT_FRE(cfg->i2s);
#else
	LL_I2S_DisableIT_ERR(cfg->i2s);
#endif

	dma_stop(stream->dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}

	LL_I2S_Disable(cfg->i2s);

	active_dma_rx_channel[stream->dma_channel] = NULL;
}

static void tx_stream_disable(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	LL_I2S_DisableDMAReq_TX(cfg->i2s);
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_DisableIT_OVR(cfg->i2s);
	LL_I2S_DisableIT_UDR(cfg->i2s);
	LL_I2S_DisableIT_FRE(cfg->i2s);
#else
	LL_I2S_DisableIT_ERR(cfg->i2s);
#endif

	dma_stop(stream->dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}

	/* Give the hardware time to shift out the last data before disabling */
	k_busy_wait(100);
	LL_I2S_Disable(cfg->i2s);

	active_dma_tx_channel[stream->dma_channel] = NULL;
}

static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel)
{
	return active_dma_rx_channel[dma_channel];
}

static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel)
{
	return active_dma_tx_channel[dma_channel];
}

/* src_dev and dest_dev should be 'MEMORY' or 'PERIPHERAL'. */
#define I2S_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
	.dir = {							\
		.dev_dma = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)),	\
		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),\
		.dma_cfg = {						\
			.block_count = 2,				\
			.dma_slot = STM32_DMA_SLOT(index, dir, slot),	\
			.channel_direction = src_dev##_TO_##dest_dev,	\
			.source_data_size = 2,  /* 16bit default */	\
			.dest_data_size = 2,    /* 16bit default */	\
			.source_burst_length = 1, /* SINGLE transfer */	\
			.dest_burst_length = 1,				\
			.channel_priority = STM32_DMA_CONFIG_PRIORITY(	\
				STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
			.dma_callback = dma_##dir##_callback,		\
		},							\
		.src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC(	\
			STM32_DMA_CHANNEL_CONFIG(index, dir)),		\
		.dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC(	\
			STM32_DMA_CHANNEL_CONFIG(index, dir)),		\
		.fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD(	\
			STM32_DMA_FEATURES(index, dir)),		\
		.stream_start = dir##_stream_start,			\
		.stream_disable = dir##_stream_disable,			\
		.msgq = &dir##_##index##_queue,				\
	}
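
/*
 * A hypothetical devicetree fragment consumed by the macro above (the
 * channel/slot/config cell values are illustrative only):
 *
 *	&i2s1 {
 *		dmas = <&dma1 0 3 0x400>, <&dma1 5 3 0x800>;
 *		dma-names = "rx", "tx";
 *	};
 */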

#define I2S_STM32_INIT(index)						\
									\
	static void i2s_stm32_irq_config_func_##index(const struct device *dev);\
									\
	PINCTRL_DT_INST_DEFINE(index);					\
									\
	static const struct stm32_pclken clk_##index[] =		\
		STM32_DT_INST_CLOCKS(index);				\
									\
	static const struct i2s_stm32_cfg i2s_stm32_config_##index = {	\
		.i2s = (SPI_TypeDef *)DT_INST_REG_ADDR(index),		\
		.pclken = clk_##index,					\
		.pclk_len = DT_INST_NUM_CLOCKS(index),			\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),		\
		.irq_config = i2s_stm32_irq_config_func_##index,	\
		.master_clk_sel = DT_INST_PROP(index, mck_enabled)	\
	};								\
									\
	K_MSGQ_DEFINE(rx_##index##_queue, sizeof(struct queue_item),	\
		      CONFIG_I2S_STM32_RX_BLOCK_COUNT, 4);		\
	K_MSGQ_DEFINE(tx_##index##_queue, sizeof(struct queue_item),	\
		      CONFIG_I2S_STM32_TX_BLOCK_COUNT, 4);		\
									\
	static struct i2s_stm32_data i2s_stm32_data_##index = {		\
		UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx),		\
			 I2S_DMA_CHANNEL_INIT(index, rx, RX, PERIPHERAL, MEMORY)),\
		UTIL_AND(DT_INST_DMAS_HAS_NAME(index, tx),		\
			 I2S_DMA_CHANNEL_INIT(index, tx, TX, MEMORY, PERIPHERAL)),\
	};								\
	DEVICE_DT_INST_DEFINE(index,					\
			      &i2s_stm32_initialize, NULL,		\
			      &i2s_stm32_data_##index,			\
			      &i2s_stm32_config_##index, POST_KERNEL,	\
			      CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api);\
									\
	static void i2s_stm32_irq_config_func_##index(const struct device *dev)\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(index),			\
			    DT_INST_IRQ(index, priority),		\
			    i2s_stm32_isr, DEVICE_DT_INST_GET(index), 0);\
		irq_enable(DT_INST_IRQN(index));			\
	}

DT_INST_FOREACH_STATUS_OKAY(I2S_STM32_INIT)