/*
 * Copyright (c) 2018 STMicroelectronics
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT st_stm32_i2s

#include <string.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/dma/dma_stm32.h>
#include <soc.h>
#include <stm32_ll_rcc.h>
#include <stm32_ll_spi.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/cache.h>

#include "i2s_ll_stm32.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(i2s_ll_stm32);

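/*
 * Increment a ring-buffer index and wrap it back to 0 once it reaches
 * 'max'. Used for the head/tail indexes of the TX/RX block queues below.
 */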
#define MODULO_INC(val, max) { val = (++val < max) ? val : 0; }

static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor)
{
	return (dividend + (divisor / 2U)) / divisor;
}

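/*
 * The TX/RX block queues are simple ring buffers protected by locking
 * interrupts, since they are accessed from both thread and DMA callback
 * (ISR) context. One slot is intentionally left unused: head == tail
 * means the queue is empty, while head + 1 == tail (modulo len) means full.
 */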
static bool queue_is_empty(struct ring_buf *rb)
{
	unsigned int key;

	key = irq_lock();

	if (rb->tail != rb->head) {
		/* Ring buffer is not empty */
		irq_unlock(key);
		return false;
	}

	irq_unlock(key);

	return true;
}

/*
 * Get data from the queue
 */
static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size)
{
	unsigned int key;

	key = irq_lock();

	if (queue_is_empty(rb) == true) {
		irq_unlock(key);
		return -ENOMEM;
	}

	*mem_block = rb->buf[rb->tail].mem_block;
	*size = rb->buf[rb->tail].size;
	MODULO_INC(rb->tail, rb->len);

	irq_unlock(key);

	return 0;
}

/*
 * Put data in the queue
 */
static int queue_put(struct ring_buf *rb, void *mem_block, size_t size)
{
	uint16_t head_next;
	unsigned int key;

	key = irq_lock();

	head_next = rb->head;
	MODULO_INC(head_next, rb->len);

	if (head_next == rb->tail) {
		/* Ring buffer is full */
		irq_unlock(key);
		return -ENOMEM;
	}

	rb->buf[rb->head].mem_block = mem_block;
	rb->buf[rb->head].size = size;
	rb->head = head_next;

	irq_unlock(key);

	return 0;
}
static int i2s_stm32_enable_clock(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	const struct device *clk;
	int ret;

	clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(clk)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken[0]);
	if (ret != 0) {
		LOG_ERR("Could not enable I2S clock");
		return -EIO;
	}

	if (cfg->pclk_len > 1) {
		/* Enable I2S clock source */
		ret = clock_control_configure(clk,
					      (clock_control_subsys_t)&cfg->pclken[1],
					      NULL);
		if (ret < 0) {
			LOG_ERR("Could not configure I2S domain clock");
			return -EIO;
		}
	}

	return 0;
}

static int i2s_stm32_set_clock(const struct device *dev,
			       uint32_t bit_clk_freq)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	uint32_t freq_in = 0U;
	uint8_t i2s_div, i2s_odd;

	if (cfg->pclk_len > 1) {
		/* Handle multiple clock sources */
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t)&cfg->pclken[1],
					   &freq_in) < 0) {
			LOG_ERR("clock_control_get_rate(pclken[1]) failed");
			return -EIO;
		}
	} else {
		/* Handle single clock source */
		if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					   (clock_control_subsys_t)&cfg->pclken[0],
					   &freq_in) < 0) {
			LOG_ERR("clock_control_get_rate(pclken[0]) failed");
			return -EIO;
		}
	}
	/*
	 * The ratio between input clock (I2SxClk) and output
	 * clock on the pad (I2S_CK) is obtained using the
	 * following formula:
	 *   (i2s_div * 2) + i2s_odd
	 */
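	/*
	 * Illustrative example (assumed values, not taken from a specific
	 * board): with freq_in = 12.288 MHz and bit_clk_freq = 3.072 MHz
	 * (48 kHz frame clock, 32-bit channels, stereo), the closest ratio
	 * is 4, giving i2s_div = 2 and i2s_odd = 0.
	 */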
	i2s_div = div_round_closest(freq_in, bit_clk_freq);
	i2s_odd = (i2s_div & 0x1) ? 1 : 0;
	i2s_div >>= 1;

	/* i2s_div == 0 || i2s_div == 1 are forbidden */
	if (i2s_div < 2U) {
		LOG_ERR("The linear prescaler value is unsupported");
		return -EINVAL;
	}

	LOG_DBG("i2s_div: %d - i2s_odd: %d", i2s_div, i2s_odd);

	LL_I2S_SetPrescalerLinear(cfg->i2s, i2s_div);
	LL_I2S_SetPrescalerParity(cfg->i2s, i2s_odd);

	return 0;
}

static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir,
			       const struct i2s_config *i2s_cfg)
{
	const struct i2s_stm32_cfg *const cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	/* For words greater than 16-bit the channel length is considered 32-bit */
	const uint32_t channel_length = i2s_cfg->word_size > 16U ? 32U : 16U;
	/*
	 * Comply with the i2s_config API remark: when the I2S data format is
	 * selected, the channels parameter is ignored and the number of words
	 * in a frame is always 2.
	 */
	const uint32_t num_channels =
		((i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) == I2S_FMT_DATA_FORMAT_I2S)
			? 2U
			: i2s_cfg->channels;
	struct stream *stream;
	uint32_t bit_clk_freq;
	bool enable_mck;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY &&
	    stream->state != I2S_STATE_READY) {
		LOG_ERR("invalid state");
		return -EINVAL;
	}

	stream->master = true;
	if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE ||
	    i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) {
		stream->master = false;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		stream->queue_drop(stream);
		memset(&stream->cfg, 0, sizeof(struct i2s_config));
		stream->state = I2S_STATE_NOT_READY;
		return 0;
	}

	memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config));

	/* conditions to enable master clock output */
	enable_mck = stream->master && cfg->master_clk_sel;

	/* set I2S bitclock */
	bit_clk_freq = i2s_cfg->frame_clk_freq *
		       channel_length * num_channels;
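	/*
	 * Example with illustrative values: a 48 kHz frame clock with 32-bit
	 * channels in stereo gives 48000 * 32 * 2 = 3.072 MHz for the bit clock.
	 */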

	if (enable_mck) {
		/*
		 * Compensate for the master clock dividers.
		 * MCK = N * CK, where N is:
		 *   8 when the channel frame is 16-bit wide
		 *   4 when the channel frame is 32-bit wide
		 */
		bit_clk_freq *= channel_length == 16U ? 4U * 2U : 4U;
	}

	ret = i2s_stm32_set_clock(dev, bit_clk_freq);
	if (ret < 0) {
		return ret;
	}

	/* set I2S Master Clock output on the MCK pin, if enabled in the DT */
	if (enable_mck) {
		LL_I2S_EnableMasterClock(cfg->i2s);
	} else {
		LL_I2S_DisableMasterClock(cfg->i2s);
	}

	/*
	 * Set the I2S data format.
	 * Note: 16-bit data extended on a 32-bit channel length is not supported.
	 */
	if (i2s_cfg->word_size == 16U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_16B);
	} else if (i2s_cfg->word_size == 24U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_24B);
	} else if (i2s_cfg->word_size == 32U) {
		LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_32B);
	} else {
		LOG_ERR("invalid word size");
		return -EINVAL;
	}

	/* set I2S Standard */
	switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) {
	case I2S_FMT_DATA_FORMAT_I2S:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PHILIPS);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_SHORT:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_SHORT);
		break;

	case I2S_FMT_DATA_FORMAT_PCM_LONG:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_LONG);
		break;

	case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_MSB);
		break;

	case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED:
		LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_LSB);
		break;

	default:
		LOG_ERR("Unsupported I2S data format");
		return -EINVAL;
	}

	/* set I2S clock polarity */
	if ((i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) == I2S_FMT_BIT_CLK_INV) {
		LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_HIGH);
	} else {
		LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_LOW);
	}

	stream->state = I2S_STATE_READY;
	return 0;
}

static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
			     enum i2s_trigger_cmd cmd)
{
	struct i2s_stm32_data *const dev_data = dev->data;
	const struct i2s_stm32_cfg *const cfg = dev->config;
	struct stream *stream;
	unsigned int key;
	int ret;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else if (dir == I2S_DIR_BOTH) {
		return -ENOSYS;
	} else {
		LOG_ERR("Either RX or TX direction must be selected");
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_ERR("START trigger: invalid state %d",
				stream->state);
			return -EIO;
		}

		__ASSERT_NO_MSG(stream->mem_block == NULL);

		ret = stream->stream_start(stream, dev);
		if (ret < 0) {
			LOG_ERR("START trigger failed %d", ret);
			return ret;
		}

		stream->state = I2S_STATE_RUNNING;
		stream->last_block = false;
		break;

	case I2S_TRIGGER_STOP:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("STOP trigger: invalid state");
			return -EIO;
		}
do_trigger_stop:
		if (ll_func_i2s_dma_busy(cfg->i2s)) {
			stream->state = I2S_STATE_STOPPING;
			/*
			 * Indicate that the transition to I2S_STATE_STOPPING
			 * is triggered by STOP command
			 */
			stream->tx_stop_for_drain = false;
		} else {
			stream->stream_disable(stream, dev);
			stream->state = I2S_STATE_READY;
			stream->last_block = true;
		}
		irq_unlock(key);
		break;

	case I2S_TRIGGER_DRAIN:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("DRAIN trigger: invalid state");
			return -EIO;
		}

		if (dir == I2S_DIR_TX) {
			if ((queue_is_empty(&stream->mem_block_queue) == false) ||
			    (ll_func_i2s_dma_busy(cfg->i2s))) {
				stream->state = I2S_STATE_STOPPING;
				/*
				 * Indicate that the transition to I2S_STATE_STOPPING
				 * is triggered by DRAIN command
				 */
				stream->tx_stop_for_drain = true;
			} else {
				stream->stream_disable(stream, dev);
				stream->state = I2S_STATE_READY;
			}
		} else if (dir == I2S_DIR_RX) {
			goto do_trigger_stop;
		} else {
			LOG_ERR("Unavailable direction");
			return -EINVAL;
		}
		irq_unlock(key);
		break;

	case I2S_TRIGGER_DROP:
		if (stream->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP trigger: invalid state");
			return -EIO;
		}
		stream->stream_disable(stream, dev);
		stream->queue_drop(stream);
		stream->state = I2S_STATE_READY;
		break;

	case I2S_TRIGGER_PREPARE:
		if (stream->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE trigger: invalid state");
			return -EIO;
		}
		stream->state = I2S_STATE_READY;
		stream->queue_drop(stream);
		break;

	default:
		LOG_ERR("Unsupported trigger command");
		return -EINVAL;
	}

	return 0;
}

static int i2s_stm32_read(const struct device *dev, void **mem_block,
			  size_t *size)
{
	struct i2s_stm32_data *const dev_data = dev->data;
	int ret;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	if (dev_data->rx.state != I2S_STATE_ERROR) {
		ret = k_sem_take(&dev_data->rx.sem,
				 SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
		if (ret < 0) {
			return ret;
		}
	}

	/* Get data from the beginning of RX queue */
	ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
	if (ret < 0) {
		return -EIO;
	}

	return 0;
}

static int i2s_stm32_write(const struct device *dev, void *mem_block,
			   size_t size)
{
	struct i2s_stm32_data *const dev_data = dev->data;
	int ret;

	if (dev_data->tx.state != I2S_STATE_RUNNING &&
	    dev_data->tx.state != I2S_STATE_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	ret = k_sem_take(&dev_data->tx.sem,
			 SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
	if (ret < 0) {
		return ret;
	}

	/* Add data to the end of the TX queue */
	return queue_put(&dev_data->tx.mem_block_queue, mem_block, size);
}

static DEVICE_API(i2s, i2s_stm32_driver_api) = {
	.configure = i2s_stm32_configure,
	.read = i2s_stm32_read,
	.write = i2s_stm32_write,
	.trigger = i2s_stm32_trigger,
};

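/*
 * The DMA callbacks only receive the DMA channel number, so these tables map
 * an active channel back to the I2S device that started the transfer. Entries
 * are set in rx/tx_stream_start() and cleared in the *_stream_disable()
 * functions.
 */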
#define STM32_DMA_NUM_CHANNELS 8
static const struct device *active_dma_rx_channel[STM32_DMA_NUM_CHANNELS];
static const struct device *active_dma_tx_channel[STM32_DMA_NUM_CHANNELS];

static int reload_dma(const struct device *dev_dma, uint32_t channel,
		      struct dma_config *dcfg, void *src, void *dst,
		      uint32_t blk_size)
{
	int ret;

	ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, blk_size);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

static int start_dma(const struct device *dev_dma, uint32_t channel,
		     struct dma_config *dcfg, void *src,
		     bool src_addr_increment, void *dst,
		     bool dst_addr_increment, uint8_t fifo_threshold,
		     uint32_t blk_size)
{
	struct dma_block_config blk_cfg;
	int ret;

	memset(&blk_cfg, 0, sizeof(blk_cfg));
	blk_cfg.block_size = blk_size;
	blk_cfg.source_address = (uint32_t)src;
	blk_cfg.dest_address = (uint32_t)dst;
	if (src_addr_increment) {
		blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}
	if (dst_addr_increment) {
		blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
	} else {
		blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}
	blk_cfg.fifo_mode_control = fifo_threshold;

	dcfg->head_block = &blk_cfg;

	ret = dma_config(dev_dma, channel, dcfg);
	if (ret < 0) {
		return ret;
	}

	ret = dma_start(dev_dma, channel);

	return ret;
}

static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel);
static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel);
static void rx_stream_disable(struct stream *stream, const struct device *dev);
static void tx_stream_disable(struct stream *stream, const struct device *dev);

/* This function is executed in the interrupt context */
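/*
 * Completion callback for one RX block: swap in a freshly allocated memory
 * block, reload the DMA for the next transfer, invalidate the cache over the
 * block just written by the DMA, and hand it to the RX queue for i2s_read().
 * Any failure puts the stream into the ERROR state and disables reception.
 */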
static void dma_rx_callback(const struct device *dma_dev, void *arg,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_rx_dma_channel(channel);
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->rx;
	void *mblk_tmp;
	int ret;

	if (status < 0) {
		ret = -EIO;
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* Stop reception if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		goto rx_disable;
	}

	mblk_tmp = stream->mem_block;

	/* Prepare to receive the next data block */
	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}

	ret = reload_dma(stream->dev_dma, stream->dma_channel,
			 &stream->dma_cfg,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			 (void *)LL_SPI_DMA_GetRxRegAddr(cfg->i2s),
#else
			 (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			 stream->mem_block,
			 stream->cfg.block_size);
	if (ret < 0) {
		LOG_DBG("Failed to start RX DMA transfer: %d", ret);
		goto rx_disable;
	}

	/* Assure cache coherency after DMA write operation */
	sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size);

	/* All block data received */
	ret = queue_put(&stream->mem_block_queue, mblk_tmp,
			stream->cfg.block_size);
	if (ret < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}
	k_sem_give(&stream->sem);

	/* Stop reception if we were requested */
	if (stream->state == I2S_STATE_STOPPING) {
		stream->state = I2S_STATE_READY;
		goto rx_disable;
	}

	return;

rx_disable:
	rx_stream_disable(stream, dev);
}

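/*
 * Completion callback for one TX block: free the block that has just been
 * sent, handle the STOPPING state (STOP stops at the current block, DRAIN
 * keeps going until the queue is empty), then fetch the next block from the
 * TX queue, flush the cache over it and reload the DMA.
 */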
static void dma_tx_callback(const struct device *dma_dev, void *arg,
			    uint32_t channel, int status)
{
	const struct device *dev = get_dev_from_tx_dma_channel(channel);
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	size_t mem_block_size;
	int ret;

	if (status < 0) {
		ret = -EIO;
		stream->state = I2S_STATE_ERROR;
		goto tx_disable;
	}

	__ASSERT_NO_MSG(stream->mem_block != NULL);

	/* All block data sent */
	k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
	stream->mem_block = NULL;

	/* Stop transmission if there was an error */
	if (stream->state == I2S_STATE_ERROR) {
		LOG_ERR("TX error detected");
		goto tx_disable;
	}

	/* Check if we finished transferring one block and stopping is requested */
	if ((stream->state == I2S_STATE_STOPPING) && (status == DMA_STATUS_COMPLETE)) {
		/*
		 * Check whether all TX samples have been handled. As stated in
		 * the Zephyr I2S specification, the DRAIN command sends all
		 * data remaining in the transmit queue before stopping the
		 * transmission.
		 */
		if (queue_is_empty(&stream->mem_block_queue) == true) {
			stream->queue_drop(stream);
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		} else if (stream->tx_stop_for_drain == false) {
			/*
			 * In case of the STOP command, stop the transmission
			 * at the current block; the transmission can be
			 * resumed later.
			 */
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		}
		/* else: DRAIN trigger -> continue TX normally until the queue is empty */
	}

	/* Stop transmission if we were requested */
	if (stream->last_block) {
		stream->state = I2S_STATE_READY;
		goto tx_disable;
	}

	/* Prepare to send the next data block */
	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
			&mem_block_size);
	if (ret < 0) {
		if (stream->state == I2S_STATE_STOPPING) {
			stream->state = I2S_STATE_READY;
		} else {
			stream->state = I2S_STATE_ERROR;
		}
		goto tx_disable;
	}
	k_sem_give(&stream->sem);

	/* Assure cache coherency before DMA read operation */
	sys_cache_data_flush_range(stream->mem_block, mem_block_size);

	ret = reload_dma(stream->dev_dma, stream->dma_channel,
			 &stream->dma_cfg,
			 stream->mem_block,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			 (void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s),
#else
			 (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			 mem_block_size);
	if (ret < 0) {
		LOG_DBG("Failed to start TX DMA transfer: %d", ret);
		goto tx_disable;
	}

	return;

tx_disable:
	tx_stream_disable(stream, dev);
}

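/*
 * Error statistics updated by the ISR below. The I2S interrupt is only used
 * to count and clear overrun (OVR) and underrun (UDR) conditions; the actual
 * audio data transfer is handled entirely by the DMA callbacks above.
 */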
static uint32_t i2s_stm32_irq_count;
static uint32_t i2s_stm32_irq_ovr_count;
static uint32_t i2s_stm32_irq_udr_count;

static void i2s_stm32_isr(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	/* OVR error must be explicitly cleared */
	if (LL_I2S_IsActiveFlag_OVR(cfg->i2s)) {
		i2s_stm32_irq_ovr_count++;
		LL_I2S_ClearFlag_OVR(cfg->i2s);
	}

	/* NOTE: UDR error must be explicitly cleared on STM32H7 */
	if (LL_I2S_IsActiveFlag_UDR(cfg->i2s)) {
		i2s_stm32_irq_udr_count++;
		LL_I2S_ClearFlag_UDR(cfg->i2s);
	}

	i2s_stm32_irq_count++;
}

static int i2s_stm32_initialize(const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	struct i2s_stm32_data *const dev_data = dev->data;
	struct stream *stream = &dev_data->tx;
	int ret, i;

	/* Initialize the flag used to distinguish STOP from DRAIN on TX */
	stream->tx_stop_for_drain = false;

	/* Enable I2S clock propagation */
	ret = i2s_stm32_enable_clock(dev);
	if (ret < 0) {
		LOG_ERR("%s: clock enabling failed: %d", __func__, ret);
		return -EIO;
	}

	/* Configure DT provided device signals when available */
	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("I2S pinctrl setup failed (%d)", ret);
		return ret;
	}

	cfg->irq_config(dev);

	k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT);
	k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT,
		   CONFIG_I2S_STM32_TX_BLOCK_COUNT);

	for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) {
		active_dma_rx_channel[i] = NULL;
		active_dma_tx_channel[i] = NULL;
	}

	/* Check that the DMA devices used by the TX and RX streams are ready */
	if (!device_is_ready(dev_data->tx.dev_dma)) {
		LOG_ERR("%s device not ready", dev_data->tx.dev_dma->name);
		return -ENODEV;
	}
	if (!device_is_ready(dev_data->rx.dev_dma)) {
		LOG_ERR("%s device not ready", dev_data->rx.dev_dma->name);
		return -ENODEV;
	}

	LOG_INF("%s initialized", dev->name);

	return 0;
}

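/*
 * Start an RX transfer: allocate the first memory block, select the
 * master/slave RX transfer mode, start the DMA towards that block and enable
 * the peripheral together with its DMA request and error interrupts.
 * tx_stream_start() below mirrors this with the data direction reversed.
 */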
static int rx_stream_start(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	int ret;

	ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block,
			       K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}

	if (stream->master) {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_RX);
	} else {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_RX);
	}

	/* remember active RX DMA channel (used in callback) */
	active_dma_rx_channel[stream->dma_channel] = dev;

	ret = start_dma(stream->dev_dma, stream->dma_channel,
			&stream->dma_cfg,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			(void *)LL_SPI_DMA_GetRxRegAddr(cfg->i2s),
#else
			(void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			stream->src_addr_increment, stream->mem_block,
			stream->dst_addr_increment, stream->fifo_threshold,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start RX DMA transfer: %d", ret);
		return ret;
	}

	LL_I2S_EnableDMAReq_RX(cfg->i2s);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_EnableIT_OVR(cfg->i2s);
	LL_I2S_EnableIT_UDR(cfg->i2s);
	LL_I2S_EnableIT_FRE(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
	LL_SPI_StartMasterTransfer(cfg->i2s);
#else
	LL_I2S_EnableIT_ERR(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
#endif

	return 0;
}

static int tx_stream_start(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;
	size_t mem_block_size;
	int ret;

	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
			&mem_block_size);
	if (ret < 0) {
		return ret;
	}
	k_sem_give(&stream->sem);

	/* Assure cache coherency before DMA read operation */
	sys_cache_data_flush_range(stream->mem_block, mem_block_size);

	if (stream->master) {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_TX);
	} else {
		LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_TX);
	}

	/* remember active TX DMA channel (used in callback) */
	active_dma_tx_channel[stream->dma_channel] = dev;

	ret = start_dma(stream->dev_dma, stream->dma_channel,
			&stream->dma_cfg,
			stream->mem_block, stream->src_addr_increment,
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
			(void *)LL_SPI_DMA_GetTxRegAddr(cfg->i2s),
#else
			(void *)LL_SPI_DMA_GetRegAddr(cfg->i2s),
#endif
			stream->dst_addr_increment, stream->fifo_threshold,
			stream->cfg.block_size);
	if (ret < 0) {
		LOG_ERR("Failed to start TX DMA transfer: %d", ret);
		return ret;
	}

	LL_I2S_EnableDMAReq_TX(cfg->i2s);

#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_EnableIT_OVR(cfg->i2s);
	LL_I2S_EnableIT_UDR(cfg->i2s);
	LL_I2S_EnableIT_FRE(cfg->i2s);

	LL_I2S_Enable(cfg->i2s);
	LL_SPI_StartMasterTransfer(cfg->i2s);
#else
	LL_I2S_EnableIT_ERR(cfg->i2s);
	LL_I2S_Enable(cfg->i2s);
#endif

	return 0;
}

static void rx_stream_disable(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	LL_I2S_DisableDMAReq_RX(cfg->i2s);
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_DisableIT_OVR(cfg->i2s);
	LL_I2S_DisableIT_UDR(cfg->i2s);
	LL_I2S_DisableIT_FRE(cfg->i2s);
#else
	LL_I2S_DisableIT_ERR(cfg->i2s);
#endif

	dma_stop(stream->dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}

	LL_I2S_Disable(cfg->i2s);

	active_dma_rx_channel[stream->dma_channel] = NULL;
}

static void tx_stream_disable(struct stream *stream, const struct device *dev)
{
	const struct i2s_stm32_cfg *cfg = dev->config;

	LL_I2S_DisableDMAReq_TX(cfg->i2s);
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32h7_i2s)
	LL_I2S_DisableIT_OVR(cfg->i2s);
	LL_I2S_DisableIT_UDR(cfg->i2s);
	LL_I2S_DisableIT_FRE(cfg->i2s);
#else
	LL_I2S_DisableIT_ERR(cfg->i2s);
#endif

	dma_stop(stream->dev_dma, stream->dma_channel);
	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
	}

	/* Let any data still in the transmitter drain out before disabling */
	k_busy_wait(100);
	LL_I2S_Disable(cfg->i2s);

	active_dma_tx_channel[stream->dma_channel] = NULL;
}

static void rx_queue_drop(struct stream *stream)
{
	size_t size;
	void *mem_block;

	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
	}

	k_sem_reset(&stream->sem);
}

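/*
 * Unlike the RX side, the TX semaphore counts free slots in the queue, so
 * every block freed here gives the semaphore back once to unblock pending
 * i2s_write() callers.
 */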
static void tx_queue_drop(struct stream *stream)
{
	size_t size;
	void *mem_block;
	unsigned int n = 0U;

	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
		n++;
	}

	for (; n > 0; n--) {
		k_sem_give(&stream->sem);
	}
}

static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel)
{
	return active_dma_rx_channel[dma_channel];
}

static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel)
{
	return active_dma_tx_channel[dma_channel];
}

/* src_dev and dest_dev should be 'MEMORY' or 'PERIPHERAL'. */
#define I2S_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
	.dir = {							\
		.dev_dma = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)),	\
		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),	\
		.dma_cfg = {						\
			.block_count = 2,				\
			.dma_slot = STM32_DMA_SLOT(index, dir, slot),	\
			.channel_direction = src_dev##_TO_##dest_dev,	\
			.source_data_size = 2,  /* 16bit default */	\
			.dest_data_size = 2,    /* 16bit default */	\
			.source_burst_length = 1, /* SINGLE transfer */	\
			.dest_burst_length = 1,				\
			.channel_priority = STM32_DMA_CONFIG_PRIORITY(	\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
			.dma_callback = dma_##dir##_callback,		\
		},							\
		.src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC(	\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
		.dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC(	\
					STM32_DMA_CHANNEL_CONFIG(index, dir)),	\
		.fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD(	\
					STM32_DMA_FEATURES(index, dir)),	\
		.stream_start = dir##_stream_start,			\
		.stream_disable = dir##_stream_disable,			\
		.queue_drop = dir##_queue_drop,				\
		.mem_block_queue.buf = dir##_##index##_ring_buf,	\
		.mem_block_queue.len = ARRAY_SIZE(dir##_##index##_ring_buf)	\
	}

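/*
 * Illustrative devicetree fragment only (controller phandles, DMA cells and
 * pin names are SoC- and board-specific, so treat the concrete values as an
 * assumption rather than a reference):
 *
 *	&i2s2 {
 *		pinctrl-0 = <&i2s2_ck_pb13 &i2s2_sd_pb15>;
 *		pinctrl-names = "default";
 *		dmas = <&dma1 3 3 0x400>, <&dma1 4 3 0x400>;
 *		dma-names = "rx", "tx";
 *		mck-enabled;
 *		status = "okay";
 *	};
 *
 * The "rx"/"tx" dma-names and the optional "mck-enabled" property are what
 * the macros below look up via DT_INST_DMAS_HAS_NAME() and DT_INST_PROP().
 */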
#define I2S_STM32_INIT(index)						\
									\
	static void i2s_stm32_irq_config_func_##index(const struct device *dev);\
									\
	PINCTRL_DT_INST_DEFINE(index);					\
									\
	static const struct stm32_pclken clk_##index[] =		\
					 STM32_DT_INST_CLOCKS(index);	\
									\
	static const struct i2s_stm32_cfg i2s_stm32_config_##index = {	\
		.i2s = (SPI_TypeDef *)DT_INST_REG_ADDR(index),		\
		.pclken = clk_##index,					\
		.pclk_len = DT_INST_NUM_CLOCKS(index),			\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),		\
		.irq_config = i2s_stm32_irq_config_func_##index,	\
		.master_clk_sel = DT_INST_PROP(index, mck_enabled)	\
	};								\
									\
	struct queue_item rx_##index##_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1];\
	struct queue_item tx_##index##_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1];\
									\
	static struct i2s_stm32_data i2s_stm32_data_##index = {	\
		UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx),		\
			 I2S_DMA_CHANNEL_INIT(index, rx, RX, PERIPHERAL, MEMORY)),\
		UTIL_AND(DT_INST_DMAS_HAS_NAME(index, tx),		\
			 I2S_DMA_CHANNEL_INIT(index, tx, TX, MEMORY, PERIPHERAL)),\
	};								\
	DEVICE_DT_INST_DEFINE(index,					\
			      &i2s_stm32_initialize, NULL,		\
			      &i2s_stm32_data_##index,			\
			      &i2s_stm32_config_##index, POST_KERNEL,	\
			      CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api);	\
									\
	static void i2s_stm32_irq_config_func_##index(const struct device *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(index),			\
			    DT_INST_IRQ(index, priority),		\
			    i2s_stm32_isr, DEVICE_DT_INST_GET(index), 0);\
		irq_enable(DT_INST_IRQN(index));			\
	}

DT_INST_FOREACH_STATUS_OKAY(I2S_STM32_INIT)