/*
 * Copyright (c) 2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT espressif_esp32_i2s

#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/i2s.h>
#include <zephyr/drivers/dma/dma_esp32.h>
#include <soc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>

#include <esp_clk_tree.h>
#include <hal/i2s_hal.h>

LOG_MODULE_REGISTER(i2s_esp32, CONFIG_I2S_LOG_LEVEL);

#if !SOC_GDMA_SUPPORTED
#error "Only SoCs with GDMA peripheral are supported!"
#endif

#define I2S_ESP32_CLK_SRC I2S_CLK_SRC_DEFAULT

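/* A single entry in a stream's message queue: one audio block and its size. */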
struct queue_item {
	void *buffer;
	size_t size;
};

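/*
 * Per-direction (RX or TX) stream context: the I2S state machine state, the
 * DMA channel servicing the stream, the memory block currently owned by the
 * DMA engine, and the queue of blocks exchanged with the application.
 */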
struct i2s_esp32_stream {
	int32_t state;
	struct i2s_config i2s_cfg;
	bool is_slave;

	const struct device *dma_dev;
	uint32_t dma_channel;

	bool stop_without_draining;

	void *mem_block;
	size_t mem_block_len;
	bool last_block;

	struct k_msgq queue;
	void (*queue_drop)(struct i2s_esp32_stream *stream);

	int (*start_transfer)(const struct device *dev);
	void (*stop_transfer)(const struct device *dev);
};

struct i2s_esp32_cfg {
	const int unit;
	i2s_hal_context_t hal_cxt;
	const struct pinctrl_dev_config *pcfg;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};

struct i2s_esp32_data {
	i2s_hal_clock_info_t clk_info;
	struct i2s_esp32_stream rx;
	struct i2s_esp32_stream tx;
};

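/* Query the clock tree for the frequency (Hz) of the selected source clock. */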
uint32_t i2s_esp32_get_source_clk_freq(i2s_clock_src_t clk_src)
{
	uint32_t clk_freq = 0;

	esp_clk_tree_src_get_freq_hz(clk_src, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &clk_freq);
	return clk_freq;
}

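/*
 * Derive the clock configuration for the requested sample rate. In both
 * modes bclk = frame_clk_freq * channels * channel_length; as master,
 * mclk = frame_clk_freq * mclk_multiple (256, or 384 for 24-bit words),
 * while as slave a fixed bclk_div of 8 is used. A resulting mclk_div of
 * zero means the source clock cannot sustain the requested rate.
 */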
static esp_err_t i2s_esp32_calculate_clock(const struct i2s_config *i2s_cfg, uint8_t channel_length,
					   i2s_hal_clock_info_t *i2s_hal_clock_info)
{
	uint16_t mclk_multiple = 256;

	if (i2s_cfg == NULL) {
		LOG_ERR("Input i2s_cfg is NULL");
		return ESP_ERR_INVALID_ARG;
	}

	if (i2s_hal_clock_info == NULL) {
		LOG_ERR("Input hal_clock_info is NULL");
		return ESP_ERR_INVALID_ARG;
	}

	if (i2s_cfg->word_size == 24) {
		mclk_multiple = 384;
	}

	if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE ||
	    i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) {
		i2s_hal_clock_info->bclk_div = 8;
		i2s_hal_clock_info->bclk =
			i2s_cfg->frame_clk_freq * i2s_cfg->channels * channel_length;
		i2s_hal_clock_info->mclk = i2s_cfg->frame_clk_freq * i2s_hal_clock_info->bclk_div;
	} else {
		i2s_hal_clock_info->bclk =
			i2s_cfg->frame_clk_freq * i2s_cfg->channels * channel_length;
		i2s_hal_clock_info->mclk = i2s_cfg->frame_clk_freq * mclk_multiple;
		i2s_hal_clock_info->bclk_div = i2s_hal_clock_info->mclk / i2s_hal_clock_info->bclk;
	}

	i2s_hal_clock_info->sclk = i2s_esp32_get_source_clk_freq(I2S_ESP32_CLK_SRC);
	i2s_hal_clock_info->mclk_div = i2s_hal_clock_info->sclk / i2s_hal_clock_info->mclk;
	if (i2s_hal_clock_info->mclk_div == 0) {
		LOG_ERR("Sample rate is too large for the current clock source");
		return ESP_ERR_INVALID_ARG;
	}

	return ESP_OK;
}

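/* Free every block still pending in the stream's queue back to its slab. */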
static void i2s_esp32_queue_drop(struct i2s_esp32_stream *stream)
{
	struct queue_item item;

	while (k_msgq_get(&stream->queue, &item, K_NO_WAIT) == 0) {
		k_mem_slab_free(stream->i2s_cfg.mem_slab, item.buffer);
	}
}

static void i2s_esp32_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				  int status);
static void i2s_esp32_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				  int status);

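/*
 * Reload the stream's DMA channel with the current memory block and start it
 * again; used from the DMA callbacks between consecutive blocks.
 */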
static int i2s_esp32_restart_dma(const struct device *dev, enum i2s_dir i2s_dir)
{
	const struct i2s_esp32_cfg *dev_cfg = dev->config;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream;
	void *src = NULL, *dst = NULL;
	int err;

	if (i2s_dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
		dst = stream->mem_block;
	} else if (i2s_dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
		src = stream->mem_block;
	} else {
		LOG_ERR("Invalid DMA direction");
		return -EINVAL;
	}

	err = dma_reload(stream->dma_dev, stream->dma_channel, (uint32_t)src, (uint32_t)dst,
			 stream->mem_block_len);
	if (err < 0) {
		LOG_ERR("Error reloading DMA channel[%d]: %d", (int)stream->dma_channel, err);
		return -EIO;
	}

	if (i2s_dir == I2S_DIR_RX) {
		i2s_ll_rx_set_eof_num(dev_cfg->hal_cxt.dev, stream->mem_block_len);
	}

	err = dma_start(stream->dma_dev, stream->dma_channel);
	if (err < 0) {
		LOG_ERR("Error starting DMA channel[%d]: %d", (int)stream->dma_channel, err);
		return -EIO;
	}

	return 0;
}

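/*
 * Fully (re)configure the stream's DMA channel for the first block of a
 * transfer and start it. For RX the controller must also be told the block
 * length via the EOF counter.
 */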
static int i2s_esp32_start_dma(const struct device *dev, enum i2s_dir i2s_dir)
{
	const struct i2s_esp32_cfg *dev_cfg = dev->config;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = NULL;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};
	unsigned int key;
	int err, ret = 0;

	if (i2s_dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else if (i2s_dir == I2S_DIR_TX) {
		stream = &dev_data->tx;
	} else {
		LOG_ERR("Invalid DMA direction");
		return -EINVAL;
	}

	key = irq_lock();

	dma_blk.block_size = stream->mem_block_len;
	if (i2s_dir == I2S_DIR_RX) {
		dma_blk.dest_address = (uint32_t)stream->mem_block;
		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_cfg.dma_callback = i2s_esp32_rx_callback;
	} else {
		dma_blk.source_address = (uint32_t)stream->mem_block;
		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_cfg.dma_callback = i2s_esp32_tx_callback;
	}
	dma_cfg.user_data = (void *)dev;
	dma_cfg.dma_slot =
		dev_cfg->unit == 0 ? ESP_GDMA_TRIG_PERIPH_I2S0 : ESP_GDMA_TRIG_PERIPH_I2S1;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_blk;

	err = dma_config(stream->dma_dev, stream->dma_channel, &dma_cfg);
	if (err < 0) {
		LOG_ERR("Error configuring DMA channel[%d]: %d", (int)stream->dma_channel, err);
		ret = -EINVAL;
		goto unlock;
	}

	if (i2s_dir == I2S_DIR_RX) {
		i2s_ll_rx_set_eof_num(dev_cfg->hal_cxt.dev, stream->mem_block_len);
	}

	err = dma_start(stream->dma_dev, stream->dma_channel);
	if (err < 0) {
		LOG_ERR("Error starting DMA channel[%d]: %d", (int)stream->dma_channel, err);
		ret = -EIO;
		goto unlock;
	}

unlock:
	irq_unlock(key);
	return ret;
}

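/* Allocate the first RX block, reset the receiver and kick off RX DMA. */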
static int i2s_esp32_rx_start_transfer(const struct device *dev)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	const struct i2s_esp32_cfg *dev_cfg = dev->config;
	struct i2s_esp32_stream *stream = &dev_data->rx;
	const i2s_hal_context_t *hal_cxt = &dev_cfg->hal_cxt;
	int err;

	err = k_mem_slab_alloc(stream->i2s_cfg.mem_slab, &stream->mem_block, K_NO_WAIT);
	if (err < 0) {
		return -ENOMEM;
	}
	stream->mem_block_len = stream->i2s_cfg.block_size;

	i2s_hal_rx_stop(hal_cxt);
	i2s_hal_rx_reset(hal_cxt);
	i2s_hal_rx_reset_fifo(hal_cxt);

	err = i2s_esp32_start_dma(dev, I2S_DIR_RX);
	if (err < 0) {
		LOG_ERR("Failed to start RX DMA transfer: %d", err);
		return -EIO;
	}

	i2s_hal_rx_start(hal_cxt);

	return 0;
}

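/* Take the first queued TX block, reset the transmitter and kick off TX DMA. */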
static int i2s_esp32_tx_start_transfer(const struct device *dev)
{
	const struct i2s_esp32_cfg *dev_cfg = dev->config;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = &dev_data->tx;
	const i2s_hal_context_t *hal_cxt = &dev_cfg->hal_cxt;
	struct queue_item item;
	int err;

	err = k_msgq_get(&stream->queue, &item, K_NO_WAIT);
	if (err < 0) {
		return -ENOMEM;
	}

	stream->mem_block = item.buffer;
	stream->mem_block_len = item.size;

	i2s_hal_tx_stop(hal_cxt);
	i2s_hal_tx_reset(hal_cxt);
	i2s_hal_tx_reset_fifo(hal_cxt);

	err = i2s_esp32_start_dma(dev, I2S_DIR_TX);
	if (err < 0) {
		LOG_ERR("Failed to start TX DMA transfer: %d", err);
		return -EIO;
	}

	i2s_hal_tx_start(hal_cxt);

	return 0;
}

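/* Stop RX DMA and release the block currently owned by the DMA engine. */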
static void i2s_esp32_rx_stop_transfer(const struct device *dev)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = &dev_data->rx;

	dma_stop(stream->dma_dev, stream->dma_channel);

	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->i2s_cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
		stream->mem_block_len = 0;
	}
}

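/* Stop TX DMA and release the block currently owned by the DMA engine. */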
static void i2s_esp32_tx_stop_transfer(const struct device *dev)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = &dev_data->tx;

	dma_stop(stream->dma_dev, stream->dma_channel);

	if (stream->mem_block != NULL) {
		k_mem_slab_free(stream->i2s_cfg.mem_slab, stream->mem_block);
		stream->mem_block = NULL;
		stream->mem_block_len = 0;
	}
}

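/*
 * RX DMA completion callback: hand the filled block to the application
 * queue, then either stop (on STOPPING or error) or allocate a fresh block
 * and restart DMA for the next one.
 */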
static void i2s_esp32_rx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				  int status)
{
	const struct device *dev = (const struct device *)arg;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = &dev_data->rx;
	int err;

	if (status < 0) {
		stream->state = I2S_STATE_ERROR;
		LOG_ERR("RX status bad: %d", status);
		goto rx_disable;
	}

	if (stream->mem_block == NULL) {
		if (stream->state != I2S_STATE_READY) {
			stream->state = I2S_STATE_ERROR;
			LOG_ERR("RX mem_block NULL");
			goto rx_disable;
		} else {
			return;
		}
	}

	struct queue_item item = {.buffer = stream->mem_block, .size = stream->mem_block_len};

	err = k_msgq_put(&stream->queue, &item, K_NO_WAIT);
	if (err < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}
	/* The queue now owns the block; drop our reference so that a
	 * subsequent stop_transfer() cannot free it a second time.
	 */
	stream->mem_block = NULL;
	stream->mem_block_len = 0;

	if (stream->state == I2S_STATE_STOPPING) {
		stream->state = I2S_STATE_READY;
		goto rx_disable;
	}

	err = k_mem_slab_alloc(stream->i2s_cfg.mem_slab, &stream->mem_block, K_NO_WAIT);
	if (err < 0) {
		stream->state = I2S_STATE_ERROR;
		goto rx_disable;
	}
	stream->mem_block_len = stream->i2s_cfg.block_size;

	err = i2s_esp32_restart_dma(dev, I2S_DIR_RX);
	if (err < 0) {
		stream->state = I2S_STATE_ERROR;
		LOG_ERR("Failed to restart RX transfer: %d", err);
		goto rx_disable;
	}

	return;

rx_disable:
	stream->stop_transfer(dev);
}

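/*
 * TX DMA completion callback: recycle the block just sent and, unless the
 * stream is stopping or out of data, reload DMA with the next queued block.
 */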
static void i2s_esp32_tx_callback(const struct device *dma_dev, void *arg, uint32_t channel,
				  int status)
{
	const struct device *dev = (const struct device *)arg;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream = &dev_data->tx;
	struct queue_item item;
	void *mem_block_tmp;
	int err;

	if (status < 0) {
		stream->state = I2S_STATE_ERROR;
		LOG_ERR("TX bad status: %d", status);
		goto tx_disable;
	}

	if (stream->mem_block == NULL) {
		if (stream->state != I2S_STATE_READY) {
			stream->state = I2S_STATE_ERROR;
			LOG_ERR("TX mem_block NULL");
			goto tx_disable;
		} else {
			return;
		}
	}

	if (stream->state == I2S_STATE_STOPPING) {
		if (k_msgq_num_used_get(&stream->queue) == 0) {
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		} else if (stream->stop_without_draining == true) {
			stream->queue_drop(stream);
			stream->state = I2S_STATE_READY;
			goto tx_disable;
		}
		/* else: DRAIN trigger, so continue until the queue is empty */
	}

	if (stream->last_block) {
		stream->state = I2S_STATE_READY;
		goto tx_disable;
	}

	err = k_msgq_get(&stream->queue, &item, K_NO_WAIT);
	if (err < 0) {
		stream->state = I2S_STATE_ERROR;
		goto tx_disable;
	}

	mem_block_tmp = stream->mem_block;

	stream->mem_block = item.buffer;
	stream->mem_block_len = item.size;

	err = i2s_esp32_restart_dma(dev, I2S_DIR_TX);
	if (err < 0) {
		stream->state = I2S_STATE_ERROR;
		LOG_ERR("Failed to restart TX transfer: %d", err);
		goto tx_disable;
	}

	k_mem_slab_free(stream->i2s_cfg.mem_slab, mem_block_tmp);

	return;

tx_disable:
	stream->stop_transfer(dev);
}

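/*
 * Device init: verify DMA and clock-control dependencies, allocate the
 * per-direction block queues and enable the I2S module clock.
 */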
static int i2s_esp32_initialize(const struct device *dev)
{
	const struct i2s_esp32_cfg *dev_cfg = dev->config;
	struct i2s_esp32_data *const dev_data = dev->data;
	const struct device *clk_dev = dev_cfg->clock_dev;
	int err;

	if (dev_data->tx.dma_dev && !device_is_ready(dev_data->tx.dma_dev)) {
		LOG_ERR("%s device not ready", dev_data->tx.dma_dev->name);
		return -ENODEV;
	}

	if (dev_data->rx.dma_dev && !device_is_ready(dev_data->rx.dma_dev)) {
		LOG_ERR("%s device not ready", dev_data->rx.dma_dev->name);
		return -ENODEV;
	}

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	err = clock_control_on(clk_dev, dev_cfg->clock_subsys);
	if (err != 0) {
		LOG_ERR("Clock control enabling failed: %d", err);
		return -EIO;
	}

	if (dev_data->tx.dma_dev) {
		err = k_msgq_alloc_init(&dev_data->tx.queue, sizeof(struct queue_item),
					CONFIG_I2S_ESP32_TX_BLOCK_COUNT);
		if (err < 0) {
			return err;
		}
	}

	if (dev_data->rx.dma_dev) {
		err = k_msgq_alloc_init(&dev_data->rx.queue, sizeof(struct queue_item),
					CONFIG_I2S_ESP32_RX_BLOCK_COUNT);
		if (err < 0) {
			return err;
		}
	}

	i2s_ll_enable_clock(dev_cfg->hal_cxt.dev);

	LOG_INF("%s initialized", dev->name);

	return 0;
}

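/*
 * Configure one direction of the peripheral: validate the requested format,
 * derive slot and clock settings, program the HAL and apply pinctrl. A
 * frame_clk_freq of zero deconfigures the direction.
 */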
static int i2s_esp32_configure(const struct device *dev, enum i2s_dir dir,
			       const struct i2s_config *i2s_cfg)
{
	const struct i2s_esp32_cfg *const dev_cfg = dev->config;
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream;
	uint8_t data_format;

	switch (dir) {
	case I2S_DIR_RX:
		stream = &dev_data->rx;
		if (stream->dma_dev == NULL) {
			LOG_ERR("RX DMA controller not available");
			return -EINVAL;
		}
		break;
	case I2S_DIR_TX:
		stream = &dev_data->tx;
		if (stream->dma_dev == NULL) {
			LOG_ERR("TX DMA controller not available");
			return -EINVAL;
		}
		break;
	case I2S_DIR_BOTH:
		LOG_ERR("I2S_DIR_BOTH is not supported");
		return -ENOSYS;
	default:
		LOG_ERR("Invalid direction");
		return -EINVAL;
	}

	if (stream->state != I2S_STATE_NOT_READY && stream->state != I2S_STATE_READY) {
		LOG_ERR("Invalid state: %d", (int)stream->state);
		return -EINVAL;
	}

	if (i2s_cfg->frame_clk_freq == 0U) {
		stream->queue_drop(stream);
		memset(&stream->i2s_cfg, 0, sizeof(struct i2s_config));
		stream->is_slave = false;
		stream->state = I2S_STATE_NOT_READY;
		return 0;
	}

	data_format = i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK;

	if (data_format != I2S_FMT_DATA_FORMAT_I2S &&
	    data_format != I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED &&
	    data_format != I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED) {
		LOG_ERR("Invalid data format: %u", (unsigned int)data_format);
		return -EINVAL;
	}

	if (i2s_cfg->word_size != 8 && i2s_cfg->word_size != 16 && i2s_cfg->word_size != 24 &&
	    i2s_cfg->word_size != 32) {
		LOG_ERR("Word size not supported: %d", (int)i2s_cfg->word_size);
		return -EINVAL;
	}

	if (i2s_cfg->channels != 2) {
		LOG_ERR("Currently only 2 channels are supported");
		return -EINVAL;
	}

	if (i2s_cfg->options & I2S_OPT_LOOPBACK) {
		LOG_ERR("For internal loopback: I2S#_O_SD_GPIO = I2S#_I_SD_GPIO");
		return -EINVAL;
	}

	if (i2s_cfg->options & I2S_OPT_PINGPONG) {
		LOG_ERR("Unsupported option: I2S_OPT_PINGPONG");
		return -EINVAL;
	}

	if ((i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) != 0 &&
	    (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) != 0) {
		stream->is_slave = true;
	} else if ((i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE) == 0 &&
		   (i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) == 0) {
		stream->is_slave = false;
	} else {
		LOG_ERR("I2S_OPT_FRAME_CLK and I2S_OPT_BIT_CLK options must both be"
			" MASTER or SLAVE");
		return -EINVAL;
	}

	i2s_hal_slot_config_t slot_cfg = {0};

	slot_cfg.data_bit_width = i2s_cfg->word_size;
	slot_cfg.slot_mode = I2S_SLOT_MODE_STEREO;
	slot_cfg.slot_bit_width = i2s_cfg->word_size > 16 ? 32 : 16;
	if (data_format == I2S_FMT_DATA_FORMAT_I2S) {
		slot_cfg.std.ws_pol = i2s_cfg->format & I2S_FMT_FRAME_CLK_INV ? true : false;
		slot_cfg.std.bit_shift = true;
		slot_cfg.std.left_align = true;
	} else {
		slot_cfg.std.ws_pol = i2s_cfg->format & I2S_FMT_FRAME_CLK_INV ? false : true;
		slot_cfg.std.bit_shift = false;
		if (data_format == I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED) {
			slot_cfg.std.left_align = true;
		} else if (data_format == I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED) {
			slot_cfg.std.left_align = false;
		} else {
			LOG_ERR("Invalid data format: %u", (unsigned int)data_format);
		}
	}
	slot_cfg.std.ws_width = slot_cfg.slot_bit_width;
	slot_cfg.std.slot_mask = I2S_STD_SLOT_BOTH;
	slot_cfg.std.big_endian = false;
	slot_cfg.std.bit_order_lsb = i2s_cfg->format & I2S_FMT_DATA_ORDER_LSB ? true : false;

	int err;
	i2s_hal_clock_info_t i2s_hal_clock_info;
	i2s_hal_context_t *hal_cxt = (i2s_hal_context_t *)&dev_cfg->hal_cxt;

	err = i2s_esp32_calculate_clock(i2s_cfg, slot_cfg.slot_bit_width, &i2s_hal_clock_info);
	if (err != ESP_OK) {
		return -EINVAL;
	}

	if (dir == I2S_DIR_TX) {
		if (dev_data->rx.state != I2S_STATE_NOT_READY) {
			if (stream->is_slave && !dev_data->rx.is_slave) { /* full duplex */
				i2s_ll_share_bck_ws(hal_cxt->dev, true);
			} else {
				i2s_ll_share_bck_ws(hal_cxt->dev, false);
			}
		} else {
			i2s_ll_share_bck_ws(hal_cxt->dev, false);
		}

		i2s_hal_std_set_tx_slot(hal_cxt, stream->is_slave, &slot_cfg);

		i2s_hal_set_tx_clock(hal_cxt, &i2s_hal_clock_info, I2S_ESP32_CLK_SRC);

		err = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (err < 0) {
			LOG_ERR("Pins setup failed: %d", err);
			return -EIO;
		}

		if (dev_data->tx.state != I2S_STATE_NOT_READY) {
			if (stream->is_slave && dev_data->rx.is_slave) {
				i2s_ll_mclk_bind_to_tx_clk(hal_cxt->dev);
			}
		}

		i2s_hal_std_enable_tx_channel(hal_cxt);
	} else if (dir == I2S_DIR_RX) {
		if (dev_data->tx.state != I2S_STATE_NOT_READY) {
			if (stream->is_slave && !dev_data->tx.is_slave) { /* full duplex */
				i2s_ll_share_bck_ws(hal_cxt->dev, true);
			} else {
				i2s_ll_share_bck_ws(hal_cxt->dev, false);
			}
		} else {
			i2s_ll_share_bck_ws(hal_cxt->dev, false);
		}

		i2s_hal_std_set_rx_slot(hal_cxt, stream->is_slave, &slot_cfg);

		i2s_hal_set_rx_clock(hal_cxt, &i2s_hal_clock_info, I2S_ESP32_CLK_SRC);

		err = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (err < 0) {
			LOG_ERR("Pins setup failed: %d", err);
			return -EIO;
		}

		if (dev_data->tx.state != I2S_STATE_NOT_READY) {
			if (!stream->is_slave && !dev_data->tx.is_slave) {
				i2s_ll_mclk_bind_to_rx_clk(hal_cxt->dev);
			}
		}

		i2s_hal_std_enable_rx_channel(hal_cxt);
	}

	memcpy(&stream->i2s_cfg, i2s_cfg, sizeof(struct i2s_config));

	stream->state = I2S_STATE_READY;

	return 0;
}

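/* Return the active configuration of a direction, or NULL if unconfigured. */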
static const struct i2s_config *i2s_esp32_config_get(const struct device *dev, enum i2s_dir dir)
{
	struct i2s_esp32_data *dev_data = dev->data;
	struct i2s_esp32_stream *stream;

	if (dir == I2S_DIR_RX) {
		stream = &dev_data->rx;
	} else {
		stream = &dev_data->tx;
	}

	if (stream->state == I2S_STATE_NOT_READY) {
		return NULL;
	}

	return &stream->i2s_cfg;
}

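/*
 * State-machine triggers: START begins streaming, STOP halts after the
 * in-flight block, DRAIN lets TX empty its queue first, DROP discards
 * queued blocks, and PREPARE clears an error state.
 */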
static int i2s_esp32_trigger(const struct device *dev, enum i2s_dir dir, enum i2s_trigger_cmd cmd)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	struct i2s_esp32_stream *stream;
	struct dma_status dma_channel_status;
	unsigned int key;
	int err;

	switch (dir) {
	case I2S_DIR_RX:
		stream = &dev_data->rx;
		break;
	case I2S_DIR_TX:
		stream = &dev_data->tx;
		break;
	case I2S_DIR_BOTH:
		LOG_ERR("Unsupported direction: %d", (int)dir);
		return -ENOSYS;
	default:
		LOG_ERR("Invalid direction: %d", (int)dir);
		return -EINVAL;
	}

	switch (cmd) {
	case I2S_TRIGGER_START:
		if (stream->state != I2S_STATE_READY) {
			LOG_ERR("START - Invalid state: %d", (int)stream->state);
			return -EIO;
		}

		err = stream->start_transfer(dev);
		if (err < 0) {
			LOG_ERR("START - Transfer start failed: %d", err);
			return -EIO;
		}
		stream->last_block = false;
		stream->state = I2S_STATE_RUNNING;
		break;

	case I2S_TRIGGER_STOP:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("STOP - Invalid state: %d", (int)stream->state);
			return -EIO;
		}

		err = dma_get_status(stream->dma_dev, stream->dma_channel, &dma_channel_status);
		if (err < 0) {
			irq_unlock(key);
			LOG_ERR("Unable to get DMA channel[%d] status: %d",
				(int)stream->dma_channel, err);
			return -EIO;
		}

		if (dma_channel_status.busy) {
			stream->stop_without_draining = true;
			stream->state = I2S_STATE_STOPPING;
		} else {
			stream->stop_transfer(dev);
			stream->last_block = true;
			stream->state = I2S_STATE_READY;
		}

		irq_unlock(key);
		break;

	case I2S_TRIGGER_DRAIN:
		key = irq_lock();
		if (stream->state != I2S_STATE_RUNNING) {
			irq_unlock(key);
			LOG_ERR("DRAIN - Invalid state: %d", (int)stream->state);
			return -EIO;
		}

		err = dma_get_status(stream->dma_dev, stream->dma_channel, &dma_channel_status);
		if (err < 0) {
			irq_unlock(key);
			LOG_ERR("Unable to get DMA channel[%d] status: %d",
				(int)stream->dma_channel, err);
			return -EIO;
		}

		if (dir == I2S_DIR_TX) {
			if (k_msgq_num_used_get(&stream->queue) > 0 || dma_channel_status.busy) {
				stream->stop_without_draining = false;
				stream->state = I2S_STATE_STOPPING;
			} else {
				stream->stop_transfer(dev);
				stream->state = I2S_STATE_READY;
			}
		} else if (dir == I2S_DIR_RX) {
			if (dma_channel_status.busy) {
				stream->stop_without_draining = true;
				stream->state = I2S_STATE_STOPPING;
			} else {
				stream->stop_transfer(dev);
				stream->last_block = true;
				stream->state = I2S_STATE_READY;
			}
		} else {
			irq_unlock(key);
			LOG_ERR("Invalid direction: %d", (int)dir);
			return -EINVAL;
		}

		irq_unlock(key);
		break;

	case I2S_TRIGGER_DROP:
		if (stream->state == I2S_STATE_NOT_READY) {
			LOG_ERR("DROP - invalid state: %d", (int)stream->state);
			return -EIO;
		}
		stream->stop_transfer(dev);
		stream->queue_drop(stream);
		stream->state = I2S_STATE_READY;
		break;

	case I2S_TRIGGER_PREPARE:
		if (stream->state != I2S_STATE_ERROR) {
			LOG_ERR("PREPARE - invalid state: %d", (int)stream->state);
			return -EIO;
		}
		stream->queue_drop(stream);
		stream->state = I2S_STATE_READY;
		break;

	default:
		LOG_ERR("Unsupported trigger command: %d", (int)cmd);
		return -EINVAL;
	}

	return 0;
}

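/* Pop the next filled block from the RX queue, waiting up to the configured
 * timeout; ownership of the block passes to the caller.
 */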
static int i2s_esp32_read(const struct device *dev, void **mem_block, size_t *size)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	struct queue_item item;
	int err;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_ERR("RX invalid state: %d", (int)dev_data->rx.state);
		return -EIO;
	} else if (dev_data->rx.state == I2S_STATE_ERROR &&
		   k_msgq_num_used_get(&dev_data->rx.queue) == 0) {
		LOG_ERR("RX queue empty");
		return -EIO;
	}

	err = k_msgq_get(&dev_data->rx.queue, &item, K_MSEC(dev_data->rx.i2s_cfg.timeout));
	if (err < 0) {
		LOG_ERR("RX queue empty");
		return err;
	}

	*mem_block = item.buffer;
	*size = item.size;

	return 0;
}

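/* Queue a block for transmission, waiting up to the configured timeout. */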
static int i2s_esp32_write(const struct device *dev, void *mem_block, size_t size)
{
	struct i2s_esp32_data *const dev_data = dev->data;
	int err;

	if (dev_data->tx.state != I2S_STATE_RUNNING && dev_data->tx.state != I2S_STATE_READY) {
		LOG_ERR("TX Invalid state: %d", (int)dev_data->tx.state);
		return -EIO;
	}

	if (size > dev_data->tx.i2s_cfg.block_size) {
		LOG_ERR("Max write size is: %u", (unsigned int)dev_data->tx.i2s_cfg.block_size);
		return -EINVAL;
	}

	struct queue_item item = {.buffer = mem_block, .size = size};

	err = k_msgq_put(&dev_data->tx.queue, &item, K_MSEC(dev_data->tx.i2s_cfg.timeout));
	if (err < 0) {
		LOG_ERR("TX queue full");
		return err;
	}

	return 0;
}

static DEVICE_API(i2s, i2s_esp32_driver_api) = {
	.configure = i2s_esp32_configure,
	.config_get = i2s_esp32_config_get,
	.trigger = i2s_esp32_trigger,
	.read = i2s_esp32_read,
	.write = i2s_esp32_write,
};

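/*
 * Devicetree glue: one stream initializer per direction. The DMA device and
 * channel come from the node's "dmas" phandles when present; otherwise the
 * stream is left without a DMA controller and configure() rejects it.
 */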
#define I2S_ESP32_DMA_CHANNEL_INIT(index, dir)                                                     \
	.dir = {.state = I2S_STATE_NOT_READY,                                                      \
		.is_slave = false,                                                                 \
		.dma_dev = UTIL_AND(DT_INST_DMAS_HAS_NAME(index, dir),                             \
				    DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir))),         \
		.dma_channel = UTIL_AND(DT_INST_DMAS_HAS_NAME(index, dir),                         \
					DT_INST_DMAS_CELL_BY_NAME(index, dir, channel)),           \
		.mem_block = NULL,                                                                 \
		.mem_block_len = 0,                                                                \
		.start_transfer = i2s_esp32_##dir##_start_transfer,                                \
		.stop_transfer = i2s_esp32_##dir##_stop_transfer,                                  \
		.queue_drop = i2s_esp32_queue_drop,                                                \
		.last_block = false,                                                               \
		.stop_without_draining = false}

#define I2S_ESP32_INIT(index)                                                                      \
	PINCTRL_DT_INST_DEFINE(index);                                                             \
                                                                                                   \
	static const struct i2s_esp32_cfg i2s_esp32_config_##index = {                             \
		.unit = DT_PROP(DT_DRV_INST(index), unit),                                         \
		.hal_cxt = {.dev = (i2s_dev_t *)DT_INST_REG_ADDR(index)},                          \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),                                     \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(index)),                            \
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(index, offset)};       \
                                                                                                   \
	static struct i2s_esp32_data i2s_esp32_data_##index = {                                    \
		I2S_ESP32_DMA_CHANNEL_INIT(index, rx), I2S_ESP32_DMA_CHANNEL_INIT(index, tx)};     \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(index, &i2s_esp32_initialize, NULL, &i2s_esp32_data_##index,         \
			      &i2s_esp32_config_##index, POST_KERNEL, CONFIG_I2S_INIT_PRIORITY,    \
			      &i2s_esp32_driver_api);

DT_INST_FOREACH_STATUS_OKAY(I2S_ESP32_INIT)