/*
 * Copyright (c) 2022 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT espressif_esp32_gdma

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_esp32_gdma, CONFIG_DMA_LOG_LEVEL);

#include <hal/gdma_hal.h>
#include <hal/gdma_ll.h>
#include <soc/gdma_channel.h>
#include <hal/dma_types.h>

#include <soc.h>
#include <esp_memory_utils.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_esp32.h>
#include <zephyr/drivers/clock_control.h>
#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#else
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#endif

#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#define ISR_HANDLER isr_handler_t
#else
#define ISR_HANDLER intr_handler_t
#endif

#define DMA_MAX_CHANNEL SOC_GDMA_PAIRS_PER_GROUP

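/*
 * Values for the peripheral-select argument of the
 * gdma_ll_{rx,tx}_connect_to_periph() calls below: ESP_DMA_M2M_ON marks the
 * channel as memory-to-memory, ESP_DMA_M2M_OFF connects it to a peripheral
 * trigger.
 */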
#define ESP_DMA_M2M_ON  0
#define ESP_DMA_M2M_OFF 1

struct dma_esp32_data {
	gdma_hal_context_t hal;
};

enum dma_channel_dir {
	DMA_RX,
	DMA_TX,
	DMA_UNCONFIGURED
};

struct irq_config {
	uint8_t irq_source;
	uint8_t irq_priority;
	int irq_flags;
};

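/*
 * Each GDMA pair is exposed as two logical DMA channels: the even index
 * (channel_id * 2) is the RX stream and the odd index (channel_id * 2 + 1)
 * is the TX stream.
 */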
struct dma_esp32_channel {
	uint8_t dir;
	uint8_t channel_id;
	int host_id;
	int periph_id;
	dma_callback_t cb;
	void *user_data;
	dma_descriptor_t desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM];
};

struct dma_esp32_config {
	struct irq_config *irq_config;
	uint8_t irq_size;
	void **irq_handlers;
	uint8_t dma_channel_max;
	uint8_t sram_alignment;
	struct dma_esp32_channel dma_channel[DMA_MAX_CHANNEL * 2];
	void (*config_irq)(const struct device *dev);
	struct device *src_dev;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};

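/*
 * Map the raw RX interrupt status onto a Zephyr DMA status for the user
 * callback: SUC_EOF together with DONE means the whole transfer finished,
 * DONE alone means one descriptor (block) completed, and anything else is
 * passed through as a negative error code.
 */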
static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev,
					      struct dma_esp32_channel *rx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status);
	if (rx->cb) {
		int status;

		if (intr_status == (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
			status = DMA_STATUS_COMPLETE;
		} else if (intr_status == GDMA_LL_EVENT_RX_DONE) {
			status = DMA_STATUS_BLOCK;
#if defined(CONFIG_SOC_SERIES_ESP32S3)
		} else if (intr_status == GDMA_LL_EVENT_RX_WATER_MARK) {
			status = DMA_STATUS_BLOCK;
#endif
		} else {
			status = -intr_status;
		}

		rx->cb(dev, rx->user_data, rx->channel_id * 2, status);
	}
}

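/*
 * TX completion bits are masked out so a fully successful transfer reaches
 * the callback as 0 (DMA_STATUS_COMPLETE); any bits left over are reported
 * as a negative error code.
 */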
static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev,
					      struct dma_esp32_channel *tx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, tx->channel_id, intr_status);
	if (tx->cb) {
		intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE |
				 GDMA_LL_EVENT_TX_EOF);

		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
	}
}

#if !defined(CONFIG_SOC_SERIES_ESP32C6) && !defined(CONFIG_SOC_SERIES_ESP32S3)
static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_id, uint8_t tx_id)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[rx_id];
	struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[tx_id];
	uint32_t intr_status = 0;

	intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, dma_channel_rx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_rx(dev, dma_channel_rx, intr_status);
	}

	intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, dma_channel_tx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_tx(dev, dma_channel_tx, intr_status);
	}
}
#endif

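/*
 * Translate a chain of Zephyr dma_block_config blocks into the hardware
 * descriptor list. Blocks larger than
 * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED are split across several
 * descriptors, and every buffer must reside in DMA-capable memory.
 */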
static int dma_esp32_config_descriptor(struct dma_esp32_channel *dma_channel,
				       struct dma_block_config *block)
{
	if (!block) {
		LOG_ERR("At least one dma block is required");
		return -EINVAL;
	}

	uint32_t target_address = 0, block_size = 0;
	dma_descriptor_t *desc_iter = dma_channel->desc_list;

	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
		if (block_size == 0) {
			if (dma_channel->dir == DMA_TX) {
				target_address = block->source_address;
			} else {
				target_address = block->dest_address;
			}

			if (!esp_ptr_dma_capable((uint32_t *)target_address)
#if defined(CONFIG_ESP_SPIRAM)
			    && !esp_ptr_dma_ext_capable((uint32_t *)target_address)
#endif
			) {
				if (dma_channel->dir == DMA_TX) {
					LOG_ERR("Tx buffer not in DMA capable memory: %p",
						(uint32_t *)target_address);
				} else {
					LOG_ERR("Rx buffer not in DMA capable memory: %p",
						(uint32_t *)target_address);
				}

				return -EINVAL;
			}

			block_size = block->block_size;
		}

		uint32_t buffer_size;

		if (block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
			buffer_size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
		} else {
			buffer_size = block_size;
		}

		memset(desc_iter, 0, sizeof(dma_descriptor_t));
		desc_iter->buffer = (void *)target_address;
		desc_iter->dw0.size = buffer_size;
		if (dma_channel->dir == DMA_TX) {
			desc_iter->dw0.length = buffer_size;
		}
		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;

		target_address += buffer_size;
		block_size -= buffer_size;

		if (!block_size) {
			if (block->next_block) {
				block = block->next_block;
			} else {
				desc_iter->next = NULL;
				if (dma_channel->dir == DMA_TX) {
					desc_iter->dw0.suc_eof = 1;
				}
				break;
			}
		}

		desc_iter->next = desc_iter + 1;
		desc_iter += 1;
	}

	/*
	 * If the loop exhausted every descriptor without terminating the
	 * chain, desc_iter now points one past the end of desc_list;
	 * dereferencing desc_iter->next there would read out of bounds.
	 */
	if (desc_iter == &dma_channel->desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM]) {
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
		LOG_ERR("Run out of DMA descriptors. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
		return -EINVAL;
	}

	return 0;
}

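/*
 * Configure the RX (incoming) half of a channel pair: reset the hardware
 * channel, connect it to its trigger, optionally enable burst mode (only
 * effective with sufficient SRAM alignment) and keep all RX interrupts
 * masked until the channel is started.
 */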
static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channel *dma_channel,
			       struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	dma_channel->dir = DMA_RX;

	gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);

	gdma_ll_rx_connect_to_periph(
		data->hal.dev, dma_channel->channel_id,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : ESP_DMA_M2M_OFF,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : dma_channel->periph_id);

	if (config_dma->dest_burst_length) {
		/*
		 * RX channel burst mode depends on specific data alignment
		 */
		gdma_ll_rx_enable_data_burst(data->hal.dev, dma_channel->channel_id,
					     config->sram_alignment >= 4);
		gdma_ll_rx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id,
						   config->sram_alignment >= 4);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_RX_EVENT_MASK,
				    false);

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id,
					  GDMA_LL_RX_EVENT_MASK);

	return dma_esp32_config_descriptor(dma_channel, config_dma->head_block);
}

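/*
 * Configure the TX (outgoing) half of a channel pair. Unlike RX, TX burst
 * mode has no data-alignment requirement.
 */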
static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channel *dma_channel,
			       struct dma_config *config_dma)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	dma_channel->dir = DMA_TX;

	gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);

	gdma_ll_tx_connect_to_periph(
		data->hal.dev, dma_channel->channel_id,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : ESP_DMA_M2M_OFF,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : dma_channel->periph_id);

	/*
	 * TX channel can always enable burst mode, no matter data alignment
	 */
	if (config_dma->source_burst_length) {
		gdma_ll_tx_enable_data_burst(data->hal.dev, dma_channel->channel_id, true);
		gdma_ll_tx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id, true);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_TX_EVENT_MASK,
				    false);

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id,
					  GDMA_LL_TX_EVENT_MASK);

	return dma_esp32_config_descriptor(dma_channel, config_dma->head_block);
}

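/*
 * Driver entry point for dma_config(). Even channel numbers address the RX
 * stream of a GDMA pair, odd numbers the TX stream (pair id = channel / 2);
 * MEMORY_TO_MEMORY configures both streams of the pair at once.
 *
 * Minimal memory-to-memory usage sketch through the generic Zephyr DMA API.
 * The node label "dma", the buffers and the callback are assumptions of the
 * example, not guarantees of this driver:
 *
 *	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma));
 *	struct dma_block_config block = {
 *		.source_address = (uint32_t)src_buf,
 *		.dest_address = (uint32_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.head_block = &block,
 *		.dma_callback = xfer_done_cb,	// optional dma_callback_t
 *	};
 *
 *	if (dma_config(dma_dev, 0, &cfg) == 0) {
 *		dma_start(dma_dev, 0);	// M2M: starts both RX and TX
 *	}
 */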
static int dma_esp32_config(const struct device *dev, uint32_t channel,
			    struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	int ret = 0;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!config_dma) {
		return -EINVAL;
	}

	if (config_dma->source_burst_length != config_dma->dest_burst_length) {
		LOG_ERR("Source and destination burst lengths must be equal");
		return -EINVAL;
	}

	dma_channel->periph_id = config_dma->channel_direction == MEMORY_TO_MEMORY
					 ? SOC_GDMA_TRIG_PERIPH_M2M0
					 : config_dma->dma_slot;

	dma_channel->channel_id = channel / 2;

	switch (config_dma->channel_direction) {
	case MEMORY_TO_MEMORY:
		/*
		 * Create both Tx and Rx stream on the same channel_id
		 */
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		dma_channel_rx->channel_id = dma_channel->channel_id;
		dma_channel_tx->channel_id = dma_channel->channel_id;

		dma_channel_rx->periph_id = dma_channel->periph_id;
		dma_channel_tx->periph_id = dma_channel->periph_id;

		ret = dma_esp32_config_rx(dev, dma_channel_rx, config_dma);
		if (ret < 0) {
			break;
		}
		ret = dma_esp32_config_tx(dev, dma_channel_tx, config_dma);
		break;
	case PERIPHERAL_TO_MEMORY:
		ret = dma_esp32_config_rx(dev, dma_channel, config_dma);
		break;
	case MEMORY_TO_PERIPHERAL:
		ret = dma_esp32_config_tx(dev, dma_channel, config_dma);
		break;
	default:
		LOG_ERR("Invalid channel direction");
		return -EINVAL;
	}

	return ret;
}

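/*
 * Arm the descriptor list and completion interrupts, then start the
 * hardware. A memory-to-memory channel starts both streams of the pair, RX
 * first so the receive side is armed before TX begins pushing data.
 */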
static int dma_esp32_start(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE, true);
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_EVENT_TX_EOF, true);

		gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)dma_channel_rx->desc_list);
		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);

		gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)dma_channel_tx->desc_list);
		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
	} else {
		if (dma_channel->dir == DMA_RX) {
			gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
						    GDMA_LL_EVENT_RX_SUC_EOF |
						    GDMA_LL_EVENT_RX_DONE, true);
			gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)dma_channel->desc_list);
			gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
		} else if (dma_channel->dir == DMA_TX) {
			gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
						    GDMA_LL_EVENT_TX_EOF, true);
			gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)dma_channel->desc_list);
			gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
		} else {
			LOG_ERR("Channel %d is not configured", channel);
			return -EINVAL;
		}
	}

	return 0;
}

static int dma_esp32_stop(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_RX_EVENT_MASK, false);
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_TX_EVENT_MASK, false);
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_RX_EVENT_MASK, false);
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					    GDMA_LL_TX_EVENT_MASK, false);
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	return 0;
}

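/*
 * Report channel state. For RX, total_copied is an estimate based on the
 * current descriptor: the bytes received into it so far plus the payload of
 * every already-consumed descriptor, assuming all descriptors are equally
 * sized.
 */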
static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
				struct dma_status *status)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	dma_descriptor_t *desc;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!status) {
		return -EINVAL;
	}

	memset(status, 0, sizeof(struct dma_status));

	if (dma_channel->dir == DMA_RX) {
		status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = PERIPHERAL_TO_MEMORY;
		desc = (dma_descriptor_t *)gdma_ll_rx_get_current_desc_addr(
			data->hal.dev, dma_channel->channel_id);
		if (desc >= dma_channel->desc_list) {
			status->read_position = desc - dma_channel->desc_list;
			status->total_copied = desc->dw0.length
					       + dma_channel->desc_list[0].dw0.size
							 * status->read_position;
		}
	} else if (dma_channel->dir == DMA_TX) {
		status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = MEMORY_TO_PERIPHERAL;
		desc = (dma_descriptor_t *)gdma_ll_tx_get_current_desc_addr(
			data->hal.dev, dma_channel->channel_id);
		if (desc >= dma_channel->desc_list) {
			status->write_position = desc - dma_channel->desc_list;
		}
	}

	return 0;
}

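/*
 * Rebuild the descriptor list for a new transfer of `size` bytes without
 * touching the rest of the channel setup, so a previously configured
 * channel can be reused for back-to-back transfers.
 */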
static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst,
			    size_t size)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	dma_descriptor_t *desc_iter = dma_channel->desc_list;
	uint32_t buf;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);
		buf = dst;
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);
		buf = src;
	} else {
		return -EINVAL;
	}

	for (int i = 0; i < ARRAY_SIZE(dma_channel->desc_list); ++i) {
		memset(desc_iter, 0, sizeof(dma_descriptor_t));
		desc_iter->buffer = (void *)(buf + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED * i);
		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
		if (size <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
			desc_iter->dw0.size = size;
			if (dma_channel->dir == DMA_TX) {
				desc_iter->dw0.length = size;
				desc_iter->dw0.suc_eof = 1;
			}
			desc_iter->next = NULL;
			break;
		}
		desc_iter->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
		if (dma_channel->dir == DMA_TX) {
			desc_iter->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
		}
		size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
		desc_iter->next = desc_iter + 1;
		desc_iter += 1;
	}

	/*
	 * Without a break above, desc_iter points one past the end of
	 * desc_list; dereferencing or clearing it there would touch memory
	 * outside the descriptor array.
	 */
	if (desc_iter == &dma_channel->desc_list[ARRAY_SIZE(dma_channel->desc_list)]) {
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
		LOG_ERR("Not enough DMA descriptors. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
		return -EINVAL;
	}

	return 0;
}

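/*
 * Allocate one IRAM-resident ISR per interrupt listed in the devicetree
 * node, pairing entry i of irq_config with entry i of the irq_handlers
 * table.
 */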
static int dma_esp32_configure_irq(const struct device *dev)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct irq_config *irq_cfg = (struct irq_config *)config->irq_config;

	for (uint8_t i = 0; i < config->irq_size; i++) {
		int ret = esp_intr_alloc(irq_cfg[i].irq_source,
					 ESP_PRIO_TO_FLAGS(irq_cfg[i].irq_priority) |
					 ESP_INT_FLAGS_CHECK(irq_cfg[i].irq_flags) |
					 ESP_INTR_FLAG_IRAM,
					 (ISR_HANDLER)config->irq_handlers[i],
					 (void *)dev,
					 NULL);
		if (ret != 0) {
			LOG_ERR("Could not allocate interrupt handler");
			return ret;
		}
	}

	return 0;
}

static int dma_esp32_init(const struct device *dev)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *)dev->data;
	struct dma_esp32_channel *dma_channel;
	int ret = 0;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret < 0) {
		LOG_ERR("Could not initialize clock (%d)", ret);
		return ret;
	}

	ret = dma_esp32_configure_irq(dev);
	if (ret < 0) {
		LOG_ERR("Could not configure IRQ (%d)", ret);
		return ret;
	}

	for (uint8_t i = 0; i < DMA_MAX_CHANNEL * 2; i++) {
		dma_channel = &config->dma_channel[i];
		dma_channel->cb = NULL;
		dma_channel->dir = DMA_UNCONFIGURED;
		dma_channel->periph_id = ESP_GDMA_TRIG_PERIPH_INVALID;
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
	}

	gdma_hal_init(&data->hal, 0);
	gdma_ll_enable_clock(data->hal.dev, true);

	return 0;
}

static DEVICE_API(dma, dma_esp32_api) = {
	.config = dma_esp32_config,
	.start = dma_esp32_start,
	.stop = dma_esp32_stop,
	.get_status = dma_esp32_get_status,
	.reload = dma_esp32_reload,
};

#if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3)

#define DMA_ESP32_DEFINE_IRQ_HANDLER(channel)                                                      \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_rx(                \
		const struct device *dev)                                                          \
	{                                                                                          \
		struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;          \
		struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;           \
		uint32_t intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, channel);    \
		if (intr_status) {                                                                 \
			dma_esp32_isr_handle_rx(dev, &config->dma_channel[channel * 2],            \
						intr_status);                                      \
		}                                                                                  \
	}                                                                                          \
                                                                                                   \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_tx(                \
		const struct device *dev)                                                          \
	{                                                                                          \
		struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;          \
		struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;           \
		uint32_t intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, channel);    \
		if (intr_status) {                                                                 \
			dma_esp32_isr_handle_tx(dev, &config->dma_channel[channel * 2 + 1],        \
						intr_status);                                      \
		}                                                                                  \
	}

#else

#define DMA_ESP32_DEFINE_IRQ_HANDLER(channel)                                                      \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel(                     \
		const struct device *dev)                                                          \
	{                                                                                          \
		dma_esp32_isr_handle(dev, channel * 2, channel * 2 + 1);                           \
	}

#endif

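/*
 * ESP32-C6 and ESP32-S3 route the RX and TX interrupts of a pair through
 * separate lines, so each gets its own handler; the other SoCs share one
 * interrupt per pair and demultiplex in dma_esp32_isr_handle().
 */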
#if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3)
#define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel##_rx, dma_esp32_isr_##channel##_tx
#else
#define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel
#endif

DMA_ESP32_DEFINE_IRQ_HANDLER(0)
DMA_ESP32_DEFINE_IRQ_HANDLER(1)
DMA_ESP32_DEFINE_IRQ_HANDLER(2)
#if DMA_MAX_CHANNEL >= 5
DMA_ESP32_DEFINE_IRQ_HANDLER(3)
DMA_ESP32_DEFINE_IRQ_HANDLER(4)
#endif

static void *irq_handlers[] = {
	ESP32_DMA_HANDLER(0),
	ESP32_DMA_HANDLER(1),
	ESP32_DMA_HANDLER(2),
#if DMA_MAX_CHANNEL >= 5
	ESP32_DMA_HANDLER(3),
	ESP32_DMA_HANDLER(4),
#endif
};

#define IRQ_NUM(idx) DT_NUM_IRQS(DT_DRV_INST(idx))
#define IRQ_ENTRY(n, idx) { \
	DT_INST_IRQ_BY_IDX(idx, n, irq), \
	DT_INST_IRQ_BY_IDX(idx, n, priority), \
	DT_INST_IRQ_BY_IDX(idx, n, flags) },

#define DMA_ESP32_INIT(idx)                                                                        \
	static struct irq_config irq_config_##idx[] = {                                            \
		LISTIFY(IRQ_NUM(idx), IRQ_ENTRY, (), idx)                                          \
	};                                                                                         \
	static struct dma_esp32_config dma_config_##idx = {                                        \
		.irq_config = irq_config_##idx,                                                    \
		.irq_size = IRQ_NUM(idx),                                                          \
		.irq_handlers = irq_handlers,                                                      \
		.dma_channel_max = DT_INST_PROP(idx, dma_channels),                                \
		.sram_alignment = DT_INST_PROP(idx, dma_buf_addr_alignment),                       \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),                              \
		.clock_subsys = (void *)DT_INST_CLOCKS_CELL(idx, offset),                          \
	};                                                                                         \
	static struct dma_esp32_data dma_data_##idx = {                                            \
		.hal =                                                                             \
			{                                                                          \
				.dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx),                        \
			},                                                                         \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx, &dma_config_##idx,      \
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_esp32_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_ESP32_INIT)