/*
 * Copyright (c) 2022 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT espressif_esp32_gdma

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_esp32_gdma, CONFIG_DMA_LOG_LEVEL);

#include <hal/gdma_hal.h>
#include <hal/gdma_ll.h>
#include <soc/gdma_channel.h>
#include <hal/dma_types.h>

#include <soc.h>
#include <esp_memory_utils.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_esp32.h>
#include <zephyr/drivers/clock_control.h>
#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#else
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#endif

#if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
#define ISR_HANDLER isr_handler_t
#else
#define ISR_HANDLER intr_handler_t
#endif

#define DMA_MAX_CHANNEL SOC_GDMA_PAIRS_PER_GROUP

#define ESP_DMA_M2M_ON  0
#define ESP_DMA_M2M_OFF 1
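
/*
 * Note on the values above, as used at the gdma_ll_{rx,tx}_connect_to_periph()
 * call sites below: ESP_DMA_M2M_ON selects memory-to-memory operation, while
 * ESP_DMA_M2M_OFF routes the channel to the peripheral trigger given by
 * periph_id.
 *
 * Zephyr DMA channel numbering maps onto GDMA channel pairs: even Zephyr
 * channels are the RX streams and odd channels the TX streams of GDMA pair
 * (channel / 2).
 */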

struct dma_esp32_data {
	gdma_hal_context_t hal;
};

enum dma_channel_dir {
	DMA_RX,
	DMA_TX,
	DMA_UNCONFIGURED
};

struct irq_config {
	uint8_t irq_source;
	uint8_t irq_priority;
	int irq_flags;
};

struct dma_esp32_channel {
	uint8_t dir;
	uint8_t channel_id;
	int host_id;
	int periph_id;
	dma_callback_t cb;
	void *user_data;
	dma_descriptor_t desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM];
};

struct dma_esp32_config {
	struct irq_config *irq_config;
	uint8_t irq_size;
	void **irq_handlers;
	uint8_t dma_channel_max;
	uint8_t sram_alignment;
	struct dma_esp32_channel dma_channel[DMA_MAX_CHANNEL * 2];
	void (*config_irq)(const struct device *dev);
	struct device *src_dev;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};

static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev,
					      struct dma_esp32_channel *rx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	uint32_t status;

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status);

	if (intr_status == (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
		status = DMA_STATUS_COMPLETE;
	} else if (intr_status == GDMA_LL_EVENT_RX_DONE) {
		status = DMA_STATUS_BLOCK;
#if defined(CONFIG_SOC_SERIES_ESP32S3)
	} else if (intr_status == GDMA_LL_EVENT_RX_WATER_MARK) {
		status = DMA_STATUS_BLOCK;
#endif
	} else {
		status = -intr_status;
	}

	if (rx->cb) {
		rx->cb(dev, rx->user_data, rx->channel_id * 2, status);
	}
}

static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev,
					      struct dma_esp32_channel *tx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, tx->channel_id, intr_status);

	intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE | GDMA_LL_EVENT_TX_EOF);

	if (tx->cb) {
		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
	}
}
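
/*
 * The handlers above report Zephyr channel indices, not GDMA pair ids: RX
 * completions arrive on (channel_id * 2) and TX completions on
 * (channel_id * 2 + 1). Because the EOF/DONE bits are masked off first, a
 * clean TX completion reaches the callback with status 0 (DMA_STATUS_COMPLETE);
 * anything else is the negated GDMA interrupt status. A minimal sketch of a
 * user callback decoding this (names are illustrative, not part of this
 * driver):
 *
 *	static void my_dma_cb(const struct device *dev, void *user_data,
 *			      uint32_t channel, int status)
 *	{
 *		bool is_tx = (channel % 2) != 0;
 *
 *		if (status == DMA_STATUS_COMPLETE) {
 *			// transfer finished
 *		} else if (status == DMA_STATUS_BLOCK) {
 *			// one RX block done, more descriptors pending
 *		} else {
 *			// status < 0: negated GDMA error interrupt status
 *		}
 *	}
 */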

#if !defined(CONFIG_SOC_SERIES_ESP32C6) && !defined(CONFIG_SOC_SERIES_ESP32S3)
static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_id, uint8_t tx_id)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[rx_id];
	struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[tx_id];
	uint32_t intr_status = 0;

	intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, dma_channel_rx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_rx(dev, dma_channel_rx, intr_status);
	}

	intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, dma_channel_tx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_tx(dev, dma_channel_tx, intr_status);
	}
}
#endif

static int dma_esp32_config_rx_descriptor(struct dma_esp32_channel *dma_channel,
						struct dma_block_config *block)
{
	if (!block) {
		LOG_ERR("At least one dma block is required");
		return -EINVAL;
	}

	if (!esp_ptr_dma_capable((uint32_t *)block->dest_address)
#if defined(CONFIG_ESP_SPIRAM)
	&& !esp_ptr_dma_ext_capable((uint32_t *)block->dest_address)
#endif
	) {
		LOG_ERR("Rx buffer not in DMA capable memory: %p", (uint32_t *)block->dest_address);
		return -EINVAL;
	}

	dma_descriptor_t *desc_iter = dma_channel->desc_list;

	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
			LOG_ERR("Size of block %d is too large", i);
			return -EINVAL;
		}
		memset(desc_iter, 0, sizeof(dma_descriptor_t));
		desc_iter->buffer = (void *)block->dest_address;
		desc_iter->dw0.size = block->block_size;
		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
		if (!block->next_block) {
			desc_iter->next = NULL;
			break;
		}
		desc_iter->next = desc_iter + 1;
		desc_iter += 1;
		block = block->next_block;
	}

	if (desc_iter >= &dma_channel->desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM]) {
		/*
		 * The loop exhausted every descriptor before consuming all blocks;
		 * dereferencing desc_iter->next here would read past desc_list.
		 */
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
		return -EINVAL;
	}

	return 0;
}
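
/*
 * Each dma_block_config in the caller's chain becomes one dma_descriptor_t
 * above. A minimal sketch of a two-buffer RX scatter chain (buffer names are
 * illustrative; buffers must sit in DMA-capable memory):
 *
 *	static uint8_t buf_a[64];
 *	static uint8_t buf_b[64];
 *
 *	struct dma_block_config blk1 = {
 *		.dest_address = (uint32_t)buf_b,
 *		.block_size = sizeof(buf_b),
 *	};
 *	struct dma_block_config blk0 = {
 *		.dest_address = (uint32_t)buf_a,
 *		.block_size = sizeof(buf_a),
 *		.next_block = &blk1,
 *	};
 */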

static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channel *dma_channel,
				struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	dma_channel->dir = DMA_RX;

	gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);

	gdma_ll_rx_connect_to_periph(
		data->hal.dev, dma_channel->channel_id,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : ESP_DMA_M2M_OFF,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : dma_channel->periph_id);

	if (config_dma->dest_burst_length) {
		/*
		 * RX channel burst mode depends on specific data alignment
		 */
		gdma_ll_rx_enable_data_burst(data->hal.dev, dma_channel->channel_id,
					     config->sram_alignment >= 4);
		gdma_ll_rx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id,
						   config->sram_alignment >= 4);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX);
	gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX,
				    config_dma->dma_callback != NULL);

	return dma_esp32_config_rx_descriptor(dma_channel, config_dma->head_block);
}

static int dma_esp32_config_tx_descriptor(struct dma_esp32_channel *dma_channel,
						struct dma_block_config *block)
{
	if (!block) {
		LOG_ERR("At least one dma block is required");
		return -EINVAL;
	}

	if (!esp_ptr_dma_capable((uint32_t *)block->source_address)
#if defined(CONFIG_ESP_SPIRAM)
	&& !esp_ptr_dma_ext_capable((uint32_t *)block->source_address)
#endif
	) {
		LOG_ERR("Tx buffer not in DMA capable memory: %p",
			(uint32_t *)block->source_address);
		return -EINVAL;
	}

	dma_descriptor_t *desc_iter = dma_channel->desc_list;

	for (int i = 0; i < CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM; ++i) {
		if (block->block_size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
			LOG_ERR("Size of block %d is too large", i);
			return -EINVAL;
		}
		memset(desc_iter, 0, sizeof(dma_descriptor_t));
		desc_iter->buffer = (void *)block->source_address;
		desc_iter->dw0.size = block->block_size;
		desc_iter->dw0.length = block->block_size;
		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
		if (!block->next_block) {
			desc_iter->next = NULL;
			desc_iter->dw0.suc_eof = 1;
			break;
		}
		desc_iter->next = desc_iter + 1;
		desc_iter += 1;
		block = block->next_block;
	}

	if (desc_iter >= &dma_channel->desc_list[CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM]) {
		/* Ran out of descriptors before consuming every block */
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
		LOG_ERR("Too many dma blocks. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
		return -EINVAL;
	}

	return 0;
}

static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channel *dma_channel,
				struct dma_config *config_dma)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	dma_channel->dir = DMA_TX;

	gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);

	gdma_ll_tx_connect_to_periph(
		data->hal.dev, dma_channel->channel_id,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : ESP_DMA_M2M_OFF,
		dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0 ? ESP_DMA_M2M_ON
								    : dma_channel->periph_id);

	/*
	 * TX channel can always enable burst mode, no matter data alignment
	 */
	if (config_dma->source_burst_length) {
		gdma_ll_tx_enable_data_burst(data->hal.dev, dma_channel->channel_id, true);
		gdma_ll_tx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id, true);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX);

	gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF,
				    config_dma->dma_callback != NULL);

	return dma_esp32_config_tx_descriptor(dma_channel, config_dma->head_block);
}

static int dma_esp32_config(const struct device *dev, uint32_t channel,
				struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	int ret = 0;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!config_dma) {
		return -EINVAL;
	}

	if (config_dma->source_burst_length != config_dma->dest_burst_length) {
		LOG_ERR("Source and destination burst lengths must be equal");
		return -EINVAL;
	}

	dma_channel->periph_id = config_dma->channel_direction == MEMORY_TO_MEMORY
					 ? SOC_GDMA_TRIG_PERIPH_M2M0
					 : config_dma->dma_slot;

	dma_channel->channel_id = channel / 2;

	switch (config_dma->channel_direction) {
	case MEMORY_TO_MEMORY:
		/*
		 * Create both RX and TX streams on the same channel_id
		 */
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		dma_channel_rx->channel_id = dma_channel->channel_id;
		dma_channel_tx->channel_id = dma_channel->channel_id;

		dma_channel_rx->periph_id = dma_channel->periph_id;
		dma_channel_tx->periph_id = dma_channel->periph_id;

		ret = dma_esp32_config_rx(dev, dma_channel_rx, config_dma);
		if (ret < 0) {
			/* Do not silently discard the RX setup error */
			break;
		}

		ret = dma_esp32_config_tx(dev, dma_channel_tx, config_dma);
		break;
	case PERIPHERAL_TO_MEMORY:
		ret = dma_esp32_config_rx(dev, dma_channel, config_dma);
		break;
	case MEMORY_TO_PERIPHERAL:
		ret = dma_esp32_config_tx(dev, dma_channel, config_dma);
		break;
	default:
		LOG_ERR("Invalid Channel direction");
		return -EINVAL;
	}

	return ret;
}
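
/*
 * A minimal sketch of driving the config entry point above through the
 * generic Zephyr DMA API for a memory-to-memory copy (device node label,
 * buffers and the callback name are illustrative):
 *
 *	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma));
 *	struct dma_block_config blk = {
 *		.source_address = (uint32_t)src_buf,
 *		.dest_address = (uint32_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = my_dma_cb,
 *	};
 *
 *	// channels 0 and 1 are the RX/TX streams of GDMA pair 0
 *	int err = dma_config(dma_dev, 0, &cfg);
 */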

static int dma_esp32_start(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					UINT32_MAX, true);
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					GDMA_LL_EVENT_TX_EOF, true);

		gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)dma_channel_rx->desc_list);
		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);

		gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)dma_channel_tx->desc_list);
		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
	} else {
		if (dma_channel->dir == DMA_RX) {
			gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					UINT32_MAX, true);
			gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)dma_channel->desc_list);
			gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
		} else if (dma_channel->dir == DMA_TX) {
			gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
					GDMA_LL_EVENT_TX_EOF, true);
			gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)dma_channel->desc_list);
			gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
		} else {
			LOG_ERR("Channel %d is not configured", channel);
			return -EINVAL;
		}
	}

	return 0;
}

static int dma_esp32_stop(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
				UINT32_MAX, false);
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
				GDMA_LL_EVENT_TX_EOF, false);
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
				UINT32_MAX, false);
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id,
				GDMA_LL_EVENT_TX_EOF, false);
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	return 0;
}
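
/*
 * Usage sketch: once configured, a channel is armed with dma_start() and
 * halted with dma_stop(). For memory-to-memory pairs, starting either stream
 * index of the pair kicks both directions, since both streams carry the M2M0
 * trigger id:
 *
 *	int err = dma_start(dma_dev, 0);
 *	...
 *	err = dma_stop(dma_dev, 0);
 */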

static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
				struct dma_status *status)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	dma_descriptor_t *desc;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!status) {
		return -EINVAL;
	}

	memset(status, 0, sizeof(struct dma_status));

	if (dma_channel->dir == DMA_RX) {
		status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = PERIPHERAL_TO_MEMORY;
		desc = (dma_descriptor_t *)gdma_ll_rx_get_current_desc_addr(
			data->hal.dev, dma_channel->channel_id);
		if (desc >= dma_channel->desc_list) {
			status->read_position = desc - dma_channel->desc_list;
			status->total_copied = desc->dw0.length
						+ dma_channel->desc_list[0].dw0.size
						* status->read_position;
		}
	} else if (dma_channel->dir == DMA_TX) {
		status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = MEMORY_TO_PERIPHERAL;
		desc = (dma_descriptor_t *)gdma_ll_tx_get_current_desc_addr(
			data->hal.dev, dma_channel->channel_id);
		if (desc >= dma_channel->desc_list) {
			status->write_position = desc - dma_channel->desc_list;
		}
	}

	return 0;
}
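
/*
 * Polling sketch: dma_get_status() exposes the GDMA FSM busy flag and the
 * descriptor progress computed above, e.g. to spin until a transfer settles:
 *
 *	struct dma_status st;
 *	int err;
 *
 *	do {
 *		err = dma_get_status(dma_dev, 0, &st);
 *	} while (err == 0 && st.busy);
 */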

static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst,
			    size_t size)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	dma_descriptor_t *desc_iter = dma_channel->desc_list;
	uint32_t buf;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);
		buf = dst;
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);
		buf = src;
	} else {
		return -EINVAL;
	}

	for (int i = 0; i < ARRAY_SIZE(dma_channel->desc_list); ++i) {
		memset(desc_iter, 0, sizeof(dma_descriptor_t));
		desc_iter->buffer = (void *)(buf + DMA_DESCRIPTOR_BUFFER_MAX_SIZE * i);
		desc_iter->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
		if (size < DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
			desc_iter->dw0.size = size;
			if (dma_channel->dir == DMA_TX) {
				desc_iter->dw0.length = size;
				desc_iter->dw0.suc_eof = 1;
			}
			desc_iter->next = NULL;
			break;
		}
		desc_iter->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
		if (dma_channel->dir == DMA_TX) {
			desc_iter->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
		}
		size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
		desc_iter->next = desc_iter + 1;
		desc_iter += 1;
	}

	if (desc_iter >= &dma_channel->desc_list[ARRAY_SIZE(dma_channel->desc_list)]) {
		/* Ran out of descriptors before covering the whole buffer */
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
		LOG_ERR("Not enough DMA descriptors. Increase CONFIG_DMA_ESP32_MAX_DESCRIPTOR_NUM");
		return -EINVAL;
	}

	return 0;
}
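
/*
 * Reload sketch: dma_reload() rebuilds the descriptor list for a new buffer
 * without a full reconfiguration. Only the destination matters for an RX
 * channel and only the source for a TX channel; the other address is ignored:
 *
 *	err = dma_reload(dma_dev, 0, 0, (uint32_t)new_buf, sizeof(new_buf));
 *	err = dma_start(dma_dev, 0);
 */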

static int dma_esp32_configure_irq(const struct device *dev)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct irq_config *irq_cfg = (struct irq_config *)config->irq_config;

	for (uint8_t i = 0; i < config->irq_size; i++) {
		int ret = esp_intr_alloc(irq_cfg[i].irq_source,
			ESP_PRIO_TO_FLAGS(irq_cfg[i].irq_priority) |
				ESP_INT_FLAGS_CHECK(irq_cfg[i].irq_flags) | ESP_INTR_FLAG_IRAM,
			(ISR_HANDLER)config->irq_handlers[i],
			(void *)dev,
			NULL);
		if (ret != 0) {
			LOG_ERR("Could not allocate interrupt handler");
			return ret;
		}
	}

	return 0;
}

static int dma_esp32_init(const struct device *dev)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *)dev->data;
	struct dma_esp32_channel *dma_channel;
	int ret = 0;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret < 0) {
		LOG_ERR("Could not initialize clock (%d)", ret);
		return ret;
	}

	ret = dma_esp32_configure_irq(dev);
	if (ret < 0) {
		LOG_ERR("Could not configure IRQ (%d)", ret);
		return ret;
	}

	for (uint8_t i = 0; i < DMA_MAX_CHANNEL * 2; i++) {
		dma_channel = &config->dma_channel[i];
		dma_channel->cb = NULL;
		dma_channel->dir = DMA_UNCONFIGURED;
		dma_channel->periph_id = ESP_GDMA_TRIG_PERIPH_INVALID;
		memset(dma_channel->desc_list, 0, sizeof(dma_channel->desc_list));
	}

	gdma_hal_init(&data->hal, 0);
	gdma_ll_enable_clock(data->hal.dev, true);

	return 0;
}

static DEVICE_API(dma, dma_esp32_api) = {
	.config = dma_esp32_config,
	.start = dma_esp32_start,
	.stop = dma_esp32_stop,
	.get_status = dma_esp32_get_status,
	.reload = dma_esp32_reload,
};

#if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3)

#define DMA_ESP32_DEFINE_IRQ_HANDLER(channel)                                                      \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_rx(                \
		const struct device *dev)                                                          \
	{                                                                                          \
		struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;          \
		struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;           \
		uint32_t intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, channel);    \
		if (intr_status) {                                                                 \
			dma_esp32_isr_handle_rx(dev, &config->dma_channel[channel * 2],            \
						intr_status);                                      \
		}                                                                                  \
	}                                                                                          \
                                                                                                   \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel##_tx(                \
		const struct device *dev)                                                          \
	{                                                                                          \
		struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;          \
		struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;           \
		uint32_t intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, channel);    \
		if (intr_status) {                                                                 \
			dma_esp32_isr_handle_tx(dev, &config->dma_channel[channel * 2 + 1],        \
						intr_status);                                      \
		}                                                                                  \
	}

#else

#define DMA_ESP32_DEFINE_IRQ_HANDLER(channel)                                                      \
	__attribute__((unused)) static void IRAM_ATTR dma_esp32_isr_##channel(                     \
		const struct device *dev)                                                          \
	{                                                                                          \
		dma_esp32_isr_handle(dev, channel * 2, channel * 2 + 1);                           \
	}

#endif

#if defined(CONFIG_SOC_SERIES_ESP32C6) || defined(CONFIG_SOC_SERIES_ESP32S3)
#define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel##_rx, dma_esp32_isr_##channel##_tx
#else
#define ESP32_DMA_HANDLER(channel) dma_esp32_isr_##channel
#endif

DMA_ESP32_DEFINE_IRQ_HANDLER(0)
DMA_ESP32_DEFINE_IRQ_HANDLER(1)
DMA_ESP32_DEFINE_IRQ_HANDLER(2)
#if DMA_MAX_CHANNEL >= 5
DMA_ESP32_DEFINE_IRQ_HANDLER(3)
DMA_ESP32_DEFINE_IRQ_HANDLER(4)
#endif

static void *irq_handlers[] = {
	ESP32_DMA_HANDLER(0),
	ESP32_DMA_HANDLER(1),
	ESP32_DMA_HANDLER(2),
#if DMA_MAX_CHANNEL >= 5
	ESP32_DMA_HANDLER(3),
	ESP32_DMA_HANDLER(4),
#endif
};

#define IRQ_NUM(idx)	DT_NUM_IRQS(DT_DRV_INST(idx))
#define IRQ_ENTRY(n, idx) {	\
	DT_INST_IRQ_BY_IDX(idx, n, irq),	\
	DT_INST_IRQ_BY_IDX(idx, n, priority),	\
	DT_INST_IRQ_BY_IDX(idx, n, flags)	},

#define DMA_ESP32_INIT(idx)                                                                        \
	static struct irq_config irq_config_##idx[] = {                                            \
		LISTIFY(IRQ_NUM(idx), IRQ_ENTRY, (), idx)                                          \
	};                                                                                         \
	static struct dma_esp32_config dma_config_##idx = {                                        \
		.irq_config = irq_config_##idx,                                                    \
		.irq_size = IRQ_NUM(idx),                                                          \
		.irq_handlers = irq_handlers,                                                      \
		.dma_channel_max = DT_INST_PROP(idx, dma_channels),                                \
		.sram_alignment = DT_INST_PROP(idx, dma_buf_addr_alignment),                       \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),                              \
		.clock_subsys = (void *)DT_INST_CLOCKS_CELL(idx, offset),                          \
	};                                                                                         \
	static struct dma_esp32_data dma_data_##idx = {                                            \
		.hal =                                                                             \
			{                                                                          \
				.dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx),                        \
			},                                                                         \
	};                                                                                         \
												   \
	DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx, &dma_config_##idx,      \
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, &dma_esp32_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_ESP32_INIT)