1 /*
2 * Copyright 2020-2024 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8  * @brief Common part of the DMA drivers for the NXP i.MX RT series.
9 */
10
11 #define DT_DRV_COMPAT nxp_mcux_edma
12
13 #include <errno.h>
14 #include <soc.h>
15 #include <zephyr/init.h>
16 #include <zephyr/kernel.h>
17 #include <zephyr/devicetree.h>
18 #include <zephyr/sys/atomic.h>
19 #include <zephyr/drivers/dma.h>
20 #include <zephyr/drivers/clock_control.h>
21 #include <zephyr/sys/barrier.h>
22
23 #include "dma_mcux_edma.h"
24
25 #include <zephyr/logging/log.h>
26 #include <zephyr/irq.h>
27
28 LOG_MODULE_REGISTER(dma_mcux_edma, CONFIG_DMA_LOG_LEVEL);
29
30 #define HAS_CHANNEL_GAP(n) DT_INST_NODE_HAS_PROP(n, channel_gap) ||
31 #define DMA_MCUX_HAS_CHANNEL_GAP (DT_INST_FOREACH_STATUS_OKAY(HAS_CHANNEL_GAP) 0)
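
/*
 * DMA_MCUX_HAS_CHANNEL_GAP expands to an OR of one "instance has the channel-gap property" term
 * per enabled instance, terminated by 0 (e.g. (1 || 0) when only instance 0 defines channel-gap),
 * so it is true whenever at least one enabled instance has a channel gap.
 */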
32
33 struct dma_mcux_edma_config {
34 DMA_Type *base;
35 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
36 DMAMUX_Type **dmamux_base;
37 #endif
38 uint8_t channels_per_mux;
39 uint8_t dmamux_reg_offset;
40 int dma_requests;
41 int dma_channels; /* number of channels */
42 #if DMA_MCUX_HAS_CHANNEL_GAP
43 uint32_t channel_gap[2];
44 #endif
45 void (*irq_config_func)(const struct device *dev);
46 edma_tcd_t (*tcdpool)[CONFIG_DMA_TCD_QUEUE_SIZE];
47 };
48
49
50 #ifdef CONFIG_HAS_MCUX_CACHE
51
52 #ifdef CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS
53
54 #if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm))
55 #define EDMA_TCDPOOL_CACHE_ATTR __dtcm_noinit_section
56 #else /* DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm)) */
57 #error Selected DTCM for MCUX DMA descriptors but no DTCM section.
58 #endif /* DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm)) */
59
60 #elif defined(CONFIG_NOCACHE_MEMORY)
61 #define EDMA_TCDPOOL_CACHE_ATTR __nocache
62 #else
63 /*
64  * Note: the TCD pool *must* be in non-cacheable memory. All of the NXP SoCs
65  * that support caching memory have their default SRAM regions defined as
66  * non-cached memory regions, but if the default SRAM region is changed, the
67  * EDMA TCD pools would be moved to cacheable memory, resulting in DMA cache
68  * coherency issues.
69  */
70
71 #define EDMA_TCDPOOL_CACHE_ATTR
72
73 #endif /* CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS */
74
75 #else /* CONFIG_HAS_MCUX_CACHE */
76
77 #define EDMA_TCDPOOL_CACHE_ATTR
78
79 #endif /* CONFIG_HAS_MCUX_CACHE */
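
/*
 * Summary of the TCD pool placement selected above: DTCM when
 * CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS is set and a DTCM section exists, otherwise a
 * __nocache section when CONFIG_NOCACHE_MEMORY is enabled, otherwise the default data section
 * (assumed to be non-cacheable, see the note above).
 */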
80
81 struct dma_mcux_channel_transfer_edma_settings {
82 uint32_t source_data_size;
83 uint32_t dest_data_size;
84 uint32_t source_burst_length;
85 uint32_t dest_burst_length;
86 enum dma_channel_direction direction;
87 edma_transfer_type_t transfer_type;
88 bool valid;
89 	/* This flag indicates whether dynamic SG mode or loop (cyclic) SG mode is used. */
90 bool cyclic;
91 	/* The following fields are used in cyclic mode only.
92 	 * write_idx is the next empty TCD index that can be used for a transfer.
93 	 */
94 volatile uint8_t write_idx;
95 	/* Number of empty TCDs in the TCD pool (available for writing new transfer parameters) */
96 volatile uint8_t empty_tcds;
97 };
98
99
100 struct call_back {
101 edma_transfer_config_t transferConfig;
102 edma_handle_t edma_handle;
103 const struct device *dev;
104 void *user_data;
105 dma_callback_t dma_callback;
106 struct dma_mcux_channel_transfer_edma_settings transfer_settings;
107 bool busy;
108 };
109
110 struct dma_mcux_edma_data {
111 struct dma_context dma_ctx;
112 struct call_back *data_cb;
113 atomic_t *channels_atomic;
114 };
115
116 #define DEV_CFG(dev) \
117 ((const struct dma_mcux_edma_config *const)dev->config)
118 #define DEV_DATA(dev) ((struct dma_mcux_edma_data *)dev->data)
119 #define DEV_BASE(dev) ((DMA_Type *)DEV_CFG(dev)->base)
120
121 #define DEV_CHANNEL_DATA(dev, ch) \
122 ((struct call_back *)(&(DEV_DATA(dev)->data_cb[ch])))
123
124 #define DEV_EDMA_HANDLE(dev, ch) \
125 ((edma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->edma_handle)))
126
127 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
128 #define DEV_DMAMUX_BASE(dev, idx) ((DMAMUX_Type *)DEV_CFG(dev)->dmamux_base[idx])
129 #define DEV_DMAMUX_IDX(dev, ch) (ch / DEV_CFG(dev)->channels_per_mux)
130
131 #define DEV_DMAMUX_CHANNEL(dev, ch) \
132 (ch % DEV_CFG(dev)->channels_per_mux) ^ (DEV_CFG(dev)->dmamux_reg_offset)
133 #endif
134
135 /* Definitions for SW TCD fields */
136 #if defined(CONFIG_DMA_MCUX_EDMA) || defined(CONFIG_DMA_MCUX_EDMA_V3)
137 #define EDMA_TCD_SADDR(tcd, flag) ((tcd)->SADDR)
138 #define EDMA_TCD_DADDR(tcd, flag) ((tcd)->DADDR)
139 #define EDMA_TCD_BITER(tcd, flag) ((tcd)->BITER)
140 #define EDMA_TCD_CITER(tcd, flag) ((tcd)->CITER)
141 #define EDMA_TCD_CSR(tcd, flag) ((tcd)->CSR)
142 #define EDMA_TCD_DLAST_SGA(tcd, flag) ((tcd)->DLAST_SGA)
143 #if defined(CONFIG_DMA_MCUX_EDMA_V3)
144 #define DMA_CSR_DREQ DMA_TCD_CSR_DREQ
145 #define EDMA_HW_TCD_CH_ACTIVE_MASK (DMA_CH_CSR_ACTIVE_MASK)
146 #else
147 #define EDMA_HW_TCD_CH_ACTIVE_MASK (DMA_CSR_ACTIVE_MASK)
148 #endif /* CONFIG_DMA_MCUX_EDMA_V3 */
149 #elif defined(CONFIG_DMA_MCUX_EDMA_V4)
150 /* Above macros have been defined in fsl_edma_core.h */
151 #define EDMA_HW_TCD_CH_ACTIVE_MASK (DMA_CH_CSR_ACTIVE_MASK)
152 #endif
153
154 /* Definitions for HW TCD fields */
155 #ifdef CONFIG_DMA_MCUX_EDMA
156 #define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].SADDR)
157 #define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].DADDR)
158 #define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->TCD[ch].BITER_ELINKNO)
159 #define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->TCD[ch].CITER_ELINKNO)
160 #define EDMA_HW_TCD_CSR(dev, ch) (DEV_BASE(dev)->TCD[ch].CSR)
161 #elif defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
162 #define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_SADDR)
163 #define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_DADDR)
164 #define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_BITER_ELINKNO)
165 #define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_CITER_ELINKNO)
166 #define EDMA_HW_TCD_CSR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_CSR)
167 #endif
168
169 /*
170  * The hardware channel (which takes the channel gap into account) is used when accessing
171  * DMA registers. The data structures in the shim driver still use the logical channel number.
172  */
173 static ALWAYS_INLINE uint32_t dma_mcux_edma_add_channel_gap(const struct device *dev,
174 uint32_t channel)
175 {
176 #if DMA_MCUX_HAS_CHANNEL_GAP
177 const struct dma_mcux_edma_config *config = DEV_CFG(dev);
178
179 return (channel < config->channel_gap[0]) ? channel :
180 (channel + 1 + config->channel_gap[1] - config->channel_gap[0]);
181 #else
182 ARG_UNUSED(dev);
183 return channel;
184 #endif
185 }
186
187 static ALWAYS_INLINE uint32_t dma_mcux_edma_remove_channel_gap(const struct device *dev,
188 uint32_t channel)
189 {
190 #if DMA_MCUX_HAS_CHANNEL_GAP
191 const struct dma_mcux_edma_config *config = DEV_CFG(dev);
192
193 return (channel < config->channel_gap[0]) ? channel :
194 (channel + config->channel_gap[0] - config->channel_gap[1] - 1);
195 #else
196 ARG_UNUSED(dev);
197 return channel;
198 #endif
199 }
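
/*
 * Worked example (hypothetical values): with channel-gap = <4 7>, logical channel 3 maps to
 * hardware channel 3, while logical channel 4 maps to hardware channel 4 + 1 + 7 - 4 = 8, so
 * hardware channels 4..7 are skipped. dma_mcux_edma_remove_channel_gap() is the inverse mapping.
 */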
200
201 static bool data_size_valid(const size_t data_size)
202 {
203 return (data_size == 4U || data_size == 2U ||
204 data_size == 1U || data_size == 8U ||
205 data_size == 16U || data_size == 32U
206 #if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
207 || data_size == 64U
208 #endif
209 );
210 }
211
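/*
 * Called by the MCUX EDMA HAL from IRQ context for each completed major loop. It translates the
 * HAL notification into the Zephyr DMA callback: DMA_STATUS_COMPLETE in cyclic mode or when the
 * transfer is done, otherwise the default -EIO status is reported.
 */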
212 static void nxp_edma_callback(edma_handle_t *handle, void *param, bool transferDone,
213 uint32_t tcds)
214 {
215 int ret = -EIO;
216 struct call_back *data = (struct call_back *)param;
217 uint32_t channel = dma_mcux_edma_remove_channel_gap(data->dev, handle->channel);
218
219 if (data->transfer_settings.cyclic) {
220 data->transfer_settings.empty_tcds++;
221 		/* In loop mode, the DMA is always busy */
222 		data->busy = true;
223 ret = DMA_STATUS_COMPLETE;
224 } else if (transferDone) {
225 /* DMA is no longer busy when there are no remaining TCDs to transfer */
226 data->busy = (handle->tcdPool != NULL) && (handle->tcdUsed > 0);
227 ret = DMA_STATUS_COMPLETE;
228 }
229 LOG_DBG("transfer %d", tcds);
230 data->dma_callback(data->dev, data->user_data, channel, ret);
231 }
232
233 static void dma_mcux_edma_irq_handler(const struct device *dev, uint32_t channel)
234 {
235 uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
236 uint32_t flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);
237
238 if (flag & kEDMA_InterruptFlag) {
239 LOG_DBG("IRQ OCCURRED");
240 /* EDMA interrupt flag is cleared here */
241 EDMA_HandleIRQ(DEV_EDMA_HANDLE(dev, channel));
242 LOG_DBG("IRQ DONE");
243 }
244
245 #if DT_INST_PROP(0, no_error_irq)
246 /* Channel shares the same irq for error and transfer complete */
247 else if (flag & kEDMA_ErrorFlag) {
248 EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, 0xFFFFFFFF);
249 EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
250 DEV_CHANNEL_DATA(dev, channel)->busy = false;
251 LOG_INF("channel %d error status is 0x%x", channel, flag);
252 }
253 #endif
254 }
255
256 #if !DT_INST_PROP(0, no_error_irq)
257 static void dma_mcux_edma_error_irq_handler(const struct device *dev)
258 {
259 int i = 0;
260 uint32_t flag = 0;
261 uint32_t hw_channel;
262
263 for (i = 0; i < DEV_CFG(dev)->dma_channels; i++) {
264 if (DEV_CHANNEL_DATA(dev, i)->busy) {
265 hw_channel = dma_mcux_edma_add_channel_gap(dev, i);
266 flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);
267 EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel, 0xFFFFFFFF);
268 EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, i));
269 DEV_CHANNEL_DATA(dev, i)->busy = false;
270 LOG_INF("channel %d error status is 0x%x", hw_channel, flag);
271 }
272 }
273
274 #if defined(CONFIG_CPU_CORTEX_M4)
275 barrier_dsync_fence_full();
276 #endif
277 }
278 #endif
279
280 /* Configure a channel */
281 static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
282 struct dma_config *config)
283 {
284 /* Check for invalid parameters before dereferencing them. */
285 if (NULL == dev || NULL == config) {
286 return -EINVAL;
287 }
288
289 edma_handle_t *p_handle = DEV_EDMA_HANDLE(dev, channel);
290 struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
291 struct dma_block_config *block_config = config->head_block;
292 uint32_t slot = config->dma_slot;
293 uint32_t hw_channel;
294 edma_transfer_type_t transfer_type;
295 unsigned int key;
296 int ret = 0;
297 edma_tcd_t *tcd = NULL;
298
299 if (slot >= DEV_CFG(dev)->dma_requests) {
300 		LOG_ERR("DMA request source %d is out of range", slot);
301 return -ENOTSUP;
302 }
303
304 if (channel >= DEV_CFG(dev)->dma_channels) {
305 		LOG_ERR("DMA channel %d is out of range", channel);
306 return -EINVAL;
307 }
308
309 hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
310 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
311 uint8_t dmamux_idx, dmamux_channel;
312
313 dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
314 dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);
315 #endif
316 data->transfer_settings.valid = false;
317
318 switch (config->channel_direction) {
319 case MEMORY_TO_MEMORY:
320 transfer_type = kEDMA_MemoryToMemory;
321 break;
322 case MEMORY_TO_PERIPHERAL:
323 transfer_type = kEDMA_MemoryToPeripheral;
324 break;
325 case PERIPHERAL_TO_MEMORY:
326 transfer_type = kEDMA_PeripheralToMemory;
327 break;
328 case PERIPHERAL_TO_PERIPHERAL:
329 transfer_type = kEDMA_PeripheralToPeripheral;
330 break;
331 default:
332 		LOG_ERR("unsupported transfer direction");
333 return -EINVAL;
334 }
335
336 if (!data_size_valid(config->source_data_size)) {
337 LOG_ERR("Source unit size error, %d", config->source_data_size);
338 return -EINVAL;
339 }
340
341 if (!data_size_valid(config->dest_data_size)) {
342 LOG_ERR("Dest unit size error, %d", config->dest_data_size);
343 return -EINVAL;
344 }
345
346 if (block_config->source_gather_en || block_config->dest_scatter_en) {
347 if (config->block_count > CONFIG_DMA_TCD_QUEUE_SIZE) {
348 			LOG_ERR("please configure DMA_TCD_QUEUE_SIZE to at least %d", config->block_count);
349 return -EINVAL;
350 }
351 }
352
353 data->transfer_settings.source_data_size = config->source_data_size;
354 data->transfer_settings.dest_data_size = config->dest_data_size;
355 data->transfer_settings.source_burst_length = config->source_burst_length;
356 data->transfer_settings.dest_burst_length = config->dest_burst_length;
357 data->transfer_settings.direction = config->channel_direction;
358 data->transfer_settings.transfer_type = transfer_type;
359 data->transfer_settings.valid = true;
360 data->transfer_settings.cyclic = config->cyclic;
361
362 	/* Lock interrupts while configuring the channel */
363 key = irq_lock();
364
365 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
366
367 #if DT_INST_PROP(0, nxp_a_on)
368 if (config->source_handshake || config->dest_handshake ||
369 transfer_type == kEDMA_MemoryToMemory) {
370 		/* A software trigger keeps the channel always on */
371 LOG_DBG("ALWAYS ON");
372 DMAMUX_EnableAlwaysOn(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, true);
373 } else {
374 DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
375 }
376 #else
377 DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
378 #endif
379
380 /* dam_imx_rt_set_channel_priority(dev, channel, config); */
381 DMAMUX_EnableChannel(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel);
382
383 #endif
384
385 if (data->busy) {
386 EDMA_AbortTransfer(p_handle);
387 }
388 EDMA_ResetChannel(DEV_BASE(dev), hw_channel);
389 EDMA_CreateHandle(p_handle, DEV_BASE(dev), hw_channel);
390 EDMA_SetCallback(p_handle, nxp_edma_callback, (void *)data);
391
392 #if defined(FSL_FEATURE_EDMA_HAS_CHANNEL_MUX) && FSL_FEATURE_EDMA_HAS_CHANNEL_MUX
393 /* First release any peripheral previously associated with this channel */
394 EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, 0);
395 EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, slot);
396 #endif
397
398 LOG_DBG("channel is %d", channel);
399 EDMA_EnableChannelInterrupts(DEV_BASE(dev), hw_channel, kEDMA_ErrorInterruptEnable);
400
401 	/* Initialize the whole TCD pool to 0 */
402 for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
403 memset(&DEV_CFG(dev)->tcdpool[channel][i], 0,
404 sizeof(DEV_CFG(dev)->tcdpool[channel][i]));
405 }
406
407 if (block_config->source_gather_en || block_config->dest_scatter_en) {
408 if (config->cyclic) {
409 /* Loop SG mode */
410 data->transfer_settings.write_idx = 0;
411 data->transfer_settings.empty_tcds = CONFIG_DMA_TCD_QUEUE_SIZE;
412
413 EDMA_PrepareTransfer(
414 &data->transferConfig, (void *)block_config->source_address,
415 config->source_data_size, (void *)block_config->dest_address,
416 config->dest_data_size, config->source_burst_length,
417 block_config->block_size, transfer_type);
418
419 			/* Initialize all TCDs with the parameters in the transfer config and link them. */
420 for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
421 EDMA_TcdSetTransferConfig(
422 &DEV_CFG(dev)->tcdpool[channel][i], &data->transferConfig,
423 &DEV_CFG(dev)->tcdpool[channel][(i + 1) %
424 CONFIG_DMA_TCD_QUEUE_SIZE]);
425
426 				/* Enable the major loop interrupt. */
427 EDMA_TcdEnableInterrupts(&DEV_CFG(dev)->tcdpool[channel][i],
428 kEDMA_MajorInterruptEnable);
429 }
430
431 /* Load valid transfer parameters */
432 while (block_config != NULL && data->transfer_settings.empty_tcds > 0) {
433 tcd = &(DEV_CFG(dev)->tcdpool[channel]
434 [data->transfer_settings.write_idx]);
435
436 EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = block_config->source_address;
437 EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = block_config->dest_address;
438 EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) =
439 block_config->block_size / config->source_data_size;
440 EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) =
441 block_config->block_size / config->source_data_size;
442 				/* Enable auto stop for the last transfer. */
443 if (block_config->next_block == NULL) {
444 EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
445 } else {
446 EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);
447 }
448
449 data->transfer_settings.write_idx =
450 (data->transfer_settings.write_idx + 1) %
451 CONFIG_DMA_TCD_QUEUE_SIZE;
452 data->transfer_settings.empty_tcds--;
453 block_config = block_config->next_block;
454 }
455
456 if (block_config != NULL && data->transfer_settings.empty_tcds == 0) {
457 				/* The user supplied more blocks than there are TCDs; return an error */
458 				LOG_ERR("Too many requested blocks, increase the TCD queue size!");
459 ret = -ENOBUFS;
460 }
461 /* Push the 1st TCD into HW */
462 EDMA_InstallTCD(p_handle->base, hw_channel,
463 &DEV_CFG(dev)->tcdpool[channel][0]);
464
465 } else {
466 /* Dynamic Scatter Gather mode */
467 EDMA_InstallTCDMemory(p_handle, DEV_CFG(dev)->tcdpool[channel],
468 CONFIG_DMA_TCD_QUEUE_SIZE);
469
470 while (block_config != NULL) {
471 EDMA_PrepareTransfer(&(data->transferConfig),
472 (void *)block_config->source_address,
473 config->source_data_size,
474 (void *)block_config->dest_address,
475 config->dest_data_size,
476 config->source_burst_length,
477 block_config->block_size, transfer_type);
478
479 const status_t submit_status =
480 EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
481 if (submit_status != kStatus_Success) {
482 LOG_ERR("Error submitting EDMA Transfer: 0x%x",
483 submit_status);
484 ret = -EFAULT;
485 }
486 block_config = block_config->next_block;
487 }
488 }
489 } else {
490 /* block_count shall be 1 */
491 LOG_DBG("block size is: %d", block_config->block_size);
492 EDMA_PrepareTransfer(&(data->transferConfig),
493 (void *)block_config->source_address,
494 config->source_data_size,
495 (void *)block_config->dest_address,
496 config->dest_data_size,
497 config->source_burst_length,
498 block_config->block_size, transfer_type);
499
500 const status_t submit_status =
501 EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
502 if (submit_status != kStatus_Success) {
503 LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
504 ret = -EFAULT;
505 }
506
507 LOG_DBG("DMA TCD CSR 0x%x", EDMA_HW_TCD_CSR(dev, hw_channel));
508 }
509
510 if (config->dest_chaining_en) {
511 LOG_DBG("link major channel %d", config->linked_channel);
512 EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MajorLink,
513 config->linked_channel);
514 }
515 if (config->source_chaining_en) {
516 LOG_DBG("link minor channel %d", config->linked_channel);
517 EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MinorLink,
518 config->linked_channel);
519 }
520
521 data->busy = false;
522 if (config->dma_callback) {
523 LOG_DBG("INSTALL call back on channel %d", channel);
524 data->user_data = config->user_data;
525 data->dma_callback = config->dma_callback;
526 data->dev = dev;
527 }
528
529 irq_unlock(key);
530
531 return ret;
532 }
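
/*
 * Minimal usage sketch from a client driver's point of view (names and values are illustrative
 * assumptions, not taken from this file). A single memory-to-memory block transfer through the
 * generic Zephyr DMA API ends up in dma_mcux_edma_configure() and dma_mcux_edma_start() above:
 *
 *   static void my_dma_done_cb(const struct device *dev, void *user_data,
 *                              uint32_t channel, int status) { ... }
 *
 *   struct dma_block_config block = {
 *       .source_address = (uint32_t)src_buf,
 *       .dest_address = (uint32_t)dst_buf,
 *       .block_size = sizeof(src_buf),
 *   };
 *   struct dma_config cfg = {
 *       .channel_direction = MEMORY_TO_MEMORY,
 *       .source_data_size = 4,
 *       .dest_data_size = 4,
 *       .source_burst_length = 4,
 *       .dest_burst_length = 4,
 *       .block_count = 1,
 *       .head_block = &block,
 *       .dma_callback = my_dma_done_cb,
 *       .user_data = my_ctx,
 *   };
 *   dma_config(dma_dev, channel, &cfg);
 *   dma_start(dma_dev, channel);
 */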
533
534 static int dma_mcux_edma_start(const struct device *dev, uint32_t channel)
535 {
536 struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
537
538 LOG_DBG("START TRANSFER");
539
540 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
541 uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
542 uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);
543
544 LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
545 #endif
546
547 #if !defined(CONFIG_DMA_MCUX_EDMA_V3) && !defined(CONFIG_DMA_MCUX_EDMA_V4)
548 LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
549 #endif
550 data->busy = true;
551 EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
552 return 0;
553 }
554
555 static int dma_mcux_edma_stop(const struct device *dev, uint32_t channel)
556 {
557 struct dma_mcux_edma_data *data = DEV_DATA(dev);
558 uint32_t hw_channel;
559
560 hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
561
562 data->data_cb[channel].transfer_settings.valid = false;
563
564 if (!data->data_cb[channel].busy) {
565 return 0;
566 }
567
568 EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
569 EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel,
570 kEDMA_DoneFlag | kEDMA_ErrorFlag |
571 kEDMA_InterruptFlag);
572 EDMA_ResetChannel(DEV_BASE(dev), hw_channel);
573 data->data_cb[channel].busy = false;
574 return 0;
575 }
576
577 static int dma_mcux_edma_suspend(const struct device *dev, uint32_t channel)
578 {
579 struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
580
581 if (!data->busy) {
582 return -EINVAL;
583 }
584 EDMA_StopTransfer(DEV_EDMA_HANDLE(dev, channel));
585 return 0;
586 }
587
588 static int dma_mcux_edma_resume(const struct device *dev, uint32_t channel)
589 {
590 struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
591
592 if (!data->busy) {
593 return -EINVAL;
594 }
595 EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
596 return 0;
597 }
598
599 static void dma_mcux_edma_update_hw_tcd(const struct device *dev, uint32_t channel, uint32_t src,
600 uint32_t dst, size_t size)
601 {
602 EDMA_HW_TCD_SADDR(dev, channel) = src;
603 EDMA_HW_TCD_DADDR(dev, channel) = dst;
604 EDMA_HW_TCD_BITER(dev, channel) = size;
605 EDMA_HW_TCD_CITER(dev, channel) = size;
606 EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_DREQ(1U);
607 }
608
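/*
 * Reload a channel with a new source/destination/size without a full reconfiguration. In cyclic
 * (loop SG) mode this writes the next TCD in the per-channel ring; in dynamic SG / single-block
 * mode it prepares and submits a fresh transfer via EDMA_SubmitTransfer().
 */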
609 static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
610 uint32_t src, uint32_t dst, size_t size)
611 {
612 struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
613 edma_tcd_t *tcd = NULL;
614 edma_tcd_t *pre_tcd = NULL;
615 uint32_t hw_id, sw_id;
616 uint8_t pre_idx;
617
618 /* Lock the channel configuration */
619 const unsigned int key = irq_lock();
620 int ret = 0;
621
622 if (!data->transfer_settings.valid) {
623 LOG_ERR("Invalid EDMA settings on initial config. Configure DMA before reload.");
624 ret = -EFAULT;
625 goto cleanup;
626 }
627
628 if (data->transfer_settings.cyclic) {
629 if (data->transfer_settings.empty_tcds == 0) {
630 LOG_ERR("TCD list is full in loop mode.");
631 ret = -ENOBUFS;
632 goto cleanup;
633 }
634
635 /* Convert size into major loop count */
636 size = size / data->transfer_settings.dest_data_size;
637
638 /* Previous TCD index in circular list */
639 pre_idx = data->transfer_settings.write_idx - 1;
640 if (pre_idx >= CONFIG_DMA_TCD_QUEUE_SIZE)
641 pre_idx = CONFIG_DMA_TCD_QUEUE_SIZE - 1;
642
643 /* Configure a TCD for the transfer */
644 tcd = &(DEV_CFG(dev)->tcdpool[channel][data->transfer_settings.write_idx]);
645 pre_tcd = &(DEV_CFG(dev)->tcdpool[channel][pre_idx]);
646
647 EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = src;
648 EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = dst;
649 EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) = size;
650 EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) = size;
651 		/* Enable auto stop */
652 EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
653 sw_id = EDMA_TCD_DLAST_SGA(tcd, kEDMA_EDMA4Flag);
654
655 /* Block the peripheral's hardware request trigger to prevent
656 * starting the DMA before updating the TCDs. Make sure the
657 * code between EDMA_DisableChannelRequest() and
658 		 * EDMA_EnableChannelRequest() is kept to a minimum.
659 */
660 EDMA_DisableChannelRequest(DEV_BASE(dev), channel);
661
662 /* Wait for the DMA to be inactive before updating the TCDs.
663 * The CSR[ACTIVE] bit will deassert quickly after the EDMA's
664 * minor loop burst completes.
665 */
666 while (EDMA_HW_TCD_CSR(dev, channel) & EDMA_HW_TCD_CH_ACTIVE_MASK) {
667 ;
668 }
669
670 /* Identify the current active TCD. Use DLAST_SGA as the HW ID */
671 hw_id = EDMA_GetNextTCDAddress(DEV_EDMA_HANDLE(dev, channel));
672 if (data->transfer_settings.empty_tcds >= CONFIG_DMA_TCD_QUEUE_SIZE ||
673 hw_id == sw_id) {
674 			/* All transfers are done. The DMA has stopped automatically and
675 			 * an invalid TCD has been loaded into the HW, so update the HW TCD.
676 			 */
677 			dma_mcux_edma_update_hw_tcd(dev, channel, src, dst, size);
678 			LOG_DBG("Transfer done, auto stop");
679
680 } else {
681 /* Previous TCD can automatically start this TCD.
682 * Enable the peripheral DMA request in the previous TCD
683 */
684 EDMA_TCD_CSR(pre_tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);
685
686 if (data->transfer_settings.empty_tcds == CONFIG_DMA_TCD_QUEUE_SIZE - 1 ||
687 hw_id == (uint32_t)tcd) {
688 				/* The DMA is running the last transfer. The HW has loaded the
689 				 * last TCD, so ensure its DREQ is cleared.
690 				 */
691 EDMA_EnableAutoStopRequest(DEV_BASE(dev), channel, false);
692 LOG_DBG("Last transfer.");
693 }
694 			LOG_DBG("Manual stop");
695 }
696
697 #ifdef CONFIG_DMA_MCUX_EDMA
698 		/* There appears to be a HW issue that may cause the ESG bit to be cleared.
699 		 * As a workaround, clear the DONE bit before setting the ESG bit.
700 		 */
701 EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, kEDMA_DoneFlag);
702 EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_ESG_MASK;
703 #elif (CONFIG_DMA_MCUX_EDMA_V3 || CONFIG_DMA_MCUX_EDMA_V4)
704 		/* It has not been verified whether this issue exists on V3/V4 HW; this is just a placeholder. */
705 #endif
706 /* TCDs are configured. Resume DMA */
707 EDMA_EnableChannelRequest(DEV_BASE(dev), channel);
708
709 /* Update the write index and available TCD numbers. */
710 data->transfer_settings.write_idx =
711 (data->transfer_settings.write_idx + 1) % CONFIG_DMA_TCD_QUEUE_SIZE;
712 data->transfer_settings.empty_tcds--;
713
714 LOG_DBG("w_idx:%d no:%d(ch:%d)", data->transfer_settings.write_idx,
715 data->transfer_settings.empty_tcds, channel);
716
717 } else {
718 		/* Dynamic Scatter/Gather mode:
719 * If the tcdPool is not in use (no s/g) then only a single TCD
720 * can be active at once.
721 */
722 if (data->busy && data->edma_handle.tcdPool == NULL) {
723 LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
724 ret = -EBUSY;
725 goto cleanup;
726 }
727
728 EDMA_PrepareTransfer(&(data->transferConfig), (void *)src,
729 data->transfer_settings.source_data_size, (void *)dst,
730 data->transfer_settings.dest_data_size,
731 data->transfer_settings.source_burst_length, size,
732 data->transfer_settings.transfer_type);
733
734 const status_t submit_status =
735 EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));
736
737 if (submit_status != kStatus_Success) {
738 LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
739 ret = -EFAULT;
740 }
741 }
742
743 cleanup:
744 irq_unlock(key);
745 return ret;
746 }
747
748 static int dma_mcux_edma_get_status(const struct device *dev, uint32_t channel,
749 struct dma_status *status)
750 {
751 uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
752
753 if (DEV_CHANNEL_DATA(dev, channel)->busy) {
754 status->busy = true;
755 /* pending_length is in bytes. Multiply remaining major loop
756 * count by NBYTES for each minor loop
757 */
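		/* e.g. 10 remaining major loops * 4-byte source width = 40 pending bytes (illustrative) */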
758 status->pending_length =
759 EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), hw_channel) *
760 DEV_CHANNEL_DATA(dev, channel)->transfer_settings.source_data_size;
761 } else {
762 status->busy = false;
763 status->pending_length = 0;
764 }
765 status->dir = DEV_CHANNEL_DATA(dev, channel)->transfer_settings.direction;
766
767 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
768 uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
769 uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);
770
771 LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
772 #endif
773
774 #if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
775 LOG_DBG("DMA MP_CSR 0x%x", DEV_BASE(dev)->MP_CSR);
776 LOG_DBG("DMA MP_ES 0x%x", DEV_BASE(dev)->MP_ES);
777 LOG_DBG("DMA CHx_ES 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_ES);
778 LOG_DBG("DMA CHx_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_CSR);
779 LOG_DBG("DMA CHx_ES 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_ES);
780 LOG_DBG("DMA CHx_INT 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_INT);
781 LOG_DBG("DMA TCD_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].TCD_CSR);
782 #else
783 LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
784 LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INT);
785 LOG_DBG("DMA ERQ 0x%x", DEV_BASE(dev)->ERQ);
786 LOG_DBG("DMA ES 0x%x", DEV_BASE(dev)->ES);
787 LOG_DBG("DMA ERR 0x%x", DEV_BASE(dev)->ERR);
788 LOG_DBG("DMA HRS 0x%x", DEV_BASE(dev)->HRS);
789 LOG_DBG("data csr is 0x%x", DEV_BASE(dev)->TCD[hw_channel].CSR);
790 #endif
791 return 0;
792 }
793
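/*
 * Channel filter used by dma_request_channel(): when the caller asks for a DMA_CHANNEL_PERIODIC
 * channel, only channels 0..3 are accepted; any channel is acceptable otherwise.
 */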
794 static bool dma_mcux_edma_channel_filter(const struct device *dev,
795 int channel_id, void *param)
796 {
797 enum dma_channel_filter *filter = (enum dma_channel_filter *)param;
798
799 if (filter && *filter == DMA_CHANNEL_PERIODIC) {
800 if (channel_id > 3) {
801 return false;
802 }
803 }
804 return true;
805 }
806
807 static DEVICE_API(dma, dma_mcux_edma_api) = {
808 .reload = dma_mcux_edma_reload,
809 .config = dma_mcux_edma_configure,
810 .start = dma_mcux_edma_start,
811 .stop = dma_mcux_edma_stop,
812 .suspend = dma_mcux_edma_suspend,
813 .resume = dma_mcux_edma_resume,
814 .get_status = dma_mcux_edma_get_status,
815 .chan_filter = dma_mcux_edma_channel_filter,
816 };
817
818 static int dma_mcux_edma_init(const struct device *dev)
819 {
820 const struct dma_mcux_edma_config *config = dev->config;
821 struct dma_mcux_edma_data *data = dev->data;
822
823 edma_config_t userConfig = { 0 };
824
825 LOG_DBG("INIT NXP EDMA");
826
827 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
828 uint8_t i;
829
830 for (i = 0; i < config->dma_channels / config->channels_per_mux; i++) {
831 DMAMUX_Init(DEV_DMAMUX_BASE(dev, i));
832 }
833 #endif
834
835 EDMA_GetDefaultConfig(&userConfig);
836 EDMA_Init(DEV_BASE(dev), &userConfig);
837 #ifdef CONFIG_DMA_MCUX_EDMA_V3
838 	/* Channel linking is available and is controlled by each channel's link settings */
839 EDMA_EnableAllChannelLink(DEV_BASE(dev), true);
840 #endif
841 config->irq_config_func(dev);
842 data->dma_ctx.magic = DMA_MAGIC;
843 data->dma_ctx.dma_channels = config->dma_channels;
844 data->dma_ctx.atomic = data->channels_atomic;
845 return 0;
846 }
847
848 /* The shared error interrupt (if any) must be declared as the last element in the devicetree */
849 #if !DT_INST_PROP(0, no_error_irq)
850 #define NUM_IRQS_WITHOUT_ERROR_IRQ(n) UTIL_DEC(DT_NUM_IRQS(DT_DRV_INST(n)))
851 #else
852 #define NUM_IRQS_WITHOUT_ERROR_IRQ(n) DT_NUM_IRQS(DT_DRV_INST(n))
853 #endif
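
/*
 * Example (hypothetical devicetree): an instance with three interrupts and no no-error-irq
 * property gets NUM_IRQS_WITHOUT_ERROR_IRQ() == 2, so IRQs 0 and 1 are wired to the per-channel
 * handlers below and the last IRQ (index 2) is wired to dma_mcux_edma_error_irq_handler().
 */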
854
855 #define IRQ_CONFIG(n, idx, fn) \
856 { \
857 IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, idx, irq), \
858 DT_INST_IRQ_BY_IDX(n, idx, priority), \
859 fn, \
860 DEVICE_DT_INST_GET(n), 0); \
861 irq_enable(DT_INST_IRQ_BY_IDX(n, idx, irq)); \
862 }
863
864 #define DMA_MCUX_EDMA_IRQ_DEFINE(idx, n) \
865 static void dma_mcux_edma_##n##_irq_##idx(const struct device *dev) \
866 { \
867 dma_mcux_edma_irq_handler(dev, idx); \
868 \
869 IF_ENABLED(UTIL_BOOL(DT_INST_PROP(n, irq_shared_offset)), \
870 (dma_mcux_edma_irq_handler(dev, \
871 idx + DT_INST_PROP(n, irq_shared_offset));)) \
872 \
873 IF_ENABLED(CONFIG_CPU_CORTEX_M4, (barrier_dsync_fence_full();)) \
874 }
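
/*
 * Each generated IRQ handler services channel idx and, when the instance defines the
 * irq-shared-offset devicetree property, also channel idx + irq_shared_offset, since those two
 * channels share one interrupt line on such SoCs.
 */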
875
876 #define DMA_MCUX_EDMA_IRQ_CONFIG(idx, n) \
877 IRQ_CONFIG(n, idx, dma_mcux_edma_##n##_irq_##idx)
878
879 #define DMA_MCUX_EDMA_CONFIG_FUNC(n) \
880 LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n), DMA_MCUX_EDMA_IRQ_DEFINE, (), n) \
881 static void dma_imx_config_func_##n(const struct device *dev) \
882 { \
883 ARG_UNUSED(dev); \
884 \
885 LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n), \
886 DMA_MCUX_EDMA_IRQ_CONFIG, (;), n) \
887 \
888 COND_CODE_1(DT_INST_PROP(n, no_error_irq), (), \
889 (IRQ_CONFIG(n, NUM_IRQS_WITHOUT_ERROR_IRQ(n), \
890 dma_mcux_edma_error_irq_handler))) \
891 \
892 LOG_DBG("install irq done"); \
893 }
894
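/*
 * If an instance does not define the channel-gap property, both gap bounds default to
 * dma-channels, which makes dma_mcux_edma_add_channel_gap() an identity mapping (no channels
 * are skipped).
 */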
895 #if DMA_MCUX_HAS_CHANNEL_GAP
896 #define DMA_MCUX_EDMA_CHANNEL_GAP(n) \
897 .channel_gap = DT_INST_PROP_OR(n, channel_gap, \
898 {[0 ... 1] = DT_INST_PROP(n, dma_channels)}),
899 #else
900 #define DMA_MCUX_EDMA_CHANNEL_GAP(n)
901 #endif
902
903 #if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
904 #define DMA_MCUX_EDMA_MUX(idx, n) \
905 (DMAMUX_Type *)DT_INST_REG_ADDR_BY_IDX(n, UTIL_INC(idx))
906
907 #define DMAMUX_BASE_INIT_DEFINE(n) \
908 static DMAMUX_Type *dmamux_base_##n[] = { \
909 LISTIFY(UTIL_DEC(DT_NUM_REGS(DT_DRV_INST(n))), \
910 DMA_MCUX_EDMA_MUX, (,), n) \
911 };
912
913 #define DMAMUX_BASE_INIT(n) .dmamux_base = &dmamux_base_##n[0],
914 #define CHANNELS_PER_MUX(n) .channels_per_mux = DT_INST_PROP(n, dma_channels) / \
915 ARRAY_SIZE(dmamux_base_##n),
916
917 #else
918 #define DMAMUX_BASE_INIT_DEFINE(n)
919 #define DMAMUX_BASE_INIT(n)
920 #define CHANNELS_PER_MUX(n)
921 #endif
922
923 /*
924  * Define a DMA device instance
925  */
926 #define DMA_INIT(n) \
927 DMAMUX_BASE_INIT_DEFINE(n) \
928 static void dma_imx_config_func_##n(const struct device *dev); \
929 static __aligned(32) EDMA_TCDPOOL_CACHE_ATTR edma_tcd_t \
930 dma_tcdpool##n[DT_INST_PROP(n, dma_channels)][CONFIG_DMA_TCD_QUEUE_SIZE];\
931 static const struct dma_mcux_edma_config dma_config_##n = { \
932 .base = (DMA_Type *)DT_INST_REG_ADDR(n), \
933 DMAMUX_BASE_INIT(n) \
934 .dma_requests = DT_INST_PROP(n, dma_requests), \
935 .dma_channels = DT_INST_PROP(n, dma_channels), \
936 CHANNELS_PER_MUX(n) \
937 .irq_config_func = dma_imx_config_func_##n, \
938 .dmamux_reg_offset = DT_INST_PROP(n, dmamux_reg_offset), \
939 DMA_MCUX_EDMA_CHANNEL_GAP(n) \
940 .tcdpool = dma_tcdpool##n, \
941 }; \
942 \
943 static struct call_back \
944 dma_data_callback_##n[DT_INST_PROP(n, dma_channels)]; \
945 static ATOMIC_DEFINE( \
946 dma_channels_atomic_##n, DT_INST_PROP(n, dma_channels)); \
947 static struct dma_mcux_edma_data dma_data_##n = { \
948 .data_cb = dma_data_callback_##n, \
949 .channels_atomic = dma_channels_atomic_##n, \
950 }; \
951 \
952 DEVICE_DT_INST_DEFINE(n, \
953 &dma_mcux_edma_init, NULL, \
954 &dma_data_##n, &dma_config_##n, \
955 PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \
956 &dma_mcux_edma_api); \
957 \
958 DMA_MCUX_EDMA_CONFIG_FUNC(n);
959
960 DT_INST_FOREACH_STATUS_OKAY(DMA_INIT)
961