1 /*
2 * Copyright (c) 2016, Freescale Semiconductor, Inc.
3 * Copyright 2016-2022 NXP
4 * All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9 #include "fsl_dma.h"
10 #if (defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET)
11 #include "fsl_memory.h"
12 #endif
13
14 /*******************************************************************************
15 * Definitions
16 ******************************************************************************/
17
18 /* Component ID definition, used by tools. */
19 #ifndef FSL_COMPONENT_ID
20 #define FSL_COMPONENT_ID "platform.drivers.lpc_dma"
21 #endif
22
23 /*******************************************************************************
24 * Prototypes
25 ******************************************************************************/
26
27 /*!
28 * @brief Get instance number for DMA.
29 *
30 * @param base DMA peripheral base address.
31 */
32 static uint32_t DMA_GetInstance(DMA_Type *base);
33
34 /*!
35 * @brief Get virtual channel number.
36 *
37 * @param base DMA peripheral base address.
38 */
39 static uint32_t DMA_GetVirtualStartChannel(DMA_Type *base);
40
41 /*******************************************************************************
42 * Variables
43 ******************************************************************************/
44
45 /*! @brief Array to map DMA instance number to base pointer. */
46 static DMA_Type *const s_dmaBases[] = DMA_BASE_PTRS;
47
48 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
49 /*! @brief Array to map DMA instance number to clock name. */
50 static const clock_ip_name_t s_dmaClockName[] = DMA_CLOCKS;
51 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
52
53 #if !(defined(FSL_FEATURE_DMA_HAS_NO_RESET) && FSL_FEATURE_DMA_HAS_NO_RESET)
54 /*! @brief Pointers to DMA resets for each instance. */
55 static const reset_ip_name_t s_dmaResets[] = DMA_RSTS_N;
#endif /* FSL_FEATURE_DMA_HAS_NO_RESET */

/*! @brief Array to map DMA instance number to IRQ number. */
57 static const IRQn_Type s_dmaIRQNumber[] = DMA_IRQS;
58
59 /*! @brief Pointers to transfer handle for each DMA channel. */
60 static dma_handle_t *s_DMAHandle[FSL_FEATURE_DMA_ALL_CHANNELS];
61
62 /*! @brief DMA driver internal descriptor table */
63 #ifdef FSL_FEATURE_DMA0_DESCRIPTOR_ALIGN_SIZE
64 SDK_ALIGN(static dma_descriptor_t s_dma_descriptor_table0[FSL_FEATURE_DMA_MAX_CHANNELS],
65 FSL_FEATURE_DMA0_DESCRIPTOR_ALIGN_SIZE);
66 #else
67 #if (defined(CPU_MIMXRT685SEVKA_dsp) || defined(CPU_MIMXRT685SFVKB_dsp))
68 AT_NONCACHEABLE_SECTION_ALIGN(static dma_descriptor_t s_dma_descriptor_table0[FSL_FEATURE_DMA_MAX_CHANNELS],
69 FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE);
70 #else
71 SDK_ALIGN(static dma_descriptor_t s_dma_descriptor_table0[FSL_FEATURE_DMA_MAX_CHANNELS],
72 FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE);
73 #endif /* (defined(CPU_MIMXRT685SEVKA_dsp) || defined(CPU_MIMXRT685SFVKB_dsp)) */
74 #endif /* FSL_FEATURE_DMA0_DESCRIPTOR_ALIGN_SIZE */
75
76 #if defined(DMA1)
77 #ifdef FSL_FEATURE_DMA1_DESCRIPTOR_ALIGN_SIZE
78 SDK_ALIGN(static dma_descriptor_t s_dma_descriptor_table1[FSL_FEATURE_DMA_MAX_CHANNELS],
79 FSL_FEATURE_DMA1_DESCRIPTOR_ALIGN_SIZE);
80 #else
81 #if (defined(CPU_MIMXRT685SEVKA_dsp) || defined(CPU_MIMXRT685SFVKB_dsp))
82 AT_NONCACHEABLE_SECTION_ALIGN(static dma_descriptor_t s_dma_descriptor_table1[FSL_FEATURE_DMA_MAX_CHANNELS],
83 FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE);
84 #else
85 SDK_ALIGN(static dma_descriptor_t s_dma_descriptor_table1[FSL_FEATURE_DMA_MAX_CHANNELS],
86 FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE);
87 #endif /* (defined(CPU_MIMXRT685SEVKA_dsp) || defined(CPU_MIMXRT685SFVKB_dsp)) */
88 #endif /* FSL_FEATURE_DMA1_DESCRIPTOR_ALIGN_SIZE */
89 static dma_descriptor_t *s_dma_descriptor_table[] = {s_dma_descriptor_table0, s_dma_descriptor_table1};
90 #else
91 static dma_descriptor_t *s_dma_descriptor_table[] = {s_dma_descriptor_table0};
92 #endif
93
94 /*******************************************************************************
95 * Code
96 ******************************************************************************/
97
static uint32_t DMA_GetInstance(DMA_Type *base)
99 {
100 uint32_t instance;
101 /* Find the instance index from base address mappings. */
102 for (instance = 0; instance < ARRAY_SIZE(s_dmaBases); instance++)
103 {
104 if (s_dmaBases[instance] == base)
105 {
106 break;
107 }
108 }
109 assert(instance < ARRAY_SIZE(s_dmaBases));
110
111 return instance;
112 }
113
static uint32_t DMA_GetVirtualStartChannel(DMA_Type *base)
115 {
116 uint32_t startChannel = 0, instance = 0;
117 uint32_t i = 0;
118
119 instance = DMA_GetInstance(base);
120
121 /* Compute start channel */
122 for (i = 0; i < instance; i++)
123 {
124 startChannel += (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(s_dmaBases[i]);
125 }
126
127 return startChannel;
128 }
129
130 /*!
 * brief Initializes the DMA peripheral.
 *
 * This function enables the DMA clock, sets the descriptor table, and
 * enables the DMA peripheral. A usage sketch follows the function body.
135 *
136 * param base DMA peripheral base address.
137 */
void DMA_Init(DMA_Type *base)
139 {
140 uint32_t instance = DMA_GetInstance(base);
141 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
142 /* enable dma clock gate */
    CLOCK_EnableClock(s_dmaClockName[instance]);
144 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
145
146 #if !(defined(FSL_FEATURE_DMA_HAS_NO_RESET) && FSL_FEATURE_DMA_HAS_NO_RESET)
147 /* Reset the DMA module */
    RESET_PeripheralReset(s_dmaResets[instance]);
149 #endif
150 /* set descriptor table */
151 #if (defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET)
152 base->SRAMBASE = MEMORY_ConvertMemoryMapAddress((uint32_t)s_dma_descriptor_table[instance], kMEMORY_Local2DMA);
153 #else
154 base->SRAMBASE = (uint32_t)s_dma_descriptor_table[instance];
155 #endif
156 /* enable dma peripheral */
157 base->CTRL |= DMA_CTRL_ENABLE_MASK;
158 }
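
/*
 * A minimal usage sketch (not part of the driver): bring up DMA0, enable one
 * channel, and shut the module down again. DEMO_DMA_CHANNEL is an
 * application-defined channel number, not something provided by this driver.
 *
 * code
 * DMA_Init(DMA0);
 * DMA_EnableChannel(DMA0, DEMO_DMA_CHANNEL);
 * // ... submit and run transfers ...
 * DMA_DisableChannel(DMA0, DEMO_DMA_CHANNEL);
 * DMA_Deinit(DMA0);
 * endcode
 */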
159
160 /*!
161 * brief Deinitializes DMA peripheral.
162 *
163 * This function gates the DMA clock.
164 *
165 * param base DMA peripheral base address.
166 */
void DMA_Deinit(DMA_Type *base)
168 {
169 /* Disable DMA peripheral */
170 base->CTRL &= ~(DMA_CTRL_ENABLE_MASK);
171 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
172 CLOCK_DisableClock(s_dmaClockName[DMA_GetInstance(base)]);
173 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
174 }
175
176 /*!
 * brief Set trigger settings of a DMA channel.
 * deprecated Do not use this function. It has been superseded by @ref DMA_SetChannelConfig.
179 *
180 * param base DMA peripheral base address.
181 * param channel DMA channel number.
182 * param trigger trigger configuration.
183 */
void DMA_ConfigureChannelTrigger(DMA_Type *base, uint32_t channel, dma_channel_trigger_t *trigger)
185 {
186 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base) != -1);
187 assert((channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base)) && (NULL != trigger));
188
189 uint32_t tmpReg = (DMA_CHANNEL_CFG_HWTRIGEN_MASK | DMA_CHANNEL_CFG_TRIGPOL_MASK | DMA_CHANNEL_CFG_TRIGTYPE_MASK |
190 DMA_CHANNEL_CFG_TRIGBURST_MASK | DMA_CHANNEL_CFG_BURSTPOWER_MASK |
191 DMA_CHANNEL_CFG_SRCBURSTWRAP_MASK | DMA_CHANNEL_CFG_DSTBURSTWRAP_MASK);
192 tmpReg = base->CHANNEL[channel].CFG & (~tmpReg);
193 tmpReg |= (uint32_t)(trigger->type) | (uint32_t)(trigger->burst) | (uint32_t)(trigger->wrap);
194 base->CHANNEL[channel].CFG = tmpReg;
195 }
196
197 /*!
198 * brief Gets the remaining bytes of the current DMA descriptor transfer.
199 *
200 * param base DMA peripheral base address.
201 * param channel DMA channel number.
202 * return The number of bytes which have not been transferred yet.
203 */
uint32_t DMA_GetRemainingBytes(DMA_Type *base, uint32_t channel)
205 {
206 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base) != -1);
207 assert(channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base));
208
    /* NOTE: when descriptors are chained, the ACTIVE bit is set for the whole chain. This makes it
     * impossible to distinguish between:
     * - a finished transfer (represented by the value '0x3FF')
     * - and 1024 bytes remaining to transfer (also value 0x3FF)
     * for all descriptors in the chain, except the last one.
     * If you decide to use this function, limit a descriptor to at most 1023 transfers. */
215
216 /* Channel not active (transfer finished) and value is 0x3FF - nothing to transfer */
217 if ((!DMA_ChannelIsActive(base, channel)) &&
218 (0x3FFUL == ((base->CHANNEL[channel].XFERCFG & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >>
219 DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT)))
220 {
221 return 0UL;
222 }
223
224 return ((base->CHANNEL[channel].XFERCFG & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >>
225 DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) +
226 1UL;
227 }
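
/*
 * A polling sketch built on this API, assuming a transfer is already running on
 * an application-defined channel DEMO_DMA_CHANNEL of DMA0. Remember the 1023
 * transfer-count limitation described in the note above when descriptors are
 * chained.
 *
 * code
 * while (DMA_ChannelIsActive(DMA0, DEMO_DMA_CHANNEL))
 * {
 *     uint32_t remaining = DMA_GetRemainingBytes(DMA0, DEMO_DMA_CHANNEL);
 *     // ... report progress, feed a watchdog, etc. ...
 * }
 * endcode
 */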
228
229 /* Verify and convert dma_xfercfg_t to XFERCFG register */
static void DMA_SetupXferCFG(dma_xfercfg_t *xfercfg, uint32_t *xfercfg_addr)
231 {
232 assert(xfercfg != NULL);
233 /* check source increment */
234 assert((xfercfg->srcInc <= (uint8_t)kDMA_AddressInterleave4xWidth) &&
235 (xfercfg->dstInc <= (uint8_t)kDMA_AddressInterleave4xWidth));
236 /* check data width */
237 assert(xfercfg->byteWidth <= (uint8_t)kDMA_Transfer32BitWidth);
238 /* check transfer count */
239 assert(xfercfg->transferCount <= DMA_MAX_TRANSFER_COUNT);
240
241 uint32_t xfer = 0;
242
243 /* set valid flag - descriptor is ready now */
244 xfer |= DMA_CHANNEL_XFERCFG_CFGVALID(xfercfg->valid);
245 /* set reload - allow link to next descriptor */
246 xfer |= DMA_CHANNEL_XFERCFG_RELOAD(xfercfg->reload);
247 /* set swtrig flag - start transfer */
248 xfer |= DMA_CHANNEL_XFERCFG_SWTRIG(xfercfg->swtrig);
    /* set clrtrig flag - clear the trigger when this descriptor is exhausted */
250 xfer |= DMA_CHANNEL_XFERCFG_CLRTRIG(xfercfg->clrtrig);
251 /* set INTA */
252 xfer |= DMA_CHANNEL_XFERCFG_SETINTA(xfercfg->intA);
253 /* set INTB */
254 xfer |= DMA_CHANNEL_XFERCFG_SETINTB(xfercfg->intB);
255 /* set data width */
256 xfer |= DMA_CHANNEL_XFERCFG_WIDTH(xfercfg->byteWidth == 4U ? 2U : xfercfg->byteWidth - 1UL);
257 /* set source increment value */
258 xfer |= DMA_CHANNEL_XFERCFG_SRCINC(
259 (xfercfg->srcInc == (uint8_t)kDMA_AddressInterleave4xWidth) ? (xfercfg->srcInc - 1UL) : xfercfg->srcInc);
260 /* set destination increment value */
261 xfer |= DMA_CHANNEL_XFERCFG_DSTINC(
262 (xfercfg->dstInc == (uint8_t)kDMA_AddressInterleave4xWidth) ? (xfercfg->dstInc - 1UL) : xfercfg->dstInc);
263 /* set transfer count */
264 xfer |= DMA_CHANNEL_XFERCFG_XFERCOUNT(xfercfg->transferCount - 1UL);
265
266 /* store xferCFG */
267 *xfercfg_addr = xfer;
268 }
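
/*
 * A sketch of filling dma_xfercfg_t by hand, as validated by DMA_SetupXferCFG()
 * above and consumed by DMA_CreateDescriptor(): a 256-item, 32-bit wide,
 * memory-to-memory transfer that raises INTA on completion. The concrete values
 * are illustrative assumptions, not required settings.
 *
 * code
 * dma_xfercfg_t xfercfg = {0};
 * xfercfg.valid         = true;
 * xfercfg.swtrig        = true;
 * xfercfg.intA          = true;
 * xfercfg.byteWidth     = 4;
 * xfercfg.srcInc        = 1;
 * xfercfg.dstInc        = 1;
 * xfercfg.transferCount = 256;
 * endcode
 */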
269
270 /*!
 * brief Setup DMA descriptor.
 * Note: This function does not support configuring a wrap descriptor.
 * A usage sketch follows the function body.
 * param desc DMA descriptor address.
 * param xfercfg Transfer configuration for the DMA descriptor.
 * param srcStartAddr Start address of the source data.
 * param dstStartAddr Start address of the destination data.
 * param nextDesc Address of the next descriptor in the chain.
278 */
void DMA_SetupDescriptor(
280 dma_descriptor_t *desc, uint32_t xfercfg, void *srcStartAddr, void *dstStartAddr, void *nextDesc)
281 {
282 assert((((uint32_t)(uint32_t *)nextDesc) & ((uint32_t)FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0UL);
283
284 #if (defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET)
285 srcStartAddr = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)srcStartAddr, kMEMORY_Local2DMA);
286 dstStartAddr = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)dstStartAddr, kMEMORY_Local2DMA);
287 nextDesc = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)nextDesc, kMEMORY_Local2DMA);
288 #endif
289
290 uint32_t width = 0, srcInc = 0, dstInc = 0, transferCount = 0;
291
292 width = (xfercfg & DMA_CHANNEL_XFERCFG_WIDTH_MASK) >> DMA_CHANNEL_XFERCFG_WIDTH_SHIFT;
293 srcInc = (xfercfg & DMA_CHANNEL_XFERCFG_SRCINC_MASK) >> DMA_CHANNEL_XFERCFG_SRCINC_SHIFT;
294 dstInc = (xfercfg & DMA_CHANNEL_XFERCFG_DSTINC_MASK) >> DMA_CHANNEL_XFERCFG_DSTINC_SHIFT;
295 transferCount = ((xfercfg & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >> DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) + 1U;
296
    /* convert the register value to the actual value */
298 if (width == 2U)
299 {
300 width = kDMA_Transfer32BitWidth;
301 }
302 else
303 {
304 width += 1U;
305 }
306
307 /*
308 * Transfers of 16 bit width require an address alignment to a multiple of 2 bytes.
309 * Transfers of 32 bit width require an address alignment to a multiple of 4 bytes.
310 * Transfers of 8 bit width can be at any address
311 */
312 if (((NULL != srcStartAddr) && (0UL == ((uint32_t)(uint32_t *)srcStartAddr) % width)) &&
313 ((NULL != dstStartAddr) && (0UL == ((uint32_t)(uint32_t *)dstStartAddr) % width)))
314 {
315 if (srcInc == 3U)
316 {
317 srcInc = kDMA_AddressInterleave4xWidth;
318 }
319
320 if (dstInc == 3U)
321 {
322 dstInc = kDMA_AddressInterleave4xWidth;
323 }
324
325 desc->xfercfg = xfercfg;
326 desc->srcEndAddr = DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)srcStartAddr, srcInc, transferCount * width, width);
327 desc->dstEndAddr = DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)dstStartAddr, dstInc, transferCount * width, width);
328 desc->linkToNextDesc = nextDesc;
329 }
330 else
331 {
        /* if the address alignment does not satisfy the requirement, reset the descriptor so the DMA generates an error */
333 desc->xfercfg = 0U;
334 desc->srcEndAddr = NULL;
335 desc->dstEndAddr = NULL;
336 }
337 }
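
/*
 * A sketch of chaining two application-owned descriptors with this function.
 * Names (s_linkDesc, s_srcBuffer, s_destBuffer) and the DMA_CHANNEL_XFER argument
 * values are assumptions; the argument order (reload, clrTrig, intA, intB, width,
 * srcInc, dstInc, bytes) follows the examples elsewhere in this file.
 * DMA_ALLOCATE_LINK_DESCRIPTOR provides the required link-descriptor alignment.
 *
 * code
 * DMA_ALLOCATE_LINK_DESCRIPTOR(s_linkDesc[2]);
 *
 * DMA_SetupDescriptor(&s_linkDesc[0],
 *                     DMA_CHANNEL_XFER(true, false, false, false, 4, 1, 1, 64),
 *                     &s_srcBuffer[0], &s_destBuffer[0], &s_linkDesc[1]);
 * DMA_SetupDescriptor(&s_linkDesc[1],
 *                     DMA_CHANNEL_XFER(false, false, true, false, 4, 1, 1, 64),
 *                     &s_srcBuffer[16], &s_destBuffer[16], NULL);
 * endcode
 */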
338
339 /*!
 * brief Setup DMA channel descriptor.
 * Note: This function supports configuring a wrap descriptor.
 * param desc DMA descriptor address.
 * param xfercfg Transfer configuration for the DMA descriptor.
 * param srcStartAddr Start address of the source data.
 * param dstStartAddr Start address of the destination data.
 * param nextDesc Address of the next descriptor in the chain.
 * param wrapType burst wrap type.
 * param burstSize burst size, refer to _dma_burst_size.
349 */
void DMA_SetupChannelDescriptor(dma_descriptor_t *desc,
351 uint32_t xfercfg,
352 void *srcStartAddr,
353 void *dstStartAddr,
354 void *nextDesc,
355 dma_burst_wrap_t wrapType,
356 uint32_t burstSize)
357 {
358 assert((((uint32_t)(uint32_t *)nextDesc) & ((uint32_t)FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0UL);
359
360 #if (defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET)
361 srcStartAddr = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)srcStartAddr, kMEMORY_Local2DMA);
362 dstStartAddr = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)dstStartAddr, kMEMORY_Local2DMA);
363 nextDesc = (void *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)nextDesc, kMEMORY_Local2DMA);
364 #endif
365
366 uint32_t width = 0, srcInc = 0, dstInc = 0, transferCount = 0;
367
368 width = (xfercfg & DMA_CHANNEL_XFERCFG_WIDTH_MASK) >> DMA_CHANNEL_XFERCFG_WIDTH_SHIFT;
369 srcInc = (xfercfg & DMA_CHANNEL_XFERCFG_SRCINC_MASK) >> DMA_CHANNEL_XFERCFG_SRCINC_SHIFT;
370 dstInc = (xfercfg & DMA_CHANNEL_XFERCFG_DSTINC_MASK) >> DMA_CHANNEL_XFERCFG_DSTINC_SHIFT;
371 transferCount = ((xfercfg & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >> DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) + 1U;
372
    /* convert the register value to the actual value */
374 if (width == 2U)
375 {
376 width = kDMA_Transfer32BitWidth;
377 }
378 else
379 {
380 width += 1U;
381 }
382
383 /*
384 * Transfers of 16 bit width require an address alignment to a multiple of 2 bytes.
385 * Transfers of 32 bit width require an address alignment to a multiple of 4 bytes.
386 * Transfers of 8 bit width can be at any address
387 */
388 if (((NULL != srcStartAddr) && (0UL == ((uint32_t)(uint32_t *)srcStartAddr) % width)) &&
389 ((NULL != dstStartAddr) && (0UL == ((uint32_t)(uint32_t *)dstStartAddr) % width)))
390 {
391 if (srcInc == 3U)
392 {
393 srcInc = kDMA_AddressInterleave4xWidth;
394 }
395
396 if (dstInc == 3U)
397 {
398 dstInc = kDMA_AddressInterleave4xWidth;
399 }
400
401 desc->xfercfg = xfercfg;
402
403 if (wrapType == kDMA_NoWrap)
404 {
405 desc->srcEndAddr =
406 DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)srcStartAddr, srcInc, transferCount * width, width);
407 desc->dstEndAddr =
408 DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)dstStartAddr, dstInc, transferCount * width, width);
409 }
        /* for a wrapped transfer, the wrapped end address is determined by the burstSize/width/interleave size */
411 if (wrapType == kDMA_SrcWrap)
412 {
413 desc->srcEndAddr =
414 (uint32_t *)((uint32_t)(uint32_t *)srcStartAddr + ((1UL << burstSize) - 1UL) * width * srcInc);
415 desc->dstEndAddr =
416 DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)dstStartAddr, dstInc, transferCount * width, width);
417 }
418 if (wrapType == kDMA_DstWrap)
419 {
420 desc->srcEndAddr =
421 DMA_DESCRIPTOR_END_ADDRESS((uint32_t *)srcStartAddr, srcInc, transferCount * width, width);
422 desc->dstEndAddr =
423 (uint32_t *)((uint32_t)(uint32_t *)dstStartAddr + ((1UL << burstSize) - 1UL) * width * dstInc);
424 }
425 if (wrapType == kDMA_SrcAndDstWrap)
426 {
427 desc->srcEndAddr =
428 (uint32_t *)(((uint32_t)(uint32_t *)srcStartAddr) + ((1UL << burstSize) - 1UL) * width * srcInc);
429 desc->dstEndAddr =
430 (uint32_t *)(((uint32_t)(uint32_t *)dstStartAddr) + ((1UL << burstSize) - 1UL) * width * dstInc);
431 }
432
433 desc->linkToNextDesc = nextDesc;
434 }
435 else
436 {
        /* if the address alignment does not satisfy the requirement, reset the descriptor so the DMA generates an error */
438 desc->xfercfg = 0U;
439 desc->srcEndAddr = NULL;
440 desc->dstEndAddr = NULL;
441 }
442 }
443
444 /*!
 * brief Create an application-specific DMA descriptor
 * to be used in a chained transfer.
 * deprecated Do not use this function. It has been superseded by @ref DMA_SetupDescriptor.
 * param desc DMA descriptor address.
 * param xfercfg Transfer configuration for the DMA descriptor.
 * param srcAddr Start address of the data to transmit (passed to DMA_SetupDescriptor as the source start address).
 * param dstAddr Start address of the data to receive.
 * param nextDesc Address of the next descriptor in the chain.
453 */
void DMA_CreateDescriptor(dma_descriptor_t *desc, dma_xfercfg_t *xfercfg, void *srcAddr, void *dstAddr, void *nextDesc)
455 {
456 assert((((uint32_t)(uint32_t *)nextDesc) & ((uint32_t)FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0UL);
457 assert((NULL != srcAddr) && (0UL == ((uint32_t)(uint32_t *)srcAddr) % xfercfg->byteWidth));
458 assert((NULL != dstAddr) && (0UL == ((uint32_t)(uint32_t *)dstAddr) % xfercfg->byteWidth));
459
460 uint32_t xfercfg_reg = 0;
461
462 DMA_SetupXferCFG(xfercfg, &xfercfg_reg);
463
464 /* Set descriptor structure */
465 DMA_SetupDescriptor(desc, xfercfg_reg, srcAddr, dstAddr, nextDesc);
466 }
467
468 /*!
469 * brief Abort running transfer by handle.
470 *
471 * This function aborts DMA transfer specified by handle.
472 *
473 * param handle DMA handle pointer.
474 */
void DMA_AbortTransfer(dma_handle_t *handle)
476 {
477 assert(NULL != handle);
478
479 DMA_DisableChannel(handle->base, handle->channel);
480 while ((DMA_COMMON_CONST_REG_GET(handle->base, handle->channel, BUSY) &
481 (1UL << DMA_CHANNEL_INDEX(handle->base, handle->channel))) != 0UL)
482 {
483 }
484 DMA_COMMON_REG_GET(handle->base, handle->channel, ABORT) |= 1UL << DMA_CHANNEL_INDEX(handle->base, handle->channel);
485 DMA_EnableChannel(handle->base, handle->channel);
486 }
487
488 /*!
489 * brief Creates the DMA handle.
490 *
491 * This function is called if using transaction API for DMA. This function
492 * initializes the internal state of DMA handle.
493 *
494 * param handle DMA handle pointer. The DMA handle stores callback function and
495 * parameters.
496 * param base DMA peripheral base address.
497 * param channel DMA channel number.
498 */
void DMA_CreateHandle(dma_handle_t *handle, DMA_Type *base, uint32_t channel)
500 {
501 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base) != -1);
502 assert((NULL != handle) && (channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base)));
503
504 uint32_t dmaInstance;
505 uint32_t startChannel = 0;
    /* get the DMA instance index; DMA_GetInstance asserts if the base address is invalid */
507 dmaInstance = DMA_GetInstance(base);
508 startChannel = DMA_GetVirtualStartChannel(base);
509
510 (void)memset(handle, 0, sizeof(*handle));
511 handle->base = base;
512 handle->channel = (uint8_t)channel;
513 s_DMAHandle[startChannel + channel] = handle;
514 /* Enable NVIC interrupt */
515 (void)EnableIRQ(s_dmaIRQNumber[dmaInstance]);
516 /* Enable channel interrupt */
517 DMA_EnableChannelInterrupts(handle->base, channel);
518 }
519
520 /*!
521 * brief Installs a callback function for the DMA transfer.
522 *
 * This callback is called in the DMA IRQ handler. Use the callback to do something after
 * the current transfer completes.
525 *
526 * param handle DMA handle pointer.
527 * param callback DMA callback function pointer.
528 * param userData Parameter for callback function.
529 */
void DMA_SetCallback(dma_handle_t *handle, dma_callback callback, void *userData)
531 {
532 assert(handle != NULL);
533
534 handle->callback = callback;
535 handle->userData = userData;
536 }
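
/*
 * A sketch of creating a handle and installing a completion callback. The
 * callback signature shown is assumed from the dispatch in DMA_IRQHandle() below
 * (handle, userData, transferDone, interrupt type); DEMO_DMA_CHANNEL, s_dmaHandle
 * and the callback body are application assumptions.
 *
 * code
 * static void APP_DMACallback(dma_handle_t *handle, void *param, bool transferDone, uint32_t intmode)
 * {
 *     if (transferDone)
 *     {
 *         // ... signal the waiting task ...
 *     }
 * }
 *
 * DMA_CreateHandle(&s_dmaHandle, DMA0, DEMO_DMA_CHANNEL);
 * DMA_SetCallback(&s_dmaHandle, APP_DMACallback, NULL);
 * endcode
 */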
537
538 /*!
539 * brief Prepares the DMA transfer structure.
 * deprecated Do not use this function. It has been superseded by @ref DMA_PrepareChannelTransfer and
 * DMA_PrepareChannelXfer.
 * This function prepares the transfer configuration structure according to the user input.
 * A usage sketch follows the function body.
543 *
 * param config The user configuration structure of type dma_transfer_config_t.
545 * param srcAddr DMA transfer source address.
546 * param dstAddr DMA transfer destination address.
 * param byteWidth DMA transfer data width in bytes (1, 2, or 4).
548 * param transferBytes DMA transfer bytes to be transferred.
549 * param type DMA transfer type.
550 * param nextDesc Chain custom descriptor to transfer.
 * note The data address and the data width must be consistent. For example, if the source
 * width is 4 bytes, the source address must be 4-byte aligned; otherwise it results in a
 * source address error (SAE).
554 */
void DMA_PrepareTransfer(dma_transfer_config_t *config,
556 void *srcAddr,
557 void *dstAddr,
558 uint32_t byteWidth,
559 uint32_t transferBytes,
560 dma_transfer_type_t type,
561 void *nextDesc)
562 {
563 uint32_t xfer_count;
564 assert((NULL != config) && (NULL != srcAddr) && (NULL != dstAddr));
565 assert((byteWidth == 1UL) || (byteWidth == 2UL) || (byteWidth == 4UL));
566 assert((((uint32_t)(uint32_t *)nextDesc) & ((uint32_t)FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0UL);
567
568 /* check max */
569 xfer_count = transferBytes / byteWidth;
570 assert((xfer_count <= DMA_MAX_TRANSFER_COUNT) && (0UL == transferBytes % byteWidth));
571
572 (void)memset(config, 0, sizeof(*config));
573
574 if (type == kDMA_MemoryToMemory)
575 {
576 config->xfercfg.srcInc = 1;
577 config->xfercfg.dstInc = 1;
578 config->isPeriph = false;
579 }
580
581 else if (type == kDMA_PeripheralToMemory)
582 {
583 /* Peripheral register - source doesn't increment */
584 config->xfercfg.srcInc = 0;
585 config->xfercfg.dstInc = 1;
586 config->isPeriph = true;
587 }
588 else if (type == kDMA_MemoryToPeripheral)
589 {
590 /* Peripheral register - destination doesn't increment */
591 config->xfercfg.srcInc = 1;
592 config->xfercfg.dstInc = 0;
593 config->isPeriph = true;
594 }
595 /* kDMA_StaticToStatic */
596 else
597 {
598 config->xfercfg.srcInc = 0;
599 config->xfercfg.dstInc = 0;
600 config->isPeriph = true;
601 }
602
603 config->dstAddr = (uint8_t *)dstAddr;
604 config->srcAddr = (uint8_t *)srcAddr;
605 config->nextDesc = (uint8_t *)nextDesc;
606 config->xfercfg.transferCount = (uint16_t)xfer_count;
607 config->xfercfg.byteWidth = (uint8_t)byteWidth;
608 config->xfercfg.intA = true;
609 config->xfercfg.reload = nextDesc != NULL;
610 config->xfercfg.valid = true;
611 }
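
/*
 * A sketch of the legacy prepare/submit/start flow around this deprecated
 * function (buffer names, handle and channel setup are application assumptions;
 * new code should prefer DMA_PrepareChannelTransfer()).
 *
 * code
 * dma_transfer_config_t transferConfig;
 *
 * DMA_PrepareTransfer(&transferConfig, s_srcBuffer, s_destBuffer, 4, sizeof(s_srcBuffer),
 *                     kDMA_MemoryToMemory, NULL);
 * DMA_SubmitTransfer(&s_dmaHandle, &transferConfig);
 * DMA_StartTransfer(&s_dmaHandle);
 * endcode
 */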
612
613 /*!
 * brief Set channel configuration.
 *
 * This function provides an interface to configure the channel configuration registers.
 * A usage sketch follows the function body.
 *
 * param base DMA base address.
 * param channel DMA channel number.
 * param trigger DMA channel trigger configuration; pass NULL when no hardware trigger is used.
 * param isPeriph true if the channel serves a peripheral request, false otherwise.
621 */
void DMA_SetChannelConfig(DMA_Type *base, uint32_t channel, dma_channel_trigger_t *trigger, bool isPeriph)
623 {
624 assert(channel < (uint32_t)FSL_FEATURE_DMA_MAX_CHANNELS);
625
626 uint32_t tmpReg = DMA_CHANNEL_CFG_PERIPHREQEN_MASK;
627
628 if (trigger != NULL)
629 {
630 tmpReg |= DMA_CHANNEL_CFG_HWTRIGEN_MASK | DMA_CHANNEL_CFG_TRIGPOL_MASK | DMA_CHANNEL_CFG_TRIGTYPE_MASK |
631 DMA_CHANNEL_CFG_TRIGBURST_MASK | DMA_CHANNEL_CFG_BURSTPOWER_MASK | DMA_CHANNEL_CFG_SRCBURSTWRAP_MASK |
632 DMA_CHANNEL_CFG_DSTBURSTWRAP_MASK;
633 }
634
635 tmpReg = base->CHANNEL[channel].CFG & (~tmpReg);
636
637 if (trigger != NULL)
638 {
639 tmpReg |= (uint32_t)(trigger->type) | (uint32_t)(trigger->burst) | (uint32_t)(trigger->wrap);
640 }
641
642 tmpReg |= DMA_CHANNEL_CFG_PERIPHREQEN(isPeriph);
643
644 base->CHANNEL[channel].CFG = tmpReg;
645 }
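
/*
 * A sketch of configuring a channel for a software-triggered memory-to-memory
 * transfer: no hardware trigger (NULL) and no peripheral request. For a
 * peripheral-paced transfer, pass a filled dma_channel_trigger_t and
 * isPeriph = true instead. DEMO_DMA_CHANNEL is an application assumption.
 *
 * code
 * DMA_SetChannelConfig(DMA0, DEMO_DMA_CHANNEL, NULL, false);
 * endcode
 */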
646
647 /*!
648 * brief Prepare channel transfer configurations.
649 *
 * This function is used to prepare channel transfer configurations.
651 *
652 * param config Pointer to DMA channel transfer configuration structure.
653 * param srcStartAddr source start address.
654 * param dstStartAddr destination start address.
 * param xferCfg xfer configuration; refer to the DMA_CHANNEL_XFER macro for how to compose the xferCfg value.
656 * param type transfer type.
657 * param trigger DMA channel trigger configurations.
658 * param nextDesc address of next descriptor.
659 */
void DMA_PrepareChannelTransfer(dma_channel_config_t *config,
661 void *srcStartAddr,
662 void *dstStartAddr,
663 uint32_t xferCfg,
664 dma_transfer_type_t type,
665 dma_channel_trigger_t *trigger,
666 void *nextDesc)
667 {
668 assert((NULL != config) && (NULL != srcStartAddr) && (NULL != dstStartAddr));
669 assert((((uint32_t)(uint32_t *)nextDesc) & ((uint32_t)FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0UL);
670
671 /* check max */
672 (void)memset(config, 0, sizeof(*config));
673
674 if (type == kDMA_MemoryToMemory)
675 {
676 config->isPeriph = false;
677 }
678 else if (type == kDMA_PeripheralToMemory)
679 {
680 config->isPeriph = true;
681 }
682 else if (type == kDMA_MemoryToPeripheral)
683 {
684 config->isPeriph = true;
685 }
686 /* kDMA_StaticToStatic */
687 else
688 {
689 config->isPeriph = true;
690 }
691
692 config->dstStartAddr = (uint8_t *)dstStartAddr;
693 config->srcStartAddr = (uint8_t *)srcStartAddr;
694 config->nextDesc = (uint8_t *)nextDesc;
695 config->trigger = trigger;
696 config->xferCfg = xferCfg;
697 }
698
699 /*!
 * brief Load channel transfer descriptor.
 *
 * This function can be used to load a descriptor into the driver's internal channel descriptor that is used to start a DMA
 * transfer; the head descriptor table is defined in the DMA driver. It is useful for the following case:
 * 1. for a polling transfer, the application can allocate a local descriptor memory table to prepare a descriptor first
 * and then call this API to load the configured descriptor into the driver descriptor table.
 * code
 * DMA_Init(DMA0);
706 * DMA_EnableChannel(DMA0, DEMO_DMA_CHANNEL);
707 * DMA_SetupDescriptor(desc, xferCfg, s_srcBuffer, &s_destBuffer[0], NULL);
708 * DMA_LoadChannelDescriptor(DMA0, DEMO_DMA_CHANNEL, (dma_descriptor_t *)desc);
709 * DMA_DoChannelSoftwareTrigger(DMA0, DEMO_DMA_CHANNEL);
710 * while(DMA_ChannelIsBusy(DMA0, DEMO_DMA_CHANNEL))
711 * {}
712 * endcode
713 *
714 * param base DMA base address.
715 * param channel DMA channel.
716 * param descriptor configured DMA descriptor.
717 */
void DMA_LoadChannelDescriptor(DMA_Type *base, uint32_t channel, dma_descriptor_t *descriptor)
719 {
720 assert(NULL != descriptor);
721 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base) != -1);
722 assert(channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base));
723
724 uint32_t instance = DMA_GetInstance(base);
725 dma_descriptor_t *channelDescriptor = (dma_descriptor_t *)(&s_dma_descriptor_table[instance][channel]);
726
727 channelDescriptor->xfercfg = descriptor->xfercfg;
728 channelDescriptor->srcEndAddr = descriptor->srcEndAddr;
729 channelDescriptor->dstEndAddr = descriptor->dstEndAddr;
730 channelDescriptor->linkToNextDesc = descriptor->linkToNextDesc;
731
    /* Set the channel XFERCFG register according to the first channel descriptor. */
733 base->CHANNEL[channel].XFERCFG = descriptor->xfercfg;
734 }
735
736 /*!
737 * brief Install DMA descriptor memory.
738 *
 * This function is used to register DMA descriptor memory for linked transfers; a typical case is a ping-pong
 * transfer, which requires more than one DMA descriptor memory space. Although the current DMA driver has
 * a default DMA descriptor buffer, it supports only one DMA descriptor per channel.
 * Take care that the address of the DMA descriptor pool meets the required alignment (512 bytes).
 * A usage sketch follows the function body.
 *
 * param base DMA peripheral base address.
 * param addr DMA descriptor address.
747 */
void DMA_InstallDescriptorMemory(DMA_Type *base, void *addr)
749 {
750 assert(addr != NULL);
751
752 #if defined FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZEn
753 assert((((uint32_t)(uint32_t *)addr) & ((uint32_t)FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZEn(base) - 1UL)) == 0U);
754 #else
755 assert((((uint32_t)(uint32_t *)addr) & ((uint32_t)FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE - 1UL)) == 0U);
756 #endif
757 /* reconfigure the DMA descriptor base address */
758 base->SRAMBASE = (uint32_t)(uint32_t *)addr;
759 }
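
/*
 * A sketch of providing an application-owned descriptor pool (one head descriptor
 * per channel) and installing it. SDK_ALIGN and FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE
 * are used the same way as for the driver's internal table at the top of this file;
 * s_appDescriptorTable is an application assumption.
 *
 * code
 * SDK_ALIGN(static dma_descriptor_t s_appDescriptorTable[FSL_FEATURE_DMA_MAX_CHANNELS],
 *           FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE);
 *
 * DMA_InstallDescriptorMemory(DMA0, (void *)s_appDescriptorTable);
 * endcode
 */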
760
761 /*!
 * brief Submit channel transfer parameters directly.
 *
 * This function is used to configure the channel head descriptor that is used to start a DMA transfer; the head
 * descriptor table is defined in the DMA driver. It is useful for the following cases:
 * 1. for a single transfer, the application does not need to allocate a descriptor table; the head descriptor can be
 used for it.
768 * code
769 DMA_SetChannelConfig(base, channel, trigger, isPeriph);
770 DMA_CreateHandle(handle, base, channel)
771 DMA_SubmitChannelTransferParameter(handle, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc,
772 bytes), srcStartAddr, dstStartAddr, NULL);
773 DMA_StartTransfer(handle)
774 * endcode
775 *
 * 2. for a linked transfer, the application is responsible for the link descriptors; for example, if 4 transfers are
 required, the application should prepare
 * three descriptor tables with the macro; the head descriptor in the driver can be used for the first transfer descriptor.
 * code
 define the link descriptor table in the application with the macro
 DMA_ALLOCATE_LINK_DESCRIPTOR(nextDesc[3]);
782
783 DMA_SetupDescriptor(nextDesc0, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
784 srcStartAddr, dstStartAddr, nextDesc1);
785 DMA_SetupDescriptor(nextDesc1, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
786 srcStartAddr, dstStartAddr, nextDesc2);
787 DMA_SetupDescriptor(nextDesc2, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
788 srcStartAddr, dstStartAddr, NULL);
789 DMA_SetChannelConfig(base, channel, trigger, isPeriph);
790 DMA_CreateHandle(handle, base, channel)
791 DMA_SubmitChannelTransferParameter(handle, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc,
792 bytes), srcStartAddr, dstStartAddr, nextDesc0);
793 DMA_StartTransfer(handle);
794 * endcode
795 *
796 * param handle Pointer to DMA handle.
 * param xferCfg xfer configuration; refer to the DMA_CHANNEL_XFER macro for how to compose the xferCfg value.
798 * param srcStartAddr source start address.
799 * param dstStartAddr destination start address.
800 * param nextDesc address of next descriptor.
801 */
void DMA_SubmitChannelTransferParameter(
803 dma_handle_t *handle, uint32_t xferCfg, void *srcStartAddr, void *dstStartAddr, void *nextDesc)
804 {
805 assert((NULL != srcStartAddr) && (NULL != dstStartAddr));
806 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base) != -1);
807 assert(handle->channel < (uint8_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base));
808
809 uint32_t instance = DMA_GetInstance(handle->base);
810 dma_descriptor_t *descriptor = (dma_descriptor_t *)(&s_dma_descriptor_table[instance][handle->channel]);
811
812 DMA_SetupDescriptor(descriptor, xferCfg, srcStartAddr, dstStartAddr, nextDesc);
813
    /* Set the channel XFERCFG register according to the first channel descriptor. */
815 handle->base->CHANNEL[handle->channel].XFERCFG = xferCfg;
816 }
817
818 /*!
 * brief Submit channel descriptor.
 *
 * This function is used to configure the channel head descriptor that is used to start a DMA transfer; the head
 * descriptor table is defined in
 * the DMA driver. This function is typical for the ping-pong case:
 *
 * 1. for the ping-pong case, the application is responsible for the descriptors; for example, the application should
 * prepare two descriptor tables with the macro.
827 * code
 define the link descriptor table in the application with the macro
829 DMA_ALLOCATE_LINK_DESCRIPTOR(nextDesc[2]);
830
831 DMA_SetupDescriptor(nextDesc0, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
832 srcStartAddr, dstStartAddr, nextDesc1);
833 DMA_SetupDescriptor(nextDesc1, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
834 srcStartAddr, dstStartAddr, nextDesc0);
835 DMA_SetChannelConfig(base, channel, trigger, isPeriph);
836 DMA_CreateHandle(handle, base, channel)
837 DMA_SubmitChannelDescriptor(handle, nextDesc0);
838 DMA_StartTransfer(handle);
839 * endcode
840 *
841 * param handle Pointer to DMA handle.
842 * param descriptor descriptor to submit.
843 */
void DMA_SubmitChannelDescriptor(dma_handle_t *handle, dma_descriptor_t *descriptor)
845 {
846 assert((NULL != handle) && (NULL != descriptor));
847
848 DMA_LoadChannelDescriptor(handle->base, handle->channel, descriptor);
849 }
850
851 /*!
852 * brief Submits the DMA channel transfer request.
853 *
854 * This function submits the DMA transfer request according to the transfer configuration structure.
855 * If the user submits the transfer request repeatedly, this function packs an unprocessed request as
 * a TCD and enables the scatter/gather feature to process it the next time.
 * It is used for the following cases:
 * 1. for a single transfer, the application does not need to allocate a descriptor table; the head descriptor can be
 used for it.
860 * code
861 DMA_CreateHandle(handle, base, channel)
862 DMA_PrepareChannelTransfer(config,srcStartAddr,dstStartAddr,xferCfg,type,trigger,NULL);
863 DMA_SubmitChannelTransfer(handle, config)
864 DMA_StartTransfer(handle)
865 * endcode
866 *
 * 2. for a linked transfer, the application is responsible for the link descriptors; for example, if 4 transfers are
 required, the application should prepare
 * three descriptor tables with the macro; the head descriptor in the driver can be used for the first transfer descriptor.
870 * code
 define the link descriptor table in the application with the macro
 DMA_ALLOCATE_LINK_DESCRIPTOR(nextDesc[3]);
873
874 DMA_SetupDescriptor(nextDesc0, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
875 srcStartAddr, dstStartAddr, nextDesc1);
876 DMA_SetupDescriptor(nextDesc1, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
877 srcStartAddr, dstStartAddr, nextDesc2);
878 DMA_SetupDescriptor(nextDesc2, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
879 srcStartAddr, dstStartAddr, NULL);
880 DMA_CreateHandle(handle, base, channel)
881 DMA_PrepareChannelTransfer(config,srcStartAddr,dstStartAddr,xferCfg,type,trigger,nextDesc0);
882 DMA_SubmitChannelTransfer(handle, config)
883 DMA_StartTransfer(handle)
884 * endcode
885 *
 * 3. for the ping-pong case, the application is responsible for the link descriptors; for example, the application should
 prepare
 * two descriptor tables with the macro; the head descriptor in the driver can be used for the first transfer descriptor.
889 * code
 define the link descriptor table in the application with the macro
 DMA_ALLOCATE_LINK_DESCRIPTOR(nextDesc[2]);
892
893 DMA_SetupDescriptor(nextDesc0, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
894 srcStartAddr, dstStartAddr, nextDesc1);
895 DMA_SetupDescriptor(nextDesc1, DMA_CHANNEL_XFER(reload, clrTrig, intA, intB, width, srcInc, dstInc, bytes),
896 srcStartAddr, dstStartAddr, nextDesc0);
897 DMA_CreateHandle(handle, base, channel)
898 DMA_PrepareChannelTransfer(config,srcStartAddr,dstStartAddr,xferCfg,type,trigger,nextDesc0);
899 DMA_SubmitChannelTransfer(handle, config)
900 DMA_StartTransfer(handle)
901 * endcode
902 * param handle DMA handle pointer.
903 * param config Pointer to DMA transfer configuration structure.
 * retval kStatus_DMA_Success It means the submit transfer request succeeded.
905 * retval kStatus_DMA_QueueFull It means TCD queue is full. Submit transfer request is not allowed.
906 * retval kStatus_DMA_Busy It means the given channel is busy, need to submit request later.
907 */
status_t DMA_SubmitChannelTransfer(dma_handle_t *handle, dma_channel_config_t *config)
909 {
910 assert((NULL != handle) && (NULL != config));
911 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base) != -1);
912 assert(handle->channel < (uint8_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base));
913 uint32_t instance = DMA_GetInstance(handle->base);
914 dma_descriptor_t *descriptor = (dma_descriptor_t *)(&s_dma_descriptor_table[instance][handle->channel]);
915
916 /* Previous transfer has not finished */
917 if (DMA_ChannelIsActive(handle->base, handle->channel))
918 {
919 return kStatus_DMA_Busy;
920 }
921
    /* setup channel trigger configurations */
923 DMA_SetChannelConfig(handle->base, handle->channel, config->trigger, config->isPeriph);
924
925 DMA_SetupChannelDescriptor(
926 descriptor, config->xferCfg, config->srcStartAddr, config->dstStartAddr, config->nextDesc,
927 config->trigger == NULL ? kDMA_NoWrap : config->trigger->wrap,
928 (config->trigger == NULL ? (uint32_t)kDMA_BurstSize1 :
929 ((uint32_t)config->trigger->burst & (DMA_CHANNEL_CFG_BURSTPOWER_MASK)) >>
930 DMA_CHANNEL_CFG_BURSTPOWER_SHIFT));
931
    /* Set the channel XFERCFG register according to the first channel descriptor. */
933 handle->base->CHANNEL[handle->channel].XFERCFG = config->xferCfg;
934
935 return kStatus_Success;
936 }
937
938 /*!
939 * brief Submits the DMA transfer request.
 * deprecated Do not use this function. It has been superseded by @ref DMA_SubmitChannelTransfer.
941 *
942 * This function submits the DMA transfer request according to the transfer configuration structure.
943 * If the user submits the transfer request repeatedly, this function packs an unprocessed request as
 * a TCD and enables the scatter/gather feature to process it the next time.
945 *
946 * param handle DMA handle pointer.
947 * param config Pointer to DMA transfer configuration structure.
 * retval kStatus_DMA_Success It means the submit transfer request succeeded.
949 * retval kStatus_DMA_QueueFull It means TCD queue is full. Submit transfer request is not allowed.
950 * retval kStatus_DMA_Busy It means the given channel is busy, need to submit request later.
951 */
status_t DMA_SubmitTransfer(dma_handle_t *handle, dma_transfer_config_t *config)
953 {
954 assert((NULL != handle) && (NULL != config));
955 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base) != -1);
956 assert(handle->channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base));
957
958 uint32_t instance = DMA_GetInstance(handle->base);
959 dma_descriptor_t *descriptor = (dma_descriptor_t *)(&s_dma_descriptor_table[instance][handle->channel]);
960
961 /* Previous transfer has not finished */
962 if (DMA_ChannelIsActive(handle->base, handle->channel))
963 {
964 return kStatus_DMA_Busy;
965 }
966
967 /* enable/disable peripheral request */
968 if (config->isPeriph)
969 {
970 DMA_EnableChannelPeriphRq(handle->base, handle->channel);
971 }
972 else
973 {
974 DMA_DisableChannelPeriphRq(handle->base, handle->channel);
975 }
976
977 DMA_CreateDescriptor(descriptor, &config->xfercfg, config->srcAddr, config->dstAddr, config->nextDesc);
    /* Set the channel XFERCFG register according to the first channel descriptor. */
979 handle->base->CHANNEL[handle->channel].XFERCFG = descriptor->xfercfg;
980
981 return kStatus_Success;
982 }
983
984 /*!
985 * brief DMA start transfer.
986 *
 * This function enables the channel request. The user can call this function after submitting the transfer request.
 * It triggers the transfer start with a software trigger only when the hardware trigger is not used.
989 *
990 * param handle DMA handle pointer.
991 */
void DMA_StartTransfer(dma_handle_t *handle)
993 {
994 assert(NULL != handle);
995 assert(FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base) != -1);
996
997 uint32_t channel = handle->channel;
998 assert(channel < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(handle->base));
999
1000 /* enable channel */
1001 DMA_EnableChannel(handle->base, channel);
1002
1003 /* Do software trigger only when HW trigger is not enabled. */
1004 if ((handle->base->CHANNEL[handle->channel].CFG & DMA_CHANNEL_CFG_HWTRIGEN_MASK) == 0U)
1005 {
1006 handle->base->CHANNEL[channel].XFERCFG |= DMA_CHANNEL_XFERCFG_SWTRIG_MASK;
1007 }
1008 }
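
/*
 * An end-to-end sketch tying the pieces together: a software-triggered,
 * memory-to-memory copy of 64 bytes using 32-bit accesses on an
 * application-defined channel. Buffer names, DEMO_DMA_CHANNEL, APP_DMACallback
 * and the DMA_CHANNEL_XFER argument values are assumptions following the
 * parameter order used in the examples above.
 *
 * code
 * DMA_Init(DMA0);
 * DMA_EnableChannel(DMA0, DEMO_DMA_CHANNEL);
 * DMA_SetChannelConfig(DMA0, DEMO_DMA_CHANNEL, NULL, false);
 * DMA_CreateHandle(&s_dmaHandle, DMA0, DEMO_DMA_CHANNEL);
 * DMA_SetCallback(&s_dmaHandle, APP_DMACallback, NULL);
 * DMA_SubmitChannelTransferParameter(&s_dmaHandle,
 *                                    DMA_CHANNEL_XFER(false, false, true, false, 4, 1, 1, 64),
 *                                    s_srcBuffer, s_destBuffer, NULL);
 * DMA_StartTransfer(&s_dmaHandle);
 * endcode
 */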
1009
void DMA_IRQHandle(DMA_Type *base)
1011 {
1012 dma_handle_t *handle;
1013 uint8_t channel_index;
1014 uint32_t startChannel = DMA_GetVirtualStartChannel(base);
1015 uint32_t i = 0;
1016 bool intEnabled = false, intA = false, intB = false;
1017
1018 /* Find channels that have completed transfer */
1019 for (i = 0; i < (uint32_t)FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base); i++)
1020 {
1021 handle = s_DMAHandle[i + startChannel];
1022 /* Handle is not present */
1023 if (NULL == handle)
1024 {
1025 continue;
1026 }
1027 channel_index = DMA_CHANNEL_INDEX(base, handle->channel);
1028 /* Channel uses INTA flag */
1029 intEnabled = ((DMA_COMMON_REG_GET(handle->base, handle->channel, INTENSET) & (1UL << channel_index)) != 0UL);
1030 intA = ((DMA_COMMON_REG_GET(handle->base, handle->channel, INTA) & (1UL << channel_index)) != 0UL);
1031 if (intEnabled && intA)
1032 {
1033 /* Clear INTA flag */
1034 DMA_COMMON_REG_SET(handle->base, handle->channel, INTA, (1UL << channel_index));
1035 if (handle->callback != NULL)
1036 {
1037 (handle->callback)(handle, handle->userData, true, kDMA_IntA);
1038 }
1039 }
1040
1041 intB = ((DMA_COMMON_REG_GET(handle->base, handle->channel, INTB) & (1UL << channel_index)) != 0UL);
1042 /* Channel uses INTB flag */
1043 if (intEnabled && intB)
1044 {
1045 /* Clear INTB flag */
1046 DMA_COMMON_REG_SET(handle->base, handle->channel, INTB, (1UL << channel_index));
1047 if (handle->callback != NULL)
1048 {
1049 (handle->callback)(handle, handle->userData, true, kDMA_IntB);
1050 }
1051 }
1052 /* Error flag */
1053 if ((DMA_COMMON_REG_GET(handle->base, handle->channel, ERRINT) & (1UL << channel_index)) != 0UL)
1054 {
1055 /* Clear error flag */
1056 DMA_COMMON_REG_SET(handle->base, handle->channel, ERRINT, (1UL << channel_index));
1057 if (handle->callback != NULL)
1058 {
1059 (handle->callback)(handle, handle->userData, false, kDMA_IntError);
1060 }
1061 }
1062 }
1063 }
1064
1065 void DMA0_DriverIRQHandler(void);
void DMA0_DriverIRQHandler(void)
1067 {
1068 DMA_IRQHandle(DMA0);
1069 SDK_ISR_EXIT_BARRIER;
1070 }
1071
1072 #if defined(DMA1)
1073 void DMA1_DriverIRQHandler(void);
void DMA1_DriverIRQHandler(void)
1075 {
1076 DMA_IRQHandle(DMA1);
1077 SDK_ISR_EXIT_BARRIER;
1078 }
1079 #endif
1080