/*
 * Copyright (c) 2020-2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Common part of the DMA drivers for several NXP SoCs.
 */
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <soc.h>
#include <zephyr/drivers/dma.h>
#include <fsl_dma.h>
#include <fsl_inputmux.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/drivers/dma/dma_mcux_lpc.h>

#define DT_DRV_COMPAT nxp_lpc_dma

LOG_MODULE_REGISTER(dma_mcux_lpc, CONFIG_DMA_LOG_LEVEL);

struct dma_mcux_lpc_config {
	DMA_Type *base;
	uint32_t otrig_base_address;
	uint32_t itrig_base_address;
	uint8_t num_of_channels;
	uint8_t num_of_otrigs;
	void (*irq_config_func)(const struct device *dev);
};

struct channel_data {
	SDK_ALIGN(dma_descriptor_t dma_descriptor_table[CONFIG_DMA_MCUX_LPC_NUMBER_OF_DESCRIPTORS],
		  FSL_FEATURE_DMA_LINK_DESCRIPTOR_ALIGN_SIZE);
	dma_handle_t dma_handle;
	const struct device *dev;
	void *user_data;
	dma_callback_t dma_callback;
	enum dma_channel_direction dir;
	uint8_t src_inc;
	uint8_t dst_inc;
	dma_descriptor_t *curr_descriptor;
	uint8_t num_of_descriptors;
	bool descriptors_queued;
	uint32_t width;
	bool busy;
};

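/* Records which source channel drives each DMA output trigger (OTrig) mux and
 * which channel it is linked to; EMPTY_OTRIG marks a free slot.
 */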
struct dma_otrig {
	int8_t source_channel;
	int8_t linked_channel;
};

struct dma_mcux_lpc_dma_data {
	struct channel_data *channel_data;
	struct dma_otrig *otrig_array;
	int8_t *channel_index;
	uint8_t num_channels_used;
};

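/* Serializes OTrig mux allocation when several channels are configured
 * concurrently.
 */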
struct k_spinlock configuring_otrigs;

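/* Maximum number of transfer units one descriptor can move: the XFERCOUNT
 * field holds (count - 1), so the usable maximum is the field mask plus one.
 */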
#define NXP_LPC_DMA_MAX_XFER ((DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK >> \
			      DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) + 1)

#define DEV_BASE(dev) \
	((DMA_Type *)((const struct dma_mcux_lpc_config *const)(dev)->config)->base)

#define DEV_CHANNEL_DATA(dev, ch)                                              \
	((struct channel_data *)(&(((struct dma_mcux_lpc_dma_data *)dev->data)->channel_data[ch])))

#define DEV_DMA_HANDLE(dev, ch)                                               \
		((dma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->dma_handle)))

#define EMPTY_OTRIG -1

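/* Channel completion callback invoked from the MCUX SDK DMA IRQ handler:
 * translates the SDK interrupt flags into Zephyr DMA status codes and
 * forwards them to the user-registered callback.
 */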
static void nxp_lpc_dma_callback(dma_handle_t *handle, void *param,
			      bool transferDone, uint32_t intmode)
{
	int ret = -EIO;
	struct channel_data *data = (struct channel_data *)param;
	uint32_t channel = handle->channel;

	if (intmode == kDMA_IntError) {
		DMA_AbortTransfer(handle);
	} else if (intmode == kDMA_IntA) {
		ret = DMA_STATUS_BLOCK;
	} else {
		ret = DMA_STATUS_COMPLETE;
	}

	data->busy = DMA_ChannelIsBusy(data->dma_handle.base, channel);

	if (data->dma_callback) {
		data->dma_callback(data->dev, data->user_data, channel, ret);
	}
}

/* Handles DMA interrupts and dispatches to the individual channel */
static void dma_mcux_lpc_irq_handler(const struct device *dev)
{
	DMA_IRQHandle(DEV_BASE(dev));
/*
 * Workaround for ARM errata 838869 (Cortex-M4, Cortex-M4F): a store
 * immediate overlapping an exception return operation might vector to
 * the incorrect interrupt.
 */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
	barrier_dsync_fence_full();
#endif
}

#ifdef CONFIG_SOC_SERIES_RW6XX
static inline void rw6xx_dma_addr_fixup(struct dma_block_config *block)
{
	/* The RW6xx AHB design does not route the DMA engine through the
	 * FlexSPI cache. Therefore, to use DMA from the FlexSPI space we must
	 * adjust the source address to use the non-cached FlexSPI region.
	 * The cached FlexSPI region is at 0x800_0000 (nonsecure) or
	 * 0x1800_0000 (secure). We move the address into the non-cached
	 * region, which is at 0x4800_0000 or 0x5800_0000.
	 */
	if (((block->source_address & 0xF8000000) == 0x18000000) ||
	  ((block->source_address & 0xF8000000) == 0x8000000)) {
		block->source_address = block->source_address + 0x40000000;
	}
	if (((block->dest_address & 0xF8000000) == 0x18000000) ||
	  ((block->dest_address & 0xF8000000) == 0x8000000)) {
		block->dest_address = block->dest_address + 0x40000000;
	}

}
#endif

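/* Builds the SDK descriptor chain for one Zephyr block configuration:
 * blocks larger than the hardware XFERCOUNT limit are split across several
 * descriptors, and the chain is optionally looped back to its head when a
 * circular (reload) transfer was requested.
 */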
static int dma_mcux_lpc_queue_descriptors(struct channel_data *data,
					   struct dma_block_config *block,
					   uint8_t src_inc,
					   uint8_t dest_inc,
					   bool callback_en)
{
	uint32_t xfer_config = 0U;
	dma_descriptor_t *next_descriptor = NULL;
	uint32_t width = data->width;
	uint32_t max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;
	bool setup_extra_descriptor = false;
	/* intA is used to indicate transfer of a block */
	uint8_t enable_a_interrupt;
	/* intB is used to indicate complete transfer of the list of blocks */
	uint8_t enable_b_interrupt;
	uint8_t reload;
	struct dma_block_config local_block;
	bool last_block = false;

	memcpy(&local_block, block, sizeof(struct dma_block_config));

	do {
		/* Descriptors are queued during dma_configure, do not add more
		 * during dma_reload.
		 */
		if (!data->descriptors_queued) {
			/* Increase the number of descriptors queued */
			data->num_of_descriptors++;

			if (data->num_of_descriptors >= CONFIG_DMA_MCUX_LPC_NUMBER_OF_DESCRIPTORS) {
				return -ENOMEM;
			}
			/* Do we need to queue additional DMA descriptors for this block */
			if ((local_block.block_size > max_xfer_bytes) ||
			    (local_block.next_block != NULL)) {
				/* Allocate DMA descriptors */
				next_descriptor =
					&data->dma_descriptor_table[data->num_of_descriptors];
			} else {
				/* Check if this is the last block to transfer */
				if (local_block.next_block == NULL) {
					last_block = true;
					/* Last descriptor, check if we should setup a
					 * circular chain
					 */
					if (!local_block.source_reload_en) {
						/* No more descriptors */
						next_descriptor = NULL;
					} else if (data->num_of_descriptors == 1) {
						/* Allocate one more descriptor for
						 * ping-pong transfer
						 */
						next_descriptor = &data->dma_descriptor_table[
							data->num_of_descriptors];

						setup_extra_descriptor = true;
					} else {
						/* Loop back to the head */
						next_descriptor = data->dma_descriptor_table;
					}
				}
			}
		} else {
			/* Descriptors have already been allocated, reuse them as this
			 * is called from a reload function
			 */
			next_descriptor = data->curr_descriptor->linkToNextDesc;
		}

		/* SPI TX transfers need to queue a DMA descriptor to
		 * indicate the end of the transfer. The source and destination
		 * addresses do not need to change for these transactions,
		 * and the transfer width is 4 bytes.
		 */
		if ((local_block.source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) &&
			(local_block.dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE)) {
			src_inc = 0;
			dest_inc = 0;
			width = sizeof(uint32_t);
		}

		/* Fire an interrupt after the whole block has been transferred */
		if (local_block.block_size > max_xfer_bytes) {
			enable_a_interrupt = 0;
			enable_b_interrupt = 0;
		} else {
			/* Use intB when this is the end of the block list and transfer */
			if (last_block) {
				enable_a_interrupt = 0;
				enable_b_interrupt = 1;
			} else {
				/* Use intA when we need an interrupt per block
				 * Enable or disable intA based on user configuration
				 */
				enable_a_interrupt = callback_en;
				enable_b_interrupt = 0;
			}
		}

		/* Reload if we have more descriptors */
		if (next_descriptor) {
			reload = 1;
		} else {
			reload = 0;
		}

		/* Enable interrupt and reload for the descriptor */
		xfer_config = DMA_CHANNEL_XFER(reload, 0UL, enable_a_interrupt,
					enable_b_interrupt,
					width,
					src_inc,
					dest_inc,
					MIN(local_block.block_size, max_xfer_bytes));

#ifdef CONFIG_SOC_SERIES_RW6XX
		rw6xx_dma_addr_fixup(&local_block);
#endif
		DMA_SetupDescriptor(data->curr_descriptor,
				xfer_config,
				(void *)local_block.source_address,
				(void *)local_block.dest_address,
				(void *)next_descriptor);

		data->curr_descriptor = next_descriptor;

		if (local_block.block_size > max_xfer_bytes) {
			local_block.block_size -= max_xfer_bytes;
			if (src_inc) {
				local_block.source_address += max_xfer_bytes;
			}
			if (dest_inc) {
				local_block.dest_address += max_xfer_bytes;
			}
		} else {
			local_block.block_size = 0;
		}
	} while (local_block.block_size > 0);

	/* If an extra descriptor was reserved for the ping-pong (circular)
	 * case above, set it up here.
	 */
	if (setup_extra_descriptor) {
		/* Increase the number of descriptors queued */
		data->num_of_descriptors++;

		/* Loop back to the head */
		next_descriptor = data->dma_descriptor_table;

		/* Leave curr pointer unchanged so we start queuing new data from
		 * this descriptor
		 */
		/* Enable or disable interrupt based on user request.
		 * Reload for the descriptor.
		 */
		xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, callback_en, 0U,
					width,
					src_inc,
					dest_inc,
					MIN(local_block.block_size, max_xfer_bytes));
		/* Mark this as invalid */
		xfer_config &= ~DMA_CHANNEL_XFERCFG_CFGVALID_MASK;
#ifdef CONFIG_SOC_SERIES_RW6XX
		rw6xx_dma_addr_fixup(&local_block);
#endif
		DMA_SetupDescriptor(data->curr_descriptor,
				xfer_config,
				(void *)local_block.source_address,
				(void *)local_block.dest_address,
				(void *)next_descriptor);
	}

	return 0;
}

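/* Reset the per-channel bookkeeping before a channel is (re)configured. */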
static void dma_mcux_lpc_clear_channel_data(struct channel_data *data)
{
	data->dma_callback = NULL;
	data->dir = 0;
	data->src_inc = 0;
	data->dst_inc = 0;
	data->descriptors_queued = false;
	data->num_of_descriptors = 0;
	data->curr_descriptor = NULL;
	data->width = 0;
}

/* Configure a channel */
static int dma_mcux_lpc_configure(const struct device *dev, uint32_t channel,
				  struct dma_config *config)
{
	const struct dma_mcux_lpc_config *dev_config;
	dma_handle_t *p_handle;
	uint32_t xfer_config = 0U;
	struct channel_data *data;
	struct dma_mcux_lpc_dma_data *dma_data;
	struct dma_block_config *block_config;
	uint32_t virtual_channel;
	uint8_t otrig_index;
	uint8_t src_inc = 1, dst_inc = 1;
	bool is_periph = true;
	uint8_t width;
	uint32_t max_xfer_bytes;
	uint8_t reload = 0;
	bool complete_callback;

	if (NULL == dev || NULL == config) {
		return -EINVAL;
	}

	dev_config = dev->config;
	dma_data = dev->data;
	block_config = config->head_block;
	/* The DMA controller deals with just one transfer
	 * size, though the API provides separate sizes
	 * for source and dest. So assert that the source
	 * and dest sizes are the same.
	 */
	assert(config->dest_data_size == config->source_data_size);
	width = config->dest_data_size;

	/* If skip is set on both source and destination
	 * then skip by the same amount on both sides
	 */
	if (block_config->source_gather_en && block_config->dest_scatter_en) {
		assert(block_config->source_gather_interval ==
		       block_config->dest_scatter_interval);
	}

	max_xfer_bytes = NXP_LPC_DMA_MAX_XFER * width;

	/*
	 * Check if circular mode is requested.
	 */
	if (config->head_block->source_reload_en ||
	    config->head_block->dest_reload_en) {
		reload = 1;
	}

	/* Check if we have a free slot to store the DMA channel data */
	if (dma_data->num_channels_used > dev_config->num_of_channels) {
		LOG_ERR("out of DMA channel %d", channel);
		return -EINVAL;
	}

	/* Check if the dma channel number is valid */
	if (channel >= dev_config->num_of_channels) {
		LOG_ERR("invalid DMA channel number %d", channel);
		return -EINVAL;
	}

	if (config->source_data_size != 4U &&
		config->source_data_size != 2U &&
		config->source_data_size != 1U) {
		LOG_ERR("Source unit size error, %d", config->source_data_size);
		return -EINVAL;
	}

	if (config->dest_data_size != 4U &&
		config->dest_data_size != 2U &&
		config->dest_data_size != 1U) {
		LOG_ERR("Dest unit size error, %d", config->dest_data_size);
		return -EINVAL;
	}

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		is_periph = false;
		if (block_config->source_gather_en) {
			src_inc = block_config->source_gather_interval / width;
			/* The current controller only supports incrementing the
			 * source and destination by up to 4 times the transfer width
			 */
			if ((src_inc > 4) || (src_inc == 3)) {
				return -EINVAL;
			}
		}

		if (block_config->dest_scatter_en) {
			dst_inc = block_config->dest_scatter_interval / width;
			/* The current controller only supports incrementing the
			 * source and destination by up to 4 times the transfer width
			 */
			if ((dst_inc > 4) || (dst_inc == 3)) {
				return -EINVAL;
			}
		}
		break;
	case MEMORY_TO_PERIPHERAL:
		/* Set the source increment value */
		if (block_config->source_gather_en) {
			src_inc = block_config->source_gather_interval / width;
			/* The current controller only supports incrementing the
			 * source and destination by up to 4 times the transfer width
			 */
			if ((src_inc > 4) || (src_inc == 3)) {
				return -EINVAL;
			}
		}

		dst_inc = 0;
		break;
	case PERIPHERAL_TO_MEMORY:
		src_inc = 0;

		/* Set the destination increment value */
		if (block_config->dest_scatter_en) {
			dst_inc = block_config->dest_scatter_interval / width;
			/* The current controller only supports incrementing the
			 * source and destination by up to 4 times the transfer width
			 */
			if ((dst_inc > 4) || (dst_inc == 3)) {
				return -EINVAL;
			}
		}
		break;
	default:
		LOG_ERR("unsupported transfer direction");
		return -EINVAL;
	}

	/* Check if user does not want to increment address */
	if (block_config->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		src_inc = 0;
	}

	if (block_config->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		dst_inc = 0;
	}

	/* If needed, allocate a slot to store dma channel data */
	if (dma_data->channel_index[channel] == -1) {
		dma_data->channel_index[channel] = dma_data->num_channels_used;
		dma_data->num_channels_used++;
		/* Get the slot number that has the dma channel data */
		virtual_channel = dma_data->channel_index[channel];
		/* dma channel data */
		p_handle = DEV_DMA_HANDLE(dev, virtual_channel);
		data = DEV_CHANNEL_DATA(dev, virtual_channel);

		DMA_CreateHandle(p_handle, DEV_BASE(dev), channel);
		DMA_SetCallback(p_handle, nxp_lpc_dma_callback, (void *)data);
	} else {
		/* Get the slot number that has the dma channel data */
		virtual_channel = dma_data->channel_index[channel];
		/* dma channel data */
		p_handle = DEV_DMA_HANDLE(dev, virtual_channel);
		data = DEV_CHANNEL_DATA(dev, virtual_channel);
	}

	dma_mcux_lpc_clear_channel_data(data);

	data->dir = config->channel_direction;
	/* Save the increment values for the reload function */
	data->src_inc = src_inc;
	data->dst_inc = dst_inc;

	if (data->busy) {
		DMA_AbortTransfer(p_handle);
	}

	LOG_DBG("channel is %d", p_handle->channel);

	k_spinlock_key_t otrigs_key = k_spin_lock(&configuring_otrigs);

	data->width = width;

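	/* Channel chaining: route the source channel's DMA output trigger
	 * through INPUTMUX so that it hardware-triggers the linked channel.
	 */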
	if (config->source_chaining_en || config->dest_chaining_en) {
		/* Chaining is enabled */
		if (!dev_config->otrig_base_address || !dev_config->itrig_base_address) {
			LOG_ERR("Calling function tried to set up channel"
			" chaining but the current platform is missing"
			" the correct trigger base addresses.");
			k_spin_unlock(&configuring_otrigs, otrigs_key);
			return -ENXIO;
		}

		LOG_DBG("link dma 0 channel %d with channel %d",
			channel, config->linked_channel);
		uint8_t is_otrig_available = 0;

		for (otrig_index = 0; otrig_index < dev_config->num_of_otrigs;
			++otrig_index) {
			if (dma_data->otrig_array[otrig_index].linked_channel == EMPTY_OTRIG ||
			    dma_data->otrig_array[otrig_index].source_channel == channel) {
				if (dma_data->otrig_array[otrig_index].source_channel == channel) {
					int ChannelToDisable =
						dma_data->otrig_array[otrig_index].linked_channel;
					DMA_DisableChannel(DEV_BASE(dev), ChannelToDisable);
					DEV_BASE(dev)->CHANNEL[ChannelToDisable].CFG &=
						~DMA_CHANNEL_CFG_HWTRIGEN_MASK;
				}
				is_otrig_available = 1;
				break;
			}
		}
		if (!is_otrig_available) {
			LOG_ERR("Calling function tried to set up multiple"
			" channels to be configured but the dma driver has"
			" run out of OTrig Muxes");
			k_spin_unlock(&configuring_otrigs, otrigs_key);
			return -EINVAL;
		}

		/* The DMA trigger signals are routed through INPUTMUX, so the
		 * linked channel must be configured for hardware triggering.
		 */
		DEV_BASE(dev)->CHANNEL[config->linked_channel].CFG |=
			DMA_CHANNEL_CFG_HWTRIGEN_MASK;

		DMA_EnableChannel(DEV_BASE(dev), config->linked_channel);

		/* Link OTrig Muxes with passed-in channels */
		INPUTMUX_AttachSignal(INPUTMUX, otrig_index,
			dev_config->otrig_base_address + channel);
		INPUTMUX_AttachSignal(INPUTMUX, config->linked_channel,
				dev_config->itrig_base_address + otrig_index);

		/* Otrig is now connected with linked channel */
		dma_data->otrig_array[otrig_index].source_channel = channel;
		dma_data->otrig_array[otrig_index].linked_channel = config->linked_channel;
	} else {
		/* Chaining is _NOT_ enabled, free any connected OTrig */
		for (otrig_index = 0; otrig_index < dev_config->num_of_otrigs; otrig_index++) {
			if (dma_data->otrig_array[otrig_index].linked_channel != EMPTY_OTRIG &&
			   (channel == dma_data->otrig_array[otrig_index].source_channel)) {
				int ChannelToDisable =
					dma_data->otrig_array[otrig_index].linked_channel;
				DMA_DisableChannel(DEV_BASE(dev), ChannelToDisable);
				DEV_BASE(dev)->CHANNEL[ChannelToDisable].CFG &=
					~DMA_CHANNEL_CFG_HWTRIGEN_MASK;
				dma_data->otrig_array[otrig_index].linked_channel = EMPTY_OTRIG;
				dma_data->otrig_array[otrig_index].source_channel = EMPTY_OTRIG;
				break;
			}
		}
	}

	k_spin_unlock(&configuring_otrigs, otrigs_key);

	complete_callback = config->complete_callback_en;

	/* Check if we need to queue DMA descriptors */
	if ((block_config->block_size > max_xfer_bytes) ||
		(block_config->next_block != NULL)) {
		/* Allocate a DMA descriptor */
		data->curr_descriptor = data->dma_descriptor_table;

		if (block_config->block_size > max_xfer_bytes) {
			/* Disable the interrupt, as this descriptor does not cover
			 * the entire transfer. Enable reload for the descriptor.
			 */
			xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, 0UL, 0UL,
					width,
					src_inc,
					dst_inc,
					max_xfer_bytes);
		} else {
			/* Enable the INTA interrupt if the user requested a callback
			 * for each block. Enable reload for the descriptor.
			 */
			xfer_config = DMA_CHANNEL_XFER(1UL, 0UL, complete_callback, 0UL,
					width,
					src_inc,
					dst_inc,
					block_config->block_size);
		}
	} else {
		/* Enable interrupt for the descriptor */
		xfer_config = DMA_CHANNEL_XFER(0UL, 0UL, 1UL, 0UL,
				width,
				src_inc,
				dst_inc,
				block_config->block_size);
	}
	/* DMA controller requires that the address be aligned to transfer size */
	assert(block_config->source_address == ROUND_UP(block_config->source_address, width));
	assert(block_config->dest_address == ROUND_UP(block_config->dest_address, width));

#ifdef CONFIG_SOC_SERIES_RW6XX
	rw6xx_dma_addr_fixup(block_config);
#endif

	DMA_SubmitChannelTransferParameter(p_handle,
					xfer_config,
					(void *)block_config->source_address,
					(void *)block_config->dest_address,
					(void *)data->curr_descriptor);

	/* Start queuing DMA descriptors */
	if (data->curr_descriptor) {
		if (block_config->block_size > max_xfer_bytes) {
			/* Queue additional DMA descriptors because the amount of data to
			 * be transferred is greater than the DMA descriptor's max XFERCOUNT.
			 */
			struct dma_block_config local_block = { 0 };

			if (src_inc) {
				local_block.source_address = block_config->source_address
							     + max_xfer_bytes;
			} else {
				local_block.source_address = block_config->source_address;
			}
			if (dst_inc) {
				local_block.dest_address = block_config->dest_address
							     + max_xfer_bytes;
			} else {
				local_block.dest_address = block_config->dest_address;
			}
			local_block.block_size = block_config->block_size - max_xfer_bytes;
			local_block.next_block = block_config->next_block;
			local_block.source_reload_en = reload;

			if (block_config->next_block == NULL) {
				/* This is the last block, enable callback. */
				complete_callback = true;
			}

			if (dma_mcux_lpc_queue_descriptors(data, &local_block,
					src_inc, dst_inc, complete_callback)) {
				return -ENOMEM;
			}
		}
		/* Get the next block to transfer */
		block_config = block_config->next_block;

		while (block_config != NULL) {
			block_config->source_reload_en = reload;

			/* DMA controller requires that the address be aligned to transfer size */
			assert(block_config->source_address ==
			       ROUND_UP(block_config->source_address, width));
			assert(block_config->dest_address ==
			       ROUND_UP(block_config->dest_address, width));

			if (block_config->next_block == NULL) {
				/* This is the last block. Enable callback if not enabled. */
				complete_callback = true;
			}
			if (dma_mcux_lpc_queue_descriptors(data, block_config,
				src_inc, dst_inc, complete_callback)) {
				return -ENOMEM;
			}

			/* Get the next block and start queuing descriptors */
			block_config = block_config->next_block;
		}
		/* We have finished queuing DMA descriptors */
		data->descriptors_queued = true;
	}

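	/* Program the channel CFG register: honor an explicit trigger setup
	 * packed into dma_slot; otherwise simply enable or disable the
	 * peripheral request depending on the transfer direction.
	 */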
	if (config->dma_slot) {
		uint32_t cfg_reg = 0;

		/* User supplied manual trigger configuration */
		if (config->dma_slot & LPC_DMA_PERIPH_REQ_EN) {
			cfg_reg |= DMA_CHANNEL_CFG_PERIPHREQEN_MASK;
		}
		if (config->dma_slot & LPC_DMA_HWTRIG_EN) {
			/* Setup hardware trigger */
			cfg_reg |= DMA_CHANNEL_CFG_HWTRIGEN_MASK;
			if (config->dma_slot & LPC_DMA_TRIGTYPE_LEVEL) {
				cfg_reg |= DMA_CHANNEL_CFG_TRIGTYPE_MASK;
			}
			if (config->dma_slot & LPC_DMA_TRIGPOL_HIGH_RISING) {
				cfg_reg |= DMA_CHANNEL_CFG_TRIGPOL_MASK;
			}
			if (config->dma_slot & LPC_DMA_TRIGBURST) {
				cfg_reg |= DMA_CHANNEL_CFG_TRIGBURST_MASK;
				cfg_reg |= DMA_CHANNEL_CFG_BURSTPOWER(
					LPC_DMA_GET_BURSTPOWER(config->dma_slot));
			}
		}
		p_handle->base->CHANNEL[p_handle->channel].CFG = cfg_reg;
	} else if (is_periph) {
		DMA_EnableChannelPeriphRq(p_handle->base, p_handle->channel);
	} else {
		DMA_DisableChannelPeriphRq(p_handle->base, p_handle->channel);
	}
	DMA_SetChannelPriority(p_handle->base, p_handle->channel, config->channel_priority);

	data->busy = false;
	if (config->dma_callback) {
		LOG_DBG("INSTALL call back on channel %d", channel);
		data->user_data = config->user_data;
		data->dma_callback = config->dma_callback;
		data->dev = dev;
	}

	return 0;
}

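/* Start a transfer previously set up by dma_mcux_lpc_configure(). */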
static int dma_mcux_lpc_start(const struct device *dev, uint32_t channel)
{
	struct dma_mcux_lpc_dma_data *dev_data = dev->data;
	int8_t virtual_channel = dev_data->channel_index[channel];
	struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel);

	LOG_DBG("START TRANSFER");
	LOG_DBG("DMA CTRL 0x%x", DEV_BASE(dev)->CTRL);
	data->busy = true;
	DMA_StartTransfer(DEV_DMA_HANDLE(dev, virtual_channel));
	return 0;
}

static int dma_mcux_lpc_stop(const struct device *dev, uint32_t channel)
{
	struct dma_mcux_lpc_dma_data *dev_data = dev->data;
	int8_t virtual_channel = dev_data->channel_index[channel];
	struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel);

	if (!data->busy) {
		return 0;
	}
	DMA_AbortTransfer(DEV_DMA_HANDLE(dev, virtual_channel));
	DMA_DisableChannel(DEV_BASE(dev), channel);

	data->busy = false;
	return 0;
}

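/* Reload a channel with a new buffer: submit a single transfer when no
 * descriptor chain exists, otherwise append the buffer to the existing chain.
 */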
static int dma_mcux_lpc_reload(const struct device *dev, uint32_t channel,
			       uint32_t src, uint32_t dst, size_t size)
{
	struct dma_mcux_lpc_dma_data *dev_data = dev->data;
	int8_t virtual_channel = dev_data->channel_index[channel];
	struct channel_data *data = DEV_CHANNEL_DATA(dev, virtual_channel);
	uint32_t xfer_config = 0U;

	/* DMA controller requires that the address be aligned to transfer size */
	assert(src == ROUND_UP(src, data->width));
	assert(dst == ROUND_UP(dst, data->width));

	if (!data->descriptors_queued) {
		dma_handle_t *p_handle;

		p_handle = DEV_DMA_HANDLE(dev, virtual_channel);

		/* Only one buffer, enable interrupt */
		xfer_config = DMA_CHANNEL_XFER(0UL, 0UL, 1UL, 0UL,
					data->width,
					data->src_inc,
					data->dst_inc,
					size);
		DMA_SubmitChannelTransferParameter(p_handle,
						xfer_config,
						(void *)src,
						(void *)dst,
						NULL);
	} else {
		struct dma_block_config local_block = { 0 };

		local_block.source_address = src;
		local_block.dest_address = dst;
		local_block.block_size = size;
		local_block.source_reload_en = 1;
		dma_mcux_lpc_queue_descriptors(data, &local_block,
					       data->src_inc, data->dst_inc, true);
	}

	return 0;
}

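/* Report whether the channel is busy and how many bytes remain to transfer. */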
static int dma_mcux_lpc_get_status(const struct device *dev, uint32_t channel,
				   struct dma_status *status)
{
	const struct dma_mcux_lpc_config *config = dev->config;
	struct dma_mcux_lpc_dma_data *dev_data = dev->data;
	int8_t virtual_channel;
	struct channel_data *data;

	if (channel >= config->num_of_channels) {
		return -EINVAL;
	}

	virtual_channel = dev_data->channel_index[channel];
	data = DEV_CHANNEL_DATA(dev, virtual_channel);

	/* Busy only if a transfer is running and a virtual channel is assigned */
	if (data->busy && (virtual_channel != -1)) {
		status->busy = true;
		status->pending_length = DMA_GetRemainingBytes(DEV_BASE(dev), channel);
	} else {
		status->busy = false;
		status->pending_length = 0;
	}
	status->dir = data->dir;
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CTRL);
	LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INTSTAT);

	return 0;
}

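/* Driver init: mark all OTrig muxes and channel-data slots as unused, then
 * initialize the DMA engine and INPUTMUX and connect the IRQ.
 */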
static int dma_mcux_lpc_init(const struct device *dev)
{
	const struct dma_mcux_lpc_config *config = dev->config;
	struct dma_mcux_lpc_dma_data *data = dev->data;

	/* Indicate that the OTrig Muxes are not connected */
	for (int i = 0; i < config->num_of_otrigs; i++) {
		data->otrig_array[i].source_channel = EMPTY_OTRIG;
		data->otrig_array[i].linked_channel = EMPTY_OTRIG;
	}

	/*
	 * Initialize to -1 to indicate dma channel does not have a slot
	 * assigned to store dma channel data
	 */
	for (int i = 0; i < config->num_of_channels; i++) {
		data->channel_index[i] = -1;
	}

	data->num_channels_used = 0;

	DMA_Init(DEV_BASE(dev));
	INPUTMUX_Init(INPUTMUX);

	config->irq_config_func(dev);

	return 0;
}

static DEVICE_API(dma, dma_mcux_lpc_api) = {
	.config = dma_mcux_lpc_configure,
	.start = dma_mcux_lpc_start,
	.stop = dma_mcux_lpc_stop,
	.reload = dma_mcux_lpc_reload,
	.get_status = dma_mcux_lpc_get_status,
};

#define DMA_MCUX_LPC_CONFIG_FUNC(n)					\
	static void dma_mcux_lpc_config_func_##n(const struct device *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n),				\
			    DT_INST_IRQ(n, priority),			\
			    dma_mcux_lpc_irq_handler, DEVICE_DT_INST_GET(n), 0);\
									\
		irq_enable(DT_INST_IRQN(n));				\
	}
#define DMA_MCUX_LPC_IRQ_CFG_FUNC_INIT(n)				\
	.irq_config_func = dma_mcux_lpc_config_func_##n
#define DMA_MCUX_LPC_INIT_CFG(n)					\
	DMA_MCUX_LPC_DECLARE_CFG(n,					\
				 DMA_MCUX_LPC_IRQ_CFG_FUNC_INIT(n))

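/* Number of channel_data slots to allocate per instance: all hardware
 * channels by default, or capped by
 * CONFIG_DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED when it is non-zero.
 */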
#define DMA_MCUX_LPC_NUM_USED_CHANNELS(n)				\
	COND_CODE_0(CONFIG_DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED,	\
		    (DT_INST_PROP(n, dma_channels)),			\
		    (MIN(CONFIG_DMA_MCUX_LPC_NUMBER_OF_CHANNELS_ALLOCATED,	\
			DT_INST_PROP(n, dma_channels))))

#define DMA_MCUX_LPC_DECLARE_CFG(n, IRQ_FUNC_INIT)			\
static const struct dma_mcux_lpc_config dma_##n##_config = {		\
	.base = (DMA_Type *)DT_INST_REG_ADDR(n),			\
	.num_of_channels = DT_INST_PROP(n, dma_channels),		\
	.num_of_otrigs = DT_INST_PROP_OR(n, nxp_dma_num_of_otrigs, 0),			\
	.otrig_base_address = DT_INST_PROP_OR(n, nxp_dma_otrig_base_address, 0x0),	\
	.itrig_base_address = DT_INST_PROP_OR(n, nxp_dma_itrig_base_address, 0x0),	\
	IRQ_FUNC_INIT							\
}

#define DMA_INIT(n) \
									\
	static const struct dma_mcux_lpc_config dma_##n##_config;	\
									\
	static struct channel_data dma_##n##_channel_data_arr		\
			[DMA_MCUX_LPC_NUM_USED_CHANNELS(n)] = {0};	\
									\
	static struct dma_otrig dma_##n##_otrig_arr			\
			[DT_INST_PROP_OR(n, nxp_dma_num_of_otrigs, 0)]; \
									\
	static int8_t							\
		dma_##n##_channel_index_arr				\
				[DT_INST_PROP(n, dma_channels)] = {0};	\
									\
	static struct dma_mcux_lpc_dma_data dma_data_##n = {		\
		.channel_data = dma_##n##_channel_data_arr,		\
		.channel_index = dma_##n##_channel_index_arr,		\
		.otrig_array = dma_##n##_otrig_arr,			\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n,					\
			    &dma_mcux_lpc_init,				\
			    NULL,					\
			    &dma_data_##n, &dma_##n##_config,		\
			    PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
			    &dma_mcux_lpc_api);				\
									\
	DMA_MCUX_LPC_CONFIG_FUNC(n)					\
	DMA_MCUX_LPC_INIT_CFG(n);

DT_INST_FOREACH_STATUS_OKAY(DMA_INIT)