/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "dma_nxp_edma.h"

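/* timeout (in microseconds) used when waiting for a channel to go inactive
 * during release - see WAIT_FOR() usage in edma_channel_release().
 */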
#define EDMA_ACTIVE_TIMEOUT 50

/* TODO list:
 * 1) Support for requesting a specific channel.
 * 2) Support for checking if a DMA transfer is pending when attempting config. (?)
 * 3) Support for error interrupt.
 * 4) Support for reporting errors on buffer overflow/underrun.
 * 5) Ideally, HALFMAJOR should be set on a per-channel basis, not through a
 * Kconfig option. If not possible, this should be done through a DTS property.
 * Also, maybe do the same for the INTMAJOR IRQ.
 */

static void edma_isr(const void *parameter)
{
	const struct edma_config *cfg;
	struct edma_data *data;
	struct edma_channel *chan;
	int ret;
	uint32_t update_size;

	chan = (struct edma_channel *)parameter;
	cfg = chan->dev->config;
	data = chan->dev->data;

	if (chan->state == CHAN_STATE_RELEASING || chan->state == CHAN_STATE_INIT) {
		/* skip, not safe to access channel register space */
		return;
	}

	if (!EDMA_ChannelRegRead(data->hal_cfg, chan->id, EDMA_TCD_CH_INT)) {
		/* skip, interrupt was probably triggered by another channel */
		return;
	}

	/* clear interrupt */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan->id,
			      EDMA_TCD_CH_INT, EDMA_TCD_CH_INT_MASK, 0);

	if (chan->cyclic_buffer) {
		if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
			update_size = chan->bsize / 2;
		} else {
			update_size = chan->bsize;
		}

		/* TODO: add support for error handling here */
		ret = EDMA_CHAN_PRODUCE_CONSUME_A(chan, update_size);
		if (ret < 0) {
			LOG_ERR("chan %d buffer overflow/underrun", chan->id);
		}
	}

	/* TODO: are there any sanity checks we have to perform before invoking
	 * the registered callback?
	 */
	if (chan->cb) {
		chan->cb(chan->dev, chan->arg, chan->id, DMA_STATUS_COMPLETE);
	}
}

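/* map a Zephyr channel ID to the driver's per-channel bookkeeping data */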
static struct edma_channel *lookup_channel(const struct device *dev,
					   uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	int i;

	data = dev->data;
	cfg = dev->config;

	/* optimization: if the dma-channels property is present then
	 * the channel data associated with the passed channel ID
	 * can be found at index chan_id in the array of channels.
	 */
	if (cfg->contiguous_channels) {
		/* check for index out of bounds */
		if (chan_id >= data->ctx.dma_channels) {
			return NULL;
		}

		return &data->channels[chan_id];
	}

	/* channels are passed through the valid-channels property.
	 * As such, since some channels may be missing we need to
	 * look through the entire channels array for an ID match.
	 */
	for (i = 0; i < data->ctx.dma_channels; i++) {
		if (data->channels[i].id == chan_id) {
			return &data->channels[i];
		}
	}

	return NULL;
}

static int edma_config(const struct device *dev, uint32_t chan_id,
		       struct dma_config *dma_cfg)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	uint32_t transfer_type;
	int ret;

	data = dev->data;
	cfg = dev->config;

	if (!dma_cfg->head_block) {
		LOG_ERR("head block shouldn't be NULL");
		return -EINVAL;
	}

	/* validate source data size (SSIZE) */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->source_data_size)) {
		LOG_ERR("invalid source data size: %d",
			dma_cfg->source_data_size);
		return -EINVAL;
	}

	/* validate destination data size (DSIZE) */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->dest_data_size)) {
		LOG_ERR("invalid destination data size: %d",
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* validate configured alignment */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, CONFIG_DMA_NXP_EDMA_ALIGN)) {
		LOG_ERR("configured alignment %d is invalid",
			CONFIG_DMA_NXP_EDMA_ALIGN);
		return -EINVAL;
	}

	/* Scatter-Gather configurations currently not supported */
	if (dma_cfg->block_count != 1) {
		LOG_ERR("number of blocks %d not supported", dma_cfg->block_count);
		return -ENOTSUP;
	}

	/* source address shouldn't be NULL */
	if (!dma_cfg->head_block->source_address) {
		LOG_ERR("source address cannot be NULL");
		return -EINVAL;
	}

	/* destination address shouldn't be NULL */
	if (!dma_cfg->head_block->dest_address) {
		LOG_ERR("destination address cannot be NULL");
		return -EINVAL;
	}

	/* check the source address's (SADDR) alignment with respect to the data size (SSIZE).
	 *
	 * Failing to meet this condition will lead to the assertion of the SAE
	 * bit (see CHn_ES register).
	 *
	 * TODO: this will also restrict scenarios such as the following:
	 *	SADDR is 8B aligned and SSIZE is 16B. I've tested this
	 *	scenario and it seems to raise no hardware errors (I'm assuming
	 *	because this doesn't break the 8B boundary of the 64-bit system
	 *	I tested it on). Is there a need to allow such a scenario?
	 */
	if (dma_cfg->head_block->source_address % dma_cfg->source_data_size) {
		LOG_ERR("source address 0x%x alignment doesn't match data size %d",
			dma_cfg->head_block->source_address,
			dma_cfg->source_data_size);
		return -EINVAL;
	}

	/* check the destination address's (DADDR) alignment with respect to the data size (DSIZE).
	 * Failing to meet this condition will lead to the assertion of the DAE
	 * bit (see CHn_ES register).
	 */
	if (dma_cfg->head_block->dest_address % dma_cfg->dest_data_size) {
		LOG_ERR("destination address 0x%x alignment doesn't match data size %d",
			dma_cfg->head_block->dest_address,
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* source burst length should match destination burst length.
	 * This is because the burst length is the equivalent of NBYTES which
	 * is used for both the destination and the source.
	 */
	if (dma_cfg->source_burst_length !=
	    dma_cfg->dest_burst_length) {
		LOG_ERR("source burst length %d doesn't match destination burst length %d",
			dma_cfg->source_burst_length,
			dma_cfg->dest_burst_length);
		return -EINVAL;
	}

	/* the total number of bytes should be a multiple of NBYTES.
	 *
	 * This is needed because the EDMA engine performs transfers based
	 * on CITER (integer value) and NBYTES, thus it has no knowledge of
	 * the total transfer size. If the total transfer size is not a
	 * multiple of NBYTES then we'll end up copying the wrong number
	 * of bytes (since CITER = BITER = TOTAL_SIZE / NBYTES). This, of
	 * course, raises no error in the hardware but it's still wrong.
	 */
	if (dma_cfg->head_block->block_size % dma_cfg->source_burst_length) {
		LOG_ERR("block size %d should be a multiple of NBYTES %d",
			dma_cfg->head_block->block_size,
			dma_cfg->source_burst_length);
		return -EINVAL;
	}

	/* check if NBYTES is a multiple of MAX(SSIZE, DSIZE).
	 *
	 * This stems from the fact that NBYTES needs to be a multiple
	 * of SSIZE AND DSIZE. If NBYTES is a multiple of MAX(SSIZE, DSIZE)
	 * then it will for sure satisfy the aforementioned condition (since
	 * SSIZE and DSIZE are powers of 2).
	 *
	 * Failing to meet this condition will lead to the assertion of the
	 * NCE bit (see CHn_ES register).
	 */
	if (dma_cfg->source_burst_length %
	    MAX(dma_cfg->source_data_size, dma_cfg->dest_data_size)) {
		LOG_ERR("NBYTES %d should be a multiple of MAX(SSIZE(%d), DSIZE(%d))",
			dma_cfg->source_burst_length,
			dma_cfg->source_data_size,
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* save the block size for later usage in edma_reload */
	chan->bsize = dma_cfg->head_block->block_size;

	if (dma_cfg->cyclic) {
		chan->cyclic_buffer = true;

		chan->stat.read_position = 0;
		chan->stat.write_position = 0;

		/* ASSUMPTION: for CONSUMER-type channels, the buffer from
		 * which the engine consumes should be full, while in the
		 * case of PRODUCER-type channels it should be empty.
		 */
		switch (dma_cfg->channel_direction) {
		case MEMORY_TO_PERIPHERAL:
			chan->type = CHAN_TYPE_CONSUMER;
			chan->stat.free = 0;
			chan->stat.pending_length = chan->bsize;
			break;
		case PERIPHERAL_TO_MEMORY:
			chan->type = CHAN_TYPE_PRODUCER;
			chan->stat.pending_length = 0;
			chan->stat.free = chan->bsize;
			break;
		default:
			LOG_ERR("unsupported transfer dir %d for cyclic mode",
				dma_cfg->channel_direction);
			return -ENOTSUP;
		}
	} else {
		chan->cyclic_buffer = false;
	}

	/* check if transition to CONFIGURED is allowed */
	if (!channel_allows_transition(chan, CHAN_STATE_CONFIGURED)) {
		LOG_ERR("chan %d transition from %d to CONFIGURED not allowed",
			chan_id, chan->state);
		return -EPERM;
	}

	ret = get_transfer_type(dma_cfg->channel_direction, &transfer_type);
	if (ret < 0) {
		return ret;
	}

	chan->cb = dma_cfg->dma_callback;
	chan->arg = dma_cfg->user_data;

	/* warning: this sets SOFF and DOFF to SSIZE and DSIZE which are POSITIVE. */
	ret = EDMA_ConfigureTransfer(data->hal_cfg, chan_id,
				     dma_cfg->head_block->source_address,
				     dma_cfg->head_block->dest_address,
				     dma_cfg->source_data_size,
				     dma_cfg->dest_data_size,
				     dma_cfg->source_burst_length,
				     dma_cfg->head_block->block_size,
				     transfer_type);
	if (ret < 0) {
		LOG_ERR("failed to configure transfer");
		return to_std_error(ret);
	}

	/* TODO: channel MUX should be forced to 0 based on the previous state */
	if (EDMA_HAS_MUX(data->hal_cfg)) {
		ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, dma_cfg->dma_slot);
		if (ret < 0) {
			LOG_ERR("failed to set channel MUX");
			return to_std_error(ret);
		}
	}

	/* set SLAST and DLAST */
	ret = set_slast_dlast(dma_cfg, transfer_type, data, chan_id);
	if (ret < 0) {
		return ret;
	}

	/* allow interrupting the CPU when a major cycle is completed.
	 *
	 * interesting note: only 1 minor loop is performed per slave peripheral
	 * DMA request. For instance, if block_size = 768 and burst_size = 192
	 * we're going to get 4 transfers of 192 bytes. Each of these transfers
	 * translates to a DMA request made by the slave peripheral.
	 */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CSR, EDMA_TCD_CSR_INTMAJOR_MASK, 0);

	if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
		/* if enabled through the above configuration, also
		 * allow the CPU to be interrupted when CITER = BITER / 2.
		 */
		EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CSR,
				      EDMA_TCD_CSR_INTHALF_MASK, 0);
	}

	/* dump register status - for debugging purposes */
	edma_dump_channel_registers(data, chan_id);

	chan->state = CHAN_STATE_CONFIGURED;

	return 0;
}

static int edma_get_status(const struct device *dev, uint32_t chan_id,
			   struct dma_status *stat)
{
	struct edma_data *data;
	struct edma_channel *chan;
	uint32_t citer, biter, done;
	unsigned int key;

	data = dev->data;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	if (chan->cyclic_buffer) {
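		/* lock out the channel's ISR while sampling the cyclic buffer
		 * accounting so that free and pending_length stay consistent.
		 */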
		key = irq_lock();

		stat->free = chan->stat.free;
		stat->pending_length = chan->stat.pending_length;

		irq_unlock(key);
	} else {
		/* note: no locking required here. The DMA interrupts
		 * have no effect over CITER and BITER.
		 */
		citer = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER);
		biter = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER);
		done = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR) &
			EDMA_TCD_CH_CSR_DONE_MASK;
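		/* DONE is set once the whole major loop has completed.
		 * Otherwise, CITER holds the number of minor loops still to
		 * be executed out of BITER total, each minor loop moving
		 * bsize / biter bytes.
		 */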
		if (done) {
			stat->free = chan->bsize;
			stat->pending_length = 0;
		} else {
			stat->free = (biter - citer) * (chan->bsize / biter);
			stat->pending_length = chan->bsize - stat->free;
		}
	}

	LOG_DBG("free: %d, pending: %d", stat->free, stat->pending_length);

	return 0;
}

static int edma_suspend(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	edma_dump_channel_registers(data, chan_id);

	/* check if transition to SUSPENDED is allowed */
	if (!channel_allows_transition(chan, CHAN_STATE_SUSPENDED)) {
		LOG_ERR("chan %d transition from %d to SUSPENDED not allowed",
			chan_id, chan->state);
		return -EPERM;
	}

	LOG_DBG("suspending channel %u", chan_id);

	/* disable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CH_CSR, 0, EDMA_TCD_CH_CSR_ERQ_MASK);

	chan->state = CHAN_STATE_SUSPENDED;

	return 0;
}

static int edma_stop(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	enum channel_state prev_state;
	int ret;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	prev_state = chan->state;

	/* check if transition to STOPPED is allowed */
	if (!channel_allows_transition(chan, CHAN_STATE_STOPPED)) {
		LOG_ERR("chan %d transition from %d to STOPPED not allowed",
			chan_id, chan->state);
		return -EPERM;
	}

	LOG_DBG("stopping channel %u", chan_id);

	if (prev_state == CHAN_STATE_SUSPENDED) {
		/* if the channel has been suspended then there's
		 * no point in disabling the HW requests again. Just
		 * jump to the channel release operation.
		 */
		goto out_release_channel;
	}

	/* disable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, 0,
			      EDMA_TCD_CH_CSR_ERQ_MASK);
out_release_channel:

	/* clear the channel MUX so that it can be used by a different peripheral.
	 *
	 * note: because the channel is released during dma_stop(), dma_start()
	 * can no longer be called immediately afterwards. This is because
	 * one needs to re-configure the channel MUX, which can only be done
	 * through dma_config(). As such, if one intends to reuse the current
	 * configuration then please call dma_suspend() instead of dma_stop().
	 */
	if (EDMA_HAS_MUX(data->hal_cfg)) {
		ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, 0);
		if (ret < 0) {
			LOG_ERR("failed to set channel MUX");
			return to_std_error(ret);
		}
	}

	edma_dump_channel_registers(data, chan_id);

	chan->state = CHAN_STATE_STOPPED;

	return 0;
}

static int edma_start(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* check if transition to STARTED is allowed */
	if (!channel_allows_transition(chan, CHAN_STATE_STARTED)) {
		LOG_ERR("chan %d transition from %d to STARTED not allowed",
			chan_id, chan->state);
		return -EPERM;
	}

	LOG_DBG("starting channel %u", chan_id);

	/* enable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CH_CSR, EDMA_TCD_CH_CSR_ERQ_MASK, 0);

	chan->state = CHAN_STATE_STARTED;

	return 0;
}

static int edma_reload(const struct device *dev, uint32_t chan_id, uint32_t src,
		       uint32_t dst, size_t size)
{
	struct edma_data *data;
	struct edma_channel *chan;
	int ret;
	unsigned int key;

	data = dev->data;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* channel needs to be started to allow reloading */
	if (chan->state != CHAN_STATE_STARTED) {
		LOG_ERR("reload is only supported on started channels");
		return -EINVAL;
	}

	if (chan->cyclic_buffer) {
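		/* for cyclic buffers a reload only updates the software
		 * producer/consumer accounting; no channel registers are
		 * written here.
		 */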
		key = irq_lock();
		ret = EDMA_CHAN_PRODUCE_CONSUME_B(chan, size);
		irq_unlock(key);
		if (ret < 0) {
			LOG_ERR("chan %d buffer overflow/underrun", chan_id);
			return ret;
		}
	}

	return 0;
}

static int edma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
	switch (type) {
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*val = CONFIG_DMA_NXP_EDMA_ALIGN;
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		/* this is restricted to 1 because SG configurations are not supported */
		*val = 1;
		break;
	default:
		LOG_ERR("invalid attribute type: %d", type);
		return -EINVAL;
	}

	return 0;
}

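/* invoked through dma_request_channel(): accept only an exact channel ID
 * match, power up the channel's power domain (if any) and enable its
 * interrupt.
 */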
static bool edma_channel_filter(const struct device *dev, int chan_id, void *param)
{
	struct edma_channel *chan;
	int ret;

	if (!param) {
		return false;
	}

	if (*(int *)param != chan_id) {
		return false;
	}

	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		return false;
	}

	if (chan->pd_dev) {
		ret = pm_device_runtime_get(chan->pd_dev);
		if (ret < 0) {
			LOG_ERR("failed to PM get channel %d PD dev: %d",
				chan_id, ret);
			return false;
		}
	}

	irq_enable(chan->irq);

	return true;
}

static void edma_channel_release(const struct device *dev, uint32_t chan_id)
{
	struct edma_channel *chan;
	struct edma_data *data;
	int ret;

	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		return;
	}

	data = dev->data;

	if (!channel_allows_transition(chan, CHAN_STATE_RELEASING)) {
		LOG_ERR("chan %d transition from %d to RELEASING not allowed",
			chan_id, chan->state);
		return;
	}

	/* channel needs to be INACTIVE before transitioning */
	if (!WAIT_FOR(!EDMA_CHAN_IS_ACTIVE(data, chan),
		      EDMA_ACTIVE_TIMEOUT, k_busy_wait(1))) {
		LOG_ERR("timed out while waiting for chan %d to become inactive",
			chan->id);
		return;
	}

	/* start the process of disabling IRQ and PD */
	chan->state = CHAN_STATE_RELEASING;

#ifdef CONFIG_NXP_IRQSTEER
	irq_disable(chan->irq);
#endif /* CONFIG_NXP_IRQSTEER */

	if (chan->pd_dev) {
		ret = pm_device_runtime_put(chan->pd_dev);
		if (ret < 0) {
			LOG_ERR("failed to PM put channel %d PD dev: %d",
				chan_id, ret);
		}
	}

	/* done, proceed with next state */
	chan->state = CHAN_STATE_INIT;
}

static DEVICE_API(dma, edma_api) = {
	.reload = edma_reload,
	.config = edma_config,
	.start = edma_start,
	.stop = edma_stop,
	.suspend = edma_suspend,
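	/* resume just re-enables HW requests, which is exactly what
	 * edma_start() does, so reuse it.
	 */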
	.resume = edma_start,
	.get_status = edma_get_status,
	.get_attribute = edma_get_attribute,
	.chan_filter = edma_channel_filter,
	.chan_release = edma_channel_release,
};

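/* find the HAL configuration belonging to this instance by matching the
 * instance's physical register address against the entries of the static
 * s_edmaConfigs table provided by the HAL.
 */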
static edma_config_t *edma_hal_cfg_get(const struct edma_config *cfg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s_edmaConfigs); i++) {
		if (cfg->regmap_phys == s_edmaConfigs[i].regmap) {
			return s_edmaConfigs + i;
		}
	}

	return NULL;
}

static int edma_init(const struct device *dev)
{
	const struct edma_config *cfg;
	struct edma_data *data;
	mm_reg_t regmap;

	data = dev->data;
	cfg = dev->config;

	data->hal_cfg = edma_hal_cfg_get(cfg);
	if (!data->hal_cfg) {
		return -ENODEV;
	}

	/* map instance MMIO */
	device_map(&regmap, cfg->regmap_phys, cfg->regmap_size, K_MEM_CACHE_NONE);

	/* overwrite physical address set in the HAL configuration.
	 * We can down-cast the virtual address to a 32-bit address because
	 * we know we're working with 32-bit addresses only.
	 */
	data->hal_cfg->regmap = (uint32_t)POINTER_TO_UINT(regmap);

	cfg->irq_config();

	/* dma_request_channel() uses this variable to keep track of the
	 * available channels. As such, it needs to be initialized to 0,
	 * which signifies that all channels are initially available.
	 */
	data->channel_flags = ATOMIC_INIT(0);
	data->ctx.atomic = &data->channel_flags;
	data->ctx.dma_channels = data->hal_cfg->channels;

	return 0;
}

/* a few comments about the BUILD_ASSERT statements:
 *	1) dma-channels and valid-channels should be mutually exclusive.
 *	This means you specify one or the other; there's no real need
 *	to have both of them.
 *	2) The number of channels should match the number of interrupts for
 *	said channels (TODO: what about error interrupts?).
 *	3) The channel-mux property shouldn't be specified unless
 *	the eDMA is MUX-capable (signaled via the EDMA_HAS_CHAN_MUX
 *	configuration).
 */
#define EDMA_INIT(inst)								\
										\
BUILD_ASSERT(!DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels) ||	\
	     !DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), valid_channels),	\
	     "dma_channels and valid_channels are mutually exclusive");	\
										\
BUILD_ASSERT(DT_INST_PROP_OR(inst, dma_channels, 0) ==				\
	     DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)) ||			\
	     DT_INST_PROP_LEN_OR(inst, valid_channels, 0) ==			\
	     DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)),				\
	     "number of interrupts needs to match number of channels");	\
										\
BUILD_ASSERT(DT_PROP_OR(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index, 0) <	\
	     ARRAY_SIZE(s_edmaConfigs),						\
	     "HAL configuration index out of bounds");				\
										\
static struct edma_channel channels_##inst[] = EDMA_CHANNEL_ARRAY_GET(inst);	\
										\
static void interrupt_config_function_##inst(void)				\
{										\
	EDMA_CONNECT_INTERRUPTS(inst);						\
}										\
										\
static struct edma_config edma_config_##inst = {				\
	.regmap_phys = DT_INST_REG_ADDR(inst),					\
	.regmap_size = DT_INST_REG_SIZE(inst),					\
	.irq_config = interrupt_config_function_##inst,				\
	.contiguous_channels = EDMA_CHANS_ARE_CONTIGUOUS(inst),			\
};										\
										\
static struct edma_data edma_data_##inst = {					\
	.channels = channels_##inst,						\
	.ctx.magic = DMA_MAGIC,							\
};										\
										\
DEVICE_DT_INST_DEFINE(inst, &edma_init, NULL,					\
		      &edma_data_##inst, &edma_config_##inst,			\
		      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,			\
		      &edma_api);						\

DT_INST_FOREACH_STATUS_OKAY(EDMA_INIT);