/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_
#define ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_

#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>

#include "fsl_edma_soc_rev2.h"

LOG_MODULE_REGISTER(nxp_edma);

/* used for driver binding */
#define DT_DRV_COMPAT nxp_edma

/* workaround the fact that device_map() is not defined for SoCs with no MMU */
#ifndef DEVICE_MMIO_IS_IN_RAM
#define device_map(virt, phys, size, flags) *(virt) = (phys)
#endif /* DEVICE_MMIO_IS_IN_RAM */

/* macros used to parse DTS properties */

/* used in conjunction with LISTIFY, which expects F to also take a variable
 * number of arguments. Since IDENTITY doesn't do that, we need to use a
 * version of it which also takes a variable number of arguments.
 */
#define IDENTITY_VARGS(V, ...) IDENTITY(V)
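/* e.g. LISTIFY(3, IDENTITY_VARGS, (,)) should expand to: 0, 1, 2 */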

/* used to generate an array of indexes for the channels */
#define _EDMA_CHANNEL_INDEX_ARRAY(inst)\
	LISTIFY(DT_INST_PROP_LEN_OR(inst, valid_channels, 0), IDENTITY_VARGS, (,))

/* used to generate an array of indexes for the channels - this is different
 * from _EDMA_CHANNEL_INDEX_ARRAY because the number of channels is passed
 * explicitly through dma-channels, so there is no need to deduce it from the
 * length of the valid-channels property.
 */
#define _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)\
	LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,))

/* used to generate an array of indexes for the interrupts */
#define _EDMA_INT_INDEX_ARRAY(inst)\
	LISTIFY(DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), IDENTITY_VARGS, (,))

/* used to register an ISR/arg pair. TODO: should we also use the priority? */
#define _EDMA_INT_CONNECT(idx, inst)				\
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, idx),		\
		    0, edma_isr,				\
		    &channels_##inst[idx], 0)

/* used to declare a struct edma_channel by the non-explicit macro suite */
#define _EDMA_CHANNEL_DECLARE(idx, inst)				\
{									\
	.id = DT_INST_PROP_BY_IDX(inst, valid_channels, idx),		\
	.dev = DEVICE_DT_INST_GET(inst),				\
	.irq = DT_INST_IRQN_BY_IDX(inst, idx),				\
}

/* used to declare a struct edma_channel by the explicit macro suite */
#define _EDMA_CHANNEL_DECLARE_EXPLICIT(idx, inst)			\
{									\
	.id = idx,							\
	.dev = DEVICE_DT_INST_GET(inst),				\
	.irq = DT_INST_IRQN_BY_IDX(inst, idx),				\
}

/* used to create an array of channel IDs via the valid-channels property */
#define _EDMA_CHANNEL_ARRAY(inst)					\
	{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE, (,),		\
			     inst, _EDMA_CHANNEL_INDEX_ARRAY(inst)) }

/* used to create an array of channel IDs via the dma-channels property */
#define _EDMA_CHANNEL_ARRAY_EXPLICIT(inst)				\
	{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE_EXPLICIT, (,), inst,	\
			     _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)) }

/* used to construct the channel array based on the specified property:
 * dma-channels or valid-channels.
 */
#define EDMA_CHANNEL_ARRAY_GET(inst)							\
	COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels),	\
		    (_EDMA_CHANNEL_ARRAY_EXPLICIT(inst)),				\
		    (_EDMA_CHANNEL_ARRAY(inst)))

#define EDMA_HAL_CFG_GET(inst)								\
	COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index),	\
		    (s_edmaConfigs[DT_INST_PROP(inst, hal_cfg_index)]),			\
		    (s_edmaConfigs[0]))

/* used to register edma_isr for all specified interrupts */
#define EDMA_CONNECT_INTERRUPTS(inst)				\
	FOR_EACH_FIXED_ARG(_EDMA_INT_CONNECT, (;),		\
			   inst, _EDMA_INT_INDEX_ARRAY(inst))

#define EDMA_CHANS_ARE_CONTIGUOUS(inst)\
	DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels)

/* utility macros */

/* a few words about EDMA_CHAN_PRODUCE_CONSUME_{A/B}:
 *	- in the context of cyclic buffers we introduce
 *	the concepts of consumer and producer channels.
 *
 *	- a consumer channel is a channel for which the
 *	DMA copies data from a buffer, thus leading to
 *	less data in said buffer (data is consumed with
 *	each transfer).
 *
 *	- a producer channel is a channel for which the
 *	DMA copies data into a buffer, thus leading to
 *	more data in said buffer (data is produced with
 *	each transfer).
 *
 *	- for consumer channels, each DMA interrupt will
 *	signal that an amount of data has been consumed
 *	from the buffer (half of the buffer size if
 *	HALFMAJOR is enabled, the whole buffer otherwise).
 *
 *	- for producer channels, each DMA interrupt will
 *	signal that an amount of data has been added
 *	to the buffer.
 *
 *	- to signal this, the ISR uses EDMA_CHAN_PRODUCE_CONSUME_A,
 *	which will "consume" data from the buffer for
 *	consumer channels and "produce" data for
 *	producer channels.
 *
 *	- the upper layers using this driver need to let
 *	the EDMA driver know whenever they've produced
 *	(in the case of consumer channels) or consumed
 *	data (in the case of producer channels); they can
 *	do so through the reload() function.
 *
 *	- reload() uses EDMA_CHAN_PRODUCE_CONSUME_B, which
 *	will "produce" data for consumer channels and
 *	"consume" data for producer channels, thus letting
 *	the driver know what action the upper layer has
 *	performed (if the channel is a consumer it's only
 *	natural that the upper layer will write/produce more
 *	data to the buffer. The same rationale applies to
 *	producer channels).
 *
 *	- EDMA_CHAN_PRODUCE_CONSUME_B is just the opposite
 *	of EDMA_CHAN_PRODUCE_CONSUME_A. If one produces
 *	data, the other will consume, and vice versa.
 *
 *	- all of this information is valid only in the
 *	context of cyclic buffers. If this behaviour is
 *	not enabled, querying the status will simply
 *	resolve to querying CITER and BITER.
 */
#define EDMA_CHAN_PRODUCE_CONSUME_A(chan, size)\
	((chan)->type == CHAN_TYPE_CONSUMER ?\
	 edma_chan_cyclic_consume(chan, size) :\
	 edma_chan_cyclic_produce(chan, size))

#define EDMA_CHAN_PRODUCE_CONSUME_B(chan, size)\
	((chan)->type == CHAN_TYPE_CONSUMER ?\
	 edma_chan_cyclic_produce(chan, size) :\
	 edma_chan_cyclic_consume(chan, size))
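
/* to make the flow above concrete, consider a PRODUCER channel
 * (i.e. a PERIPHERAL_TO_MEMORY direction) in a cyclic configuration:
 *	1) the DMA engine copies N bytes into the buffer and raises an
 *	interrupt. edma_isr() calls EDMA_CHAN_PRODUCE_CONSUME_A(chan, N),
 *	which resolves to edma_chan_cyclic_produce() and advances the
 *	write position by N.
 *	2) the upper layer reads those N bytes out of the buffer and then
 *	calls reload(), which calls EDMA_CHAN_PRODUCE_CONSUME_B(chan, N).
 *	For a producer channel this resolves to edma_chan_cyclic_consume(),
 *	advancing the read position and freeing up space in the buffer.
 * The mirror image of this sequence applies to CONSUMER channels.
 */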

enum channel_type {
	CHAN_TYPE_CONSUMER = 0,
	CHAN_TYPE_PRODUCER,
};

enum channel_state {
	CHAN_STATE_INIT = 0,
	CHAN_STATE_CONFIGURED,
	CHAN_STATE_STARTED,
	CHAN_STATE_STOPPED,
	CHAN_STATE_SUSPENDED,
};

struct edma_channel {
	/* channel ID, needs to be the same as the hardware channel ID */
	uint32_t id;
	/* pointer to device representing the EDMA instance, used by edma_isr */
	const struct device *dev;
	/* current state of the channel */
	enum channel_state state;
	/* type of the channel (PRODUCER/CONSUMER) - only applicable to cyclic
	 * buffer configurations.
	 */
	enum channel_type type;
	/* argument passed to the user-defined DMA callback */
	void *arg;
	/* user-defined callback, called at the end of a channel's interrupt
	 * handling.
	 */
	dma_callback_t cb;
	/* INTID associated with the channel */
	int irq;
	/* the channel's status */
	struct dma_status stat;
	/* cyclic buffer size - currently, this is set to head_block's size */
	uint32_t bsize;
	/* set to true if the channel uses a cyclic buffer configuration */
	bool cyclic_buffer;
};

struct edma_data {
	/* this needs to be the first member */
	struct dma_context ctx;
	mm_reg_t regmap;
	struct edma_channel *channels;
	atomic_t channel_flags;
	edma_config_t *hal_cfg;
};

struct edma_config {
	uint32_t regmap_phys;
	uint32_t regmap_size;
	void (*irq_config)(void);
	/* true if channels are contiguous. The channels may not be contiguous
	 * if the valid-channels property is used instead of dma-channels. This
	 * is used to improve the time complexity of the channel lookup
	 * function.
	 */
	bool contiguous_channels;
};

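/* validates and, if allowed, performs the state transition of a channel.
 * The permitted transitions (mirroring the switch statement below) are:
 *	INIT -> CONFIGURED
 *	CONFIGURED -> CONFIGURED, STARTED
 *	STARTED -> STOPPED, SUSPENDED
 *	STOPPED -> CONFIGURED
 *	SUSPENDED -> STARTED, STOPPED
 * Returns 0 on success, -EPERM for a forbidden transition, and -EINVAL
 * if the current state is not a known state.
 */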
static inline int channel_change_state(struct edma_channel *chan,
				       enum channel_state next)
{
	enum channel_state prev = chan->state;

	LOG_DBG("attempting to change state from %d to %d for channel %d", prev, next, chan->id);

	/* validate transition */
	switch (prev) {
	case CHAN_STATE_INIT:
		if (next != CHAN_STATE_CONFIGURED) {
			return -EPERM;
		}
		break;
	case CHAN_STATE_CONFIGURED:
		if (next != CHAN_STATE_STARTED &&
		    next != CHAN_STATE_CONFIGURED) {
			return -EPERM;
		}
		break;
	case CHAN_STATE_STARTED:
		if (next != CHAN_STATE_STOPPED &&
		    next != CHAN_STATE_SUSPENDED) {
			return -EPERM;
		}
		break;
	case CHAN_STATE_STOPPED:
		if (next != CHAN_STATE_CONFIGURED) {
			return -EPERM;
		}
		break;
	case CHAN_STATE_SUSPENDED:
		if (next != CHAN_STATE_STARTED &&
		    next != CHAN_STATE_STOPPED) {
			return -EPERM;
		}
		break;
	default:
		LOG_ERR("invalid channel previous state: %d", prev);
		return -EINVAL;
	}

	/* transition OK, proceed */
	chan->state = next;

	return 0;
}
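
/* a typical caller - e.g. a start() implementation - would first validate
 * the transition and bail out on error (illustrative sketch, not a
 * function from this file):
 *
 *	ret = channel_change_state(chan, CHAN_STATE_STARTED);
 *	if (ret < 0) {
 *		LOG_ERR("failed to start channel %d", chan->id);
 *		return ret;
 *	}
 */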

static inline int get_transfer_type(enum dma_channel_direction dir, uint32_t *type)
{
	switch (dir) {
	case MEMORY_TO_MEMORY:
		*type = kEDMA_TransferTypeM2M;
		break;
	case MEMORY_TO_PERIPHERAL:
		*type = kEDMA_TransferTypeM2P;
		break;
	case PERIPHERAL_TO_MEMORY:
		*type = kEDMA_TransferTypeP2M;
		break;
	default:
		LOG_ERR("invalid channel direction: %d", dir);
		return -EINVAL;
	}

	return 0;
}

static inline bool data_size_is_valid(uint16_t size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
		break;
	default:
		return false;
	}

	return true;
}

/* TODO: we may require setting the channel type through DTS
 * or through struct dma_config. For now, we'll only support
 * MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY directions
 * and assume that these are bound to a certain channel type.
 */
static inline int edma_set_channel_type(struct edma_channel *chan,
					enum dma_channel_direction dir)
{
	switch (dir) {
	case MEMORY_TO_PERIPHERAL:
		chan->type = CHAN_TYPE_CONSUMER;
		break;
	case PERIPHERAL_TO_MEMORY:
		chan->type = CHAN_TYPE_PRODUCER;
		break;
	default:
		LOG_ERR("unsupported transfer direction: %d", dir);
		return -ENOTSUP;
	}

	return 0;
}

/* this function is used in cyclic buffer configurations. It updates the
 * channel's read position based on the number of bytes requested. If the
 * number of bytes being read is higher than the number of bytes available
 * in the buffer (pending_length), this will lead to an error. The main
 * point of this check is to provide a way for the user to determine if
 * data is consumed at a higher rate than it is being produced.
 *
 * This function is used in edma_isr() for CONSUMER channels to mark that
 * data has been consumed (i.e. data has been transferred to the
 * destination) - this is done via the EDMA_CHAN_PRODUCE_CONSUME_A call in
 * edma_isr(). For PRODUCER channels, this function is used in
 * edma_reload() to mark the fact that the user of the EDMA driver has
 * consumed data.
 */
static inline int edma_chan_cyclic_consume(struct edma_channel *chan,
					   uint32_t bytes)
{
	if (bytes > chan->stat.pending_length) {
		return -EINVAL;
	}

	chan->stat.read_position =
		(chan->stat.read_position + bytes) % chan->bsize;

	if (chan->stat.read_position > chan->stat.write_position) {
		chan->stat.free = chan->stat.read_position -
			chan->stat.write_position;
	} else if (chan->stat.read_position == chan->stat.write_position) {
		chan->stat.free = chan->bsize;
	} else {
		chan->stat.free = chan->bsize -
			(chan->stat.write_position - chan->stat.read_position);
	}

	chan->stat.pending_length = chan->bsize - chan->stat.free;

	return 0;
}
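
/* worked example for edma_chan_cyclic_consume() (illustrative numbers
 * only): say bsize = 100, read_position = 90 and write_position = 10,
 * i.e. pending_length = 20 and free = 80. Consuming 15 bytes moves
 * read_position to (90 + 15) % 100 = 5; since 5 < 10 we get
 * free = 100 - (10 - 5) = 95 and pending_length = 100 - 95 = 5 -
 * exactly the 5 bytes that were left unconsumed.
 */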

/* this function is used in cyclic buffer configurations. It updates the
 * channel's write position based on the number of bytes requested. If the
 * number of bytes being written is higher than the number of free bytes
 * in the buffer, this will lead to an error. The main point of this check
 * is to provide a way for the user to determine if data is produced at a
 * higher rate than it is being consumed.
 *
 * This function is used in edma_isr() for PRODUCER channels to mark that
 * data has been produced (i.e. data has been transferred to the
 * destination) - this is done via the EDMA_CHAN_PRODUCE_CONSUME_A call in
 * edma_isr(). For CONSUMER channels, this function is used in
 * edma_reload() to mark the fact that the user of the EDMA driver has
 * produced data.
 */
static inline int edma_chan_cyclic_produce(struct edma_channel *chan,
					   uint32_t bytes)
{
	if (bytes > chan->stat.free) {
		return -EINVAL;
	}

	chan->stat.write_position =
		(chan->stat.write_position + bytes) % chan->bsize;

	if (chan->stat.write_position > chan->stat.read_position) {
		chan->stat.pending_length = chan->stat.write_position -
			chan->stat.read_position;
	} else if (chan->stat.write_position == chan->stat.read_position) {
		chan->stat.pending_length = chan->bsize;
	} else {
		chan->stat.pending_length = chan->bsize -
			(chan->stat.read_position - chan->stat.write_position);
	}

	chan->stat.free = chan->bsize - chan->stat.pending_length;

	return 0;
}
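
/* worked example for edma_chan_cyclic_produce() (illustrative numbers
 * only): say bsize = 100, read_position = 20 and write_position = 80,
 * i.e. pending_length = 60 and free = 40. Producing 30 bytes moves
 * write_position to (80 + 30) % 100 = 10; since 10 < 20 we get
 * pending_length = 100 - (20 - 10) = 90 and free = 100 - 90 = 10.
 */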

static inline void edma_dump_channel_registers(struct edma_data *data,
					       uint32_t chan_id)
{
	LOG_DBG("dumping channel data for channel %d", chan_id);

	LOG_DBG("CH_CSR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR));
	LOG_DBG("CH_ES: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_ES));
	LOG_DBG("CH_INT: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_INT));
	LOG_DBG("CH_SBR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_SBR));
	LOG_DBG("CH_PRI: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_PRI));

	if (EDMA_HAS_MUX(data->hal_cfg)) {
		LOG_DBG("CH_MUX: 0x%x",
			EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_MUX));
	}

	LOG_DBG("TCD_SADDR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SADDR));
	LOG_DBG("TCD_SOFF: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SOFF));
	LOG_DBG("TCD_ATTR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_ATTR));
	LOG_DBG("TCD_NBYTES: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_NBYTES));
	LOG_DBG("TCD_SLAST_SDA: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA));
	LOG_DBG("TCD_DADDR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DADDR));
	LOG_DBG("TCD_DOFF: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DOFF));
	LOG_DBG("TCD_CITER: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER));
	LOG_DBG("TCD_DLAST_SGA: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA));
	LOG_DBG("TCD_CSR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CSR));
	LOG_DBG("TCD_BITER: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER));
}

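/* computes and commits the SLAST and DLAST_SGA TCD fields, i.e. the
 * signed adjustments the eDMA engine applies to SADDR/DADDR once the
 * major loop completes. The peripheral side of a P2M/M2P transfer uses
 * an adjustment of 0 since the peripheral's register address has to
 * stay fixed.
 */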
static inline int set_slast_dlast(struct dma_config *dma_cfg,
				  uint32_t transfer_type,
				  struct edma_data *data,
				  uint32_t chan_id)
{
	int32_t slast, dlast;

	if (transfer_type == kEDMA_TransferTypeP2M) {
		slast = 0;
	} else {
		switch (dma_cfg->head_block->source_addr_adj) {
		case DMA_ADDR_ADJ_INCREMENT:
			slast = (int32_t)dma_cfg->head_block->block_size;
			break;
		case DMA_ADDR_ADJ_DECREMENT:
			slast = (-1) * (int32_t)dma_cfg->head_block->block_size;
			break;
		default:
			LOG_ERR("unsupported SADDR adjustment: %d",
				dma_cfg->head_block->source_addr_adj);
			return -EINVAL;
		}
	}

	if (transfer_type == kEDMA_TransferTypeM2P) {
		dlast = 0;
	} else {
		switch (dma_cfg->head_block->dest_addr_adj) {
		case DMA_ADDR_ADJ_INCREMENT:
			dlast = (int32_t)dma_cfg->head_block->block_size;
			break;
		case DMA_ADDR_ADJ_DECREMENT:
			dlast = (-1) * (int32_t)dma_cfg->head_block->block_size;
			break;
		default:
			LOG_ERR("unsupported DADDR adjustment: %d",
				dma_cfg->head_block->dest_addr_adj);
			return -EINVAL;
		}
	}

	LOG_DBG("attempting to commit SLAST %d", slast);
	LOG_DBG("attempting to commit DLAST %d", dlast);

	/* commit configuration */
	EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA, slast);
	EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA, dlast);

	return 0;
}

/* the NXP HAL EDMA driver uses some custom return values
 * that need to be converted to standard error codes. This function
 * performs exactly this translation.
 */
static inline int to_std_error(int edma_err)
{
	switch (edma_err) {
	case kStatus_EDMA_InvalidConfiguration:
	case kStatus_InvalidArgument:
		return -EINVAL;
	case kStatus_Busy:
		return -EBUSY;
	default:
		LOG_ERR("unknown EDMA error code: %d", edma_err);
		return -EINVAL;
	}
}

#endif /* ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ */