/*
 * Copyright (c) 2022 Andriy Gelman
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_dma

#include <soc.h>
#include <stdint.h>
#include <xmc_dma.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/dt-bindings/dma/infineon-xmc4xxx-dma.h>
#include <zephyr/irq.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);

#define MAX_PRIORITY	  7
#define DMA_MAX_BLOCK_LEN 4095
#define DLR_LINE_UNSET	  0xff

#define DLR_SRSEL_RS_BITSIZE 4
#define DLR_SRSEL_RS_MSK     0xf

#define ALL_EVENTS                                                                                 \
	(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE |           \
	 XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE |   \
	 XMC_DMA_CH_EVENT_ERROR)

struct dma_xmc4xxx_channel {
	dma_callback_t cb;
	void *user_data;
	uint16_t block_ts;
	uint8_t source_data_size;
	uint8_t dlr_line;
};

struct dma_xmc4xxx_config {
	XMC_DMA_t *dma;
	void (*irq_configure)(void);
};

struct dma_xmc4xxx_data {
	struct dma_context ctx;
	struct dma_xmc4xxx_channel *channels;
};

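/*
 * Handle one pending event of the given type: find the lowest-numbered
 * channel with the event pending, clear the event and invoke the registered
 * callback with (ret) as the status code. Remaining channels are picked up
 * on ISR re-entry, since the interrupt is level triggered.
 */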
#define HANDLE_EVENT(event_test, get_channels_event, ret)                                  \
do {                                                                                       \
	if (event & (XMC_DMA_CH_##event_test)) {                                           \
		uint32_t channels_event = get_channels_event(dma);                         \
		int channel = find_lsb_set(channels_event) - 1;                            \
		struct dma_xmc4xxx_channel *dma_channel;                                   \
											   \
		__ASSERT_NO_MSG(channel >= 0);                                             \
		dma_channel = &dev_data->channels[channel];                                \
		/* Event has to be cleared before callback. The callback may call */       \
		/* dma_start() and re-enable the event */                                  \
		XMC_DMA_CH_ClearEventStatus(dma, channel, XMC_DMA_CH_##event_test);        \
		if (dma_channel->cb) {                                                     \
			dma_channel->cb(dev, dma_channel->user_data, channel, (ret));      \
		}                                                                          \
	}                                                                                  \
} while (0)

/* The ISR is level triggered, so we don't have to loop over all the channels */
/* in a single call */
static void dma_xmc4xxx_isr(const struct device *dev)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	int num_dma_channels = dev_data->ctx.dma_channels;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint32_t event;
	uint32_t sr_overruns;

	/* There are two types of possible DMA error events: */
	/* 1. Error response from AHB slave on the HRESP bus during DMA transfer. */
	/*    Treat this as an EPERM error. */
	/* 2. Service request overruns on the DLR line. */
	/*    Treat this as an EIO error. */

	event = XMC_DMA_GetEventStatus(dma);
	HANDLE_EVENT(EVENT_ERROR, XMC_DMA_GetChannelsErrorStatus, -EPERM);
	HANDLE_EVENT(EVENT_BLOCK_TRANSFER_COMPLETE, XMC_DMA_GetChannelsBlockCompleteStatus, 0);
	HANDLE_EVENT(EVENT_TRANSFER_COMPLETE, XMC_DMA_GetChannelsTransferCompleteStatus, 0);

	sr_overruns = DLR->OVRSTAT;

	if (sr_overruns == 0) {
		return;
	}

	/* clear the overruns */
	DLR->OVRCLR = sr_overruns;

	/* notify about overruns */
	for (int i = 0; i < num_dma_channels; i++) {
		struct dma_xmc4xxx_channel *dma_channel;

		dma_channel = &dev_data->channels[i];
		if (dma_channel->cb && dma_channel->dlr_line != DLR_LINE_UNSET &&
		    sr_overruns & BIT(dma_channel->dlr_line)) {

			LOG_ERR("Overruns detected on channel %d", i);
			dma_channel->cb(dev, dma_channel->user_data, i, -EIO);

			/* From the XMC4700/4800 reference manual, Section 4.4.1: */
			/* Once the overrun condition is entered, the user can clear the */
			/* overrun status bits by writing to the DLR_OVRCLR register. */
			/* Additionally, the pending request must be reset by successively */
			/* disabling and enabling the respective line. */
			DLR->LNEN &= ~BIT(dma_channel->dlr_line);
			DLR->LNEN |= BIT(dma_channel->dlr_line);
		}
	}
}

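/*
 * Illustrative use from a peripheral driver (a minimal sketch; the buffer,
 * device handle and callback names are hypothetical, and dma_slot/channel
 * come from the devicetree dmas cells):
 *
 *	struct dma_block_config block = {
 *		.source_address = (uint32_t)tx_buf,
 *		.dest_address = (uint32_t)&periph_fifo,
 *		.block_size = sizeof(tx_buf),
 *		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *		.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.source_data_size = 1,
 *		.dest_data_size = 1,
 *		.source_burst_length = 1,
 *		.dest_burst_length = 1,
 *		.block_count = 1,
 *		.head_block = &block,
 *		.dma_callback = tx_done_cb,
 *	};
 *	dma_config(dma_dev, channel, &cfg);
 *	dma_start(dma_dev, channel);
 */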
static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_block_config *block = config->head_block;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint8_t dlr_line = DLR_LINE_UNSET;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (config->channel_priority > MAX_PRIORITY) {
		LOG_ERR("Invalid priority");
		return -EINVAL;
	}

	if (config->source_chaining_en || config->dest_chaining_en) {
		LOG_ERR("Channel chaining is not supported");
		return -EINVAL;
	}

	if (config->channel_direction != MEMORY_TO_MEMORY &&
	    config->channel_direction != MEMORY_TO_PERIPHERAL &&
	    config->channel_direction != PERIPHERAL_TO_MEMORY) {
		LOG_ERR("Unsupported channel direction");
		return -EINVAL;
	}

	if (config->block_count != 1) {
		LOG_ERR("Invalid block count");
		return -EINVAL;
	}

	if (block->source_gather_en || block->dest_scatter_en) {
		if (dma != XMC_DMA0 || channel >= 2) {
			LOG_ERR("Gather/scatter is only supported on DMA0 channels 0 and 1");
			return -EINVAL;
		}
	}

	if (config->dest_data_size != 1 && config->dest_data_size != 2 &&
	    config->dest_data_size != 4) {
		LOG_ERR("Invalid dest size. Only 1, 2 and 4 bytes are supported");
		return -EINVAL;
	}

	if (config->source_data_size != 1 && config->source_data_size != 2 &&
	    config->source_data_size != 4) {
		LOG_ERR("Invalid source size. Only 1, 2 and 4 bytes are supported");
		return -EINVAL;
	}

	if (config->source_burst_length != 1 && config->source_burst_length != 4 &&
	    config->source_burst_length != 8) {
		LOG_ERR("Invalid src burst length (data size units). Only 1, 4 and 8 supported");
		return -EINVAL;
	}

	if (config->dest_burst_length != 1 && config->dest_burst_length != 4 &&
	    config->dest_burst_length != 8) {
		LOG_ERR("Invalid dest burst length (data size units). Only 1, 4 and 8 supported");
		return -EINVAL;
	}

	if (block->block_size / config->source_data_size > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= %d", DMA_MAX_BLOCK_LEN);
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);

	/* set source/destination addresses; linked-list transfers are not used */
	dma->CH[channel].SAR = block->source_address;
	dma->CH[channel].DAR = block->dest_address;
	dma->CH[channel].LLP = 0;

	/* set number of transactions */
	dma->CH[channel].CTLH = block->block_size / config->source_data_size;
	/* set priority and software handshaking for src/dst. If hardware handshaking is used, */
	/* it will be enabled later in the code */
	dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
				GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk;

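	/* The divisions below map the validated sizes onto the register encodings: */
	/* data size 1/2/4 bytes -> TR_WIDTH 0/1/2, burst length 1/4/8 -> MSIZE 0/1/2 */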
	dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
				config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
				block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
				block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
				config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
				config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
				BIT(GPDMA0_CH_CTLL_INT_EN_Pos);

	if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
	    config->channel_direction == PERIPHERAL_TO_MEMORY) {
		uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
		uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);

		dlr_line = dlr_line_reg;
		if (dma == XMC_DMA0 && dlr_line > 7) {
			LOG_ERR("Unsupported request line %d for DMA0. "
				"Should be in range [0,7]", dlr_line);
			return -EINVAL;
		}

		if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
			LOG_ERR("Unsupported request line %d for DMA1. "
				"Should be in range [8,11]", dlr_line);
			return -EINVAL;
		}

		/* clear any overruns */
		DLR->OVRCLR = BIT(dlr_line);
		/* reset and enable the DLR line */
		DLR->LNEN &= ~BIT(dlr_line);
		DLR->LNEN |= BIT(dlr_line);

		/* select the peripheral service request (SR) source for the DLR line */
		if (dma == XMC_DMA0) {
			DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		if (dma == XMC_DMA1) {
			dlr_line_reg -= 8;
			DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		/* connect DMA channel to DMA line */
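		/* TT_FC selects the transfer type: 1 = memory-to-peripheral, */
		/* 2 = peripheral-to-memory, with the DMA as flow controller. */
		/* The literal 4 in CFGH presumably sets PROTCTL to 1, the */
		/* recommended AHB protection value. */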
		if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
			dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos;
		}

		if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
			dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos;
		}
	}


	if (block->source_gather_en) {
		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
		/* truncate if we are out of range */
		dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) |
				       block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos;
	}

	if (block->dest_scatter_en) {
		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
		/* truncate if we are out of range */
		dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) |
				       block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos;
	}

	dev_data->channels[channel].cb = config->dma_callback;
	dev_data->channels[channel].user_data = config->user_data;
	dev_data->channels[channel].block_ts = block->block_size / config->source_data_size;
	dev_data->channels[channel].source_data_size = config->source_data_size;
	dev_data->channels[channel].dlr_line = dlr_line;

	XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
	XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);

	/* also invoke the callback on each block transfer complete */
	if (config->complete_callback_en) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE);
	}

	if (config->error_callback_en) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_ERROR);
	}

	LOG_DBG("Configured channel %d for %08X to %08X (%u)", channel, block->source_address,
		block->dest_address, block->block_size);

	return 0;
}

static int dma_xmc4xxx_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;

	LOG_DBG("Starting channel %d", channel);
	XMC_DMA_CH_Enable(dev_cfg->dma, channel);
	return 0;
}

static int dma_xmc4xxx_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_xmc4xxx_data *dev_data = dev->data;
	struct dma_xmc4xxx_channel *dma_channel;
	XMC_DMA_t *dma = dev_cfg->dma;

	dma_channel = &dev_data->channels[channel];
	XMC_DMA_CH_Suspend(dma, channel);

	/* wait until the ongoing transfer finishes */
	while (XMC_DMA_CH_IsEnabled(dma, channel) &&
	      (dma->CH[channel].CFGL & GPDMA0_CH_CFGL_FIFO_EMPTY_Msk) == 0) {
	}

	/* disconnect the DLR line to stop overruns */
	if (dma_channel->dlr_line != DLR_LINE_UNSET) {
		DLR->LNEN &= ~BIT(dma_channel->dlr_line);
	}

	dma_channel->dlr_line = DLR_LINE_UNSET;
	dma_channel->cb = NULL;

	XMC_DMA_CH_Disable(dma, channel);
	return 0;
}

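/* Update the addresses and size for the next transfer. The channel is not */
/* re-enabled here; the caller must restart it with dma_start(). */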
static int dma_xmc4xxx_reload(const struct device *dev, uint32_t channel, uint32_t src,
			      uint32_t dst, size_t size)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	size_t block_ts;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	dma_channel = &dev_data->channels[channel];
	block_ts = size / dma_channel->source_data_size;
	if (block_ts > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= %d", DMA_MAX_BLOCK_LEN);
		return -EINVAL;
	}
	dma_channel->block_ts = block_ts;

	/* TODO: check whether any error events need to be cleared here */
	dma->CH[channel].SAR = src;
	dma->CH[channel].DAR = dst;
	dma->CH[channel].CTLH = block_ts;

	return 0;
}

static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
				  struct dma_status *stat)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}
	dma_channel = &dev_data->channels[channel];

	stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);

	stat->pending_length  = dma_channel->block_ts - XMC_DMA_CH_GetTransferredData(dma, channel);
	stat->pending_length *= dma_channel->source_data_size;
	/* stat->dir and the other remaining fields are not set. They are not */
	/* useful for xmc4xxx peripheral drivers. */

	return 0;
}

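/*
 * Filter used by dma_request_channel(). A NULL filter_param matches any
 * channel; otherwise only the requested channel number matches, e.g.
 * (sketch):
 *
 *	uint32_t want = 2;
 *	int channel = dma_request_channel(dma_dev, &want);
 */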
static bool dma_xmc4xxx_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	uint32_t requested_channel;

	if (!filter_param) {
		return true;
	}

	requested_channel = *(uint32_t *)filter_param;

	if (channel == requested_channel) {
		return true;
	}

	return false;
}

static int dma_xmc4xxx_suspend(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Suspend(dma, channel);
	return 0;
}

static int dma_xmc4xxx_resume(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Resume(dma, channel);
	return 0;
}

static int dma_xmc4xxx_init(const struct device *dev)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;

	XMC_DMA_Enable(dev_cfg->dma);
	dev_cfg->irq_configure();
	return 0;
}

static const struct dma_driver_api dma_xmc4xxx_driver_api = {
	.config = dma_xmc4xxx_config,
	.reload = dma_xmc4xxx_reload,
	.start = dma_xmc4xxx_start,
	.stop = dma_xmc4xxx_stop,
	.get_status = dma_xmc4xxx_get_status,
	.chan_filter = dma_xmc4xxx_chan_filter,
	.suspend = dma_xmc4xxx_suspend,
	.resume = dma_xmc4xxx_resume,
};

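/* Create one driver instance per enabled infineon,xmc4xxx-dma devicetree node */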
#define XMC4XXX_DMA_INIT(inst)                                                  \
	static void dma_xmc4xxx##inst##_irq_configure(void)                     \
	{                                                                       \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 0, irq),                   \
			    DT_INST_IRQ_BY_IDX(inst, 0, priority),              \
			    dma_xmc4xxx_isr,                                    \
			    DEVICE_DT_INST_GET(inst), 0);                       \
		irq_enable(DT_INST_IRQ_BY_IDX(inst, 0, irq));                   \
	}                                                                       \
	static const struct dma_xmc4xxx_config dma_xmc4xxx##inst##_config = {   \
		.dma = (XMC_DMA_t *)DT_INST_REG_ADDR(inst),                     \
		.irq_configure = dma_xmc4xxx##inst##_irq_configure,             \
	};                                                                      \
										\
	static struct dma_xmc4xxx_channel                                       \
		dma_xmc4xxx##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \
	ATOMIC_DEFINE(dma_xmc4xxx_atomic##inst,                                 \
		      DT_INST_PROP(inst, dma_channels));                        \
	static struct dma_xmc4xxx_data dma_xmc4xxx##inst##_data = {             \
		.ctx =  {                                                       \
			.magic = DMA_MAGIC,                                     \
			.atomic = dma_xmc4xxx_atomic##inst,                     \
			.dma_channels = DT_INST_PROP(inst, dma_channels),       \
		},                                                              \
		.channels = dma_xmc4xxx##inst##_channels,                       \
	};                                                                      \
										\
	DEVICE_DT_INST_DEFINE(inst, &dma_xmc4xxx_init, NULL,                    \
			      &dma_xmc4xxx##inst##_data,                        \
			      &dma_xmc4xxx##inst##_config, PRE_KERNEL_1,        \
			      CONFIG_DMA_INIT_PRIORITY, &dma_xmc4xxx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_DMA_INIT)