/*
 * Copyright (c) 2022 Andriy Gelman
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_dma

#include <soc.h>
#include <stdint.h>
#include <xmc_dma.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/dt-bindings/dma/infineon-xmc4xxx-dma.h>
#include <zephyr/irq.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);

#define MAX_PRIORITY 7
#define DMA_MAX_BLOCK_LEN 4095
#define DLR_LINE_UNSET 0xff

#define DLR_SRSEL_RS_BITSIZE 4
#define DLR_SRSEL_RS_MSK 0xf

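/* Multi-block, cyclic and scatter/gather transfers are only supported on DMA0 */
/* channels 0 and 1 (see the check in dma_xmc4xxx_config()), so only two */
/* descriptor lists are needed. */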
#define MULTI_BLOCK_NUM_CHANNELS 2

#define XMC_DMA_CTLL_MEMORY_TO_MEMORY 0
#define XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL 1
#define XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY 2

#define ALL_EVENTS \
	(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE | \
	 XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE | \
	 XMC_DMA_CH_EVENT_ERROR)

struct dma_xmc4xxx_channel {
	dma_callback_t cb;
	void *user_data;
	uint32_t dest_address;
	uint32_t transfer_size;
	uint8_t source_data_size;
	uint8_t dlr_line;
	uint8_t channel_direction;
	uint8_t dest_addr_adj;
	bool multi_block;
};

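/* In-memory descriptor (linked-list item) fetched by the GPDMA for each block */
/* of a multi-block transfer; llp == 0 marks the last descriptor. */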
struct dma_xmc4xxx_descriptor {
	uint32_t sar;   /* source address */
	uint32_t dar;   /* destination address */
	uint32_t llp;   /* linked-list pointer to the next descriptor or null if last descriptor */
	uint32_t ctll;  /* control register low */
	uint32_t ctlh;  /* control register high */
	uint32_t dstat; /* status register fetched from address DSTATAR after block completes */
} __packed;

struct dma_xmc4xxx_scatter_gather {
	bool enabled;
	uint32_t interval;
	uint16_t count;
};

static struct dma_xmc4xxx_descriptor descriptor_list[MULTI_BLOCK_NUM_CHANNELS]
						    [CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS];

struct dma_xmc4xxx_config {
	XMC_DMA_t *dma;
	void (*irq_configure)(void);
};

struct dma_xmc4xxx_data {
	struct dma_context ctx;
	struct dma_xmc4xxx_channel *channels;
};

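/* Helper for the ISR: if the given event is pending, find the lowest pending */
/* channel, clear the event on that channel and invoke its callback with the */
/* given return code. */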
#define HANDLE_EVENT(event_test, get_channels_event, ret) \
	do { \
		if (event & (XMC_DMA_CH_##event_test)) { \
			uint32_t channels_event = get_channels_event(dma); \
			int channel = find_lsb_set(channels_event) - 1; \
			struct dma_xmc4xxx_channel *dma_channel; \
			\
			__ASSERT_NO_MSG(channel >= 0); \
			dma_channel = &dev_data->channels[channel]; \
			/* The event has to be cleared before the callback. The callback may */ \
			/* call dma_start() and re-enable the event. */ \
			XMC_DMA_CH_ClearEventStatus(dma, channel, XMC_DMA_CH_##event_test); \
			if (dma_channel->cb) { \
				dma_channel->cb(dev, dma_channel->user_data, channel, (ret)); \
			} \
		} \
	} while (0)

/* The ISR is level triggered, so we don't have to loop over all the channels */
/* in a single call. */
static void dma_xmc4xxx_isr(const struct device *dev)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	int num_dma_channels = dev_data->ctx.dma_channels;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint32_t event;
	uint32_t sr_overruns;

	/* There are two types of possible DMA error events: */
	/* 1. Error response from an AHB slave on the HRESP bus during a DMA transfer. */
	/*    Treat this as an EPERM error. */
	/* 2. Service request overruns on the DLR line. */
	/*    Treat this as an EIO error. */

	event = XMC_DMA_GetEventStatus(dma);
	HANDLE_EVENT(EVENT_ERROR, XMC_DMA_GetChannelsErrorStatus, -EPERM);
	HANDLE_EVENT(EVENT_BLOCK_TRANSFER_COMPLETE, XMC_DMA_GetChannelsBlockCompleteStatus, 0);
	HANDLE_EVENT(EVENT_TRANSFER_COMPLETE, XMC_DMA_GetChannelsTransferCompleteStatus, 0);

	sr_overruns = DLR->OVRSTAT;

	if (sr_overruns == 0) {
		return;
	}

	/* clear the overruns */
	DLR->OVRCLR = sr_overruns;

	/* notify about overruns */
	for (int i = 0; i < num_dma_channels; i++) {
		struct dma_xmc4xxx_channel *dma_channel;

		dma_channel = &dev_data->channels[i];
		if (dma_channel->dlr_line != DLR_LINE_UNSET &&
		    sr_overruns & BIT(dma_channel->dlr_line)) {

			/* From XMC4700/4800 reference documentation - Section 4.4.1 */
			/* Once the overrun condition is entered the user can clear the */
			/* overrun status bits by writing to the DLR_OVRCLR register. */
			/* Additionally the pending request must be reset by successively */
			/* disabling and enabling the respective line. */
			DLR->LNEN &= ~BIT(dma_channel->dlr_line);
			DLR->LNEN |= BIT(dma_channel->dlr_line);

			LOG_ERR("Overruns detected on channel %d", i);
			if (dma_channel->cb != NULL) {
				dma_channel->cb(dev, dma_channel->user_data, i, -EIO);
			}
		}
	}
}

static uint32_t dma_xmc4xxx_reg_ctll(struct dma_block_config *block, struct dma_config *config)
{
	uint32_t ctll;

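	/* The divisions below encode the register fields directly: data sizes of */
	/* 1/2/4 bytes map to TR_WIDTH values 0/1/2 and burst lengths of 1/4/8 map */
	/* to MSIZE values 0/1/2 (only these values pass the checks in */
	/* dma_xmc4xxx_config()). */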
	ctll = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
	       config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
	       block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
	       block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
	       config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
	       config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
	       BIT(GPDMA0_CH_CTLL_INT_EN_Pos);

	/* Only GPDMA flow controller supported */
	if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
		ctll |= XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL << GPDMA0_CH_CTLL_TT_FC_Pos;
	}

	if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
		ctll |= XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY << GPDMA0_CH_CTLL_TT_FC_Pos;
	}

	if (block->source_gather_en && block->source_gather_count > 0) {
		ctll |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
	}

	if (block->dest_scatter_en && block->dest_scatter_count > 0) {
		ctll |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
	}

	return ctll;
}

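/* Record the scatter/gather parameters from the first block that enables them */
/* and require every later enabled block to use the same interval and count, */
/* since there is only one SGR/DSR register pair per channel. */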
#define SET_CHECK_SCATTER_GATHER(type) \
	do { \
		if (block->type##_en && block->type##_count > 0 && !type.enabled) { \
			type.enabled = true; \
			type.interval = block->type##_interval; \
			type.count = block->type##_count; \
		} else if (block->type##_en && type.enabled) { \
			if (block->type##_interval != type.interval || \
			    block->type##_count != type.count) { \
				LOG_ERR(STRINGIFY(type) " parameters must be consistent " \
					"across enabled blocks"); \
				return -EINVAL; \
			} \
		} \
	} while (0)

static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_block_config *block = config->head_block;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint8_t dlr_line = DLR_LINE_UNSET;
	struct dma_xmc4xxx_scatter_gather source_gather = { 0 };
	struct dma_xmc4xxx_scatter_gather dest_scatter = { 0 };

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (config->channel_priority > MAX_PRIORITY) {
		LOG_ERR("Invalid priority");
		return -EINVAL;
	}

	if (config->source_chaining_en || config->dest_chaining_en) {
		LOG_ERR("Channel chaining is not supported");
		return -EINVAL;
	}

	if (config->channel_direction != MEMORY_TO_MEMORY &&
	    config->channel_direction != MEMORY_TO_PERIPHERAL &&
	    config->channel_direction != PERIPHERAL_TO_MEMORY) {
		LOG_ERR("Unsupported channel direction");
		return -EINVAL;
	}

	if (config->block_count > CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS) {
		LOG_ERR("Block count exceeds descriptor array size");
		return -EINVAL;
	}

	if (block->source_gather_en || block->dest_scatter_en || config->block_count != 1 ||
	    config->cyclic) {
		if ((uint32_t)dma != (uint32_t)XMC_DMA0 || channel >= 2) {
			LOG_ERR("Multi-block, cyclic and gather/scatter only supported on "
				"DMA0 ch0 and ch1");
			return -EINVAL;
		}
	}

	if (config->dest_data_size != 1 && config->dest_data_size != 2 &&
	    config->dest_data_size != 4) {
		LOG_ERR("Invalid dest size. Only 1,2,4 bytes supported");
		return -EINVAL;
	}

	if (config->source_data_size != 1 && config->source_data_size != 2 &&
	    config->source_data_size != 4) {
		LOG_ERR("Invalid source size. Only 1,2,4 bytes supported");
		return -EINVAL;
	}

	if (config->source_burst_length != 1 && config->source_burst_length != 4 &&
	    config->source_burst_length != 8) {
		LOG_ERR("Invalid src burst length (data size units). Only 1,4,8 units supported");
		return -EINVAL;
	}

	if (config->dest_burst_length != 1 && config->dest_burst_length != 4 &&
	    config->dest_burst_length != 8) {
		LOG_ERR("Invalid dest burst length (data size units). Only 1,4,8 units supported");
		return -EINVAL;
	}

	if (block->block_size / config->source_data_size > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= 4095");
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);

	/* single-block transfer: program the channel registers directly */
	if (config->block_count == 1 && config->cyclic == 0) {
		uint32_t ctll;

		dma->CH[channel].SAR = block->source_address;
		dma->CH[channel].DAR = block->dest_address;
		dma->CH[channel].LLP = 0;

		/* set the number of source-width transactions (block_size is in bytes) */
		dma->CH[channel].CTLH = block->block_size / config->source_data_size;

		ctll = dma_xmc4xxx_reg_ctll(block, config);

		SET_CHECK_SCATTER_GATHER(source_gather);
		SET_CHECK_SCATTER_GATHER(dest_scatter);

		dma->CH[channel].CTLL = ctll;

	} else {
		struct dma_xmc4xxx_descriptor *desc;

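		/* Multi-block or cyclic transfer: build a linked list of descriptors. */
		/* The per-block SAR/DAR/CTL values are taken from the descriptors, so */
		/* the channel registers only need the LLP enable bits and the pointer */
		/* to the first descriptor. */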
		dma->CH[channel].LLP = (uint32_t)&descriptor_list[channel][0];
		dma->CH[channel].CTLL = BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
		for (int i = 0; i < config->block_count; i++) {
			uint32_t ctll;

			desc = &descriptor_list[channel][i];

			desc->sar = block->source_address;
			desc->dar = block->dest_address;
			desc->ctlh = block->block_size / config->source_data_size;

			ctll = dma_xmc4xxx_reg_ctll(block, config);

			if (i < config->block_count - 1) {
				desc->llp = (uint32_t)&descriptor_list[channel][i + 1];
			} else if (config->cyclic) {
				desc->llp = (uint32_t)&descriptor_list[channel][0];
			} else {
				desc->llp = 0;
			}

			if (i < config->block_count - 1 || config->cyclic) {
				ctll |= BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
			}

			desc->ctll = ctll;

			SET_CHECK_SCATTER_GATHER(source_gather);
			SET_CHECK_SCATTER_GATHER(dest_scatter);

			block = block->next_block;
		}
	}

	block = config->head_block;

	/* Set the priority and software handshaking for src/dst. If hardware handshaking */
	/* is used, it will be enabled later in the code. */
	dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
				GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk;

	dma->CH[channel].CFGH = 0;

	if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
	    config->channel_direction == PERIPHERAL_TO_MEMORY) {
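		/* config->dma_slot packs both the peripheral request source and the DLR */
		/* (DMA line router) line number; see the XMC4XXX_DMA_* helpers in */
		/* dt-bindings/dma/infineon-xmc4xxx-dma.h. */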
		uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
		uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);

		dlr_line = dlr_line_reg;
		if ((uint32_t)dma == (uint32_t)XMC_DMA0 && dlr_line > 7) {
			LOG_ERR("Unsupported request line %d for DMA0. "
				"Should be in range [0,7]", dlr_line);
			return -EINVAL;
		}

		if ((uint32_t)dma == (uint32_t)XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
			LOG_ERR("Unsupported request line %d for DMA1. "
				"Should be in range [8,11]", dlr_line);
			return -EINVAL;
		}

		/* clear any overruns */
		DLR->OVRCLR = BIT(dlr_line);
		/* disable and re-enable the line to reset any pending request */
		DLR->LNEN &= ~BIT(dlr_line);
		DLR->LNEN |= BIT(dlr_line);

		/* connect the DMA line to the peripheral service request (SR) */
		if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
			DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		if ((uint32_t)dma == (uint32_t)XMC_DMA1) {
			dlr_line_reg -= 8;
			DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		/* connect the DMA channel to the DMA line */
		if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
		}

		if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
		}
	}

	if (block->fifo_mode_control > 0) {
		dma->CH[channel].CFGH |= GPDMA0_CH_CFGH_FIFO_MODE_Msk;
	}

	if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
		if (channel == 0 || channel == 1) {
			/* reset scatter/gather registers */
			dma->CH[channel].SGR = 0;
			dma->CH[channel].DSR = 0;
		}
	}

	if (source_gather.enabled) {
		/* truncate if we are out of range */
		dma->CH[channel].SGR = (source_gather.interval & GPDMA0_CH_SGR_SGI_Msk) |
				       source_gather.count << GPDMA0_CH_SGR_SGC_Pos;
	}

	if (dest_scatter.enabled) {
		/* truncate if we are out of range */
		dma->CH[channel].DSR = (dest_scatter.interval & GPDMA0_CH_DSR_DSI_Msk) |
				       dest_scatter.count << GPDMA0_CH_DSR_DSC_Pos;
	}

	dev_data->channels[channel].cb = config->dma_callback;
	dev_data->channels[channel].user_data = config->user_data;
	dev_data->channels[channel].transfer_size = block->block_size;
	dev_data->channels[channel].source_data_size = config->source_data_size;
	dev_data->channels[channel].dlr_line = dlr_line;
	dev_data->channels[channel].channel_direction = config->channel_direction;
	dev_data->channels[channel].dest_addr_adj = block->dest_addr_adj;
	dev_data->channels[channel].dest_address = block->dest_address;

	if (config->block_count > 1) {
		dev_data->channels[channel].multi_block = true;
	} else {
		dev_data->channels[channel].multi_block = false;
	}

	XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
	XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);

	/* also trigger the callback when each block transfer completes */
	if (config->complete_callback_en) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE);
	}

	if (!config->error_callback_dis) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_ERROR);
	}

	LOG_DBG("Configured channel %d for %08X to %08X (%u)", channel, block->source_address,
		block->dest_address, block->block_size);

	return 0;
}

static int dma_xmc4xxx_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_xmc4xxx_data *dev_data = dev->data;
	uint8_t dlr_line = dev_data->channels[channel].dlr_line;

	LOG_DBG("Starting channel %d", channel);
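	/* re-enable the DLR line if it was disconnected by dma_xmc4xxx_stop() */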
	if (dlr_line != DLR_LINE_UNSET && (DLR->LNEN & BIT(dlr_line)) == 0) {
		DLR->LNEN |= BIT(dlr_line);
	}

	XMC_DMA_CH_Enable(dev_cfg->dma, channel);
	return 0;
}

static int dma_xmc4xxx_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_xmc4xxx_data *dev_data = dev->data;
	struct dma_xmc4xxx_channel *dma_channel;
	XMC_DMA_t *dma = dev_cfg->dma;

	dma_channel = &dev_data->channels[channel];
	XMC_DMA_CH_Suspend(dma, channel);

	/* wait until the ongoing transfer finishes */
	while (XMC_DMA_CH_IsEnabled(dma, channel) &&
	       (dma->CH[channel].CFGL & GPDMA0_CH_CFGL_FIFO_EMPTY_Msk) == 0) {
	}

	/* disconnect the DLR line to stop overruns */
	if (dma_channel->dlr_line != DLR_LINE_UNSET) {
		DLR->LNEN &= ~BIT(dma_channel->dlr_line);
	}

	XMC_DMA_CH_Disable(dma, channel);
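	/* clear the suspend request again so the channel is not left suspended */
	/* for the next dma_start() */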
	XMC_DMA_CH_Resume(dma, channel);
	return 0;
}

static int dma_xmc4xxx_reload(const struct device *dev, uint32_t channel, uint32_t src,
			      uint32_t dst, size_t size)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	size_t block_ts;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	dma_channel = &dev_data->channels[channel];
	block_ts = size / dma_channel->source_data_size;
	if (block_ts > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= 4095");
		return -EINVAL;
	}
	dma_channel->transfer_size = size;
	dma_channel->dest_address = dst;

	/* do we need to clear any errors? */
	dma->CH[channel].SAR = src;
	dma->CH[channel].DAR = dst;
	dma->CH[channel].CTLH = block_ts;

	return 0;
}

static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
				  struct dma_status *stat)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;
	uint32_t transferred_bytes;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}
	dma_channel = &dev_data->channels[channel];

	stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);

	if (dma_channel->multi_block) {
		/* not supported for multi-block transfers */
		stat->pending_length = 0;
		return 0;
	}

	/* Use DAR to check for transferred bytes when possible. The CTL.BLOCK_TS value does */
	/* not appear to guarantee that the last value has been fully transferred to dest. */
	if (dma_channel->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
		transferred_bytes = dma->CH[channel].DAR - dma_channel->dest_address;
		stat->pending_length = dma_channel->transfer_size - transferred_bytes;
	} else if (dma_channel->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT) {
		transferred_bytes = dma_channel->dest_address - dma->CH[channel].DAR;
		stat->pending_length = dma_channel->transfer_size - transferred_bytes;
	} else {
		uint32_t num_source_transfers = XMC_DMA_CH_GetTransferredData(dma, channel);

		stat->pending_length = dma_channel->transfer_size -
				       num_source_transfers * dma_channel->source_data_size;
	}

	/* stat->dir and the other remaining fields are not set. They are not */
	/* useful for the xmc4xxx peripheral drivers. */

	return 0;
}

static bool dma_xmc4xxx_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	uint32_t requested_channel;

	if (!filter_param) {
		return true;
	}

	requested_channel = *(uint32_t *)filter_param;

	if (channel == requested_channel) {
		return true;
	}

	return false;
}

static int dma_xmc4xxx_suspend(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Suspend(dma, channel);
	return 0;
}

static int dma_xmc4xxx_resume(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Resume(dma, channel);
	return 0;
}

static int dma_xmc4xxx_init(const struct device *dev)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;

	XMC_DMA_Enable(dev_cfg->dma);
	dev_cfg->irq_configure();
	return 0;
}

static DEVICE_API(dma, dma_xmc4xxx_driver_api) = {
	.config = dma_xmc4xxx_config,
	.reload = dma_xmc4xxx_reload,
	.start = dma_xmc4xxx_start,
	.stop = dma_xmc4xxx_stop,
	.get_status = dma_xmc4xxx_get_status,
	.chan_filter = dma_xmc4xxx_chan_filter,
	.suspend = dma_xmc4xxx_suspend,
	.resume = dma_xmc4xxx_resume,
};

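/* For each enabled devicetree instance, generate the IRQ setup function, the */
/* per-channel state array and the config/data structs, and register the device. */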
#define XMC4XXX_DMA_INIT(inst) \
	static void dma_xmc4xxx##inst##_irq_configure(void) \
	{ \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 0, irq), \
			    DT_INST_IRQ_BY_IDX(inst, 0, priority), \
			    dma_xmc4xxx_isr, \
			    DEVICE_DT_INST_GET(inst), 0); \
		irq_enable(DT_INST_IRQ_BY_IDX(inst, 0, irq)); \
	} \
	static const struct dma_xmc4xxx_config dma_xmc4xxx##inst##_config = { \
		.dma = (XMC_DMA_t *)DT_INST_REG_ADDR(inst), \
		.irq_configure = dma_xmc4xxx##inst##_irq_configure, \
	}; \
	\
	static struct dma_xmc4xxx_channel \
		dma_xmc4xxx##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \
	ATOMIC_DEFINE(dma_xmc4xxx_atomic##inst, \
		      DT_INST_PROP(inst, dma_channels)); \
	static struct dma_xmc4xxx_data dma_xmc4xxx##inst##_data = { \
		.ctx = { \
			.magic = DMA_MAGIC, \
			.atomic = dma_xmc4xxx_atomic##inst, \
			.dma_channels = DT_INST_PROP(inst, dma_channels), \
		}, \
		.channels = dma_xmc4xxx##inst##_channels, \
	}; \
	\
	DEVICE_DT_INST_DEFINE(inst, &dma_xmc4xxx_init, NULL, \
			      &dma_xmc4xxx##inst##_data, \
			      &dma_xmc4xxx##inst##_config, PRE_KERNEL_1, \
			      CONFIG_DMA_INIT_PRIORITY, &dma_xmc4xxx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_DMA_INIT)