/*
 * Copyright (c) 2024 Silicon Laboratories Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdbool.h>
#include <stddef.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_silabs_ldma.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/mem_blocks.h>

#include "em_ldma.h"
#include "dmadrv.h"

#define DT_DRV_COMPAT silabs_ldma

#define DMA_IRQ_PRIORITY 3

LOG_MODULE_REGISTER(silabs_dma, CONFIG_DMA_LOG_LEVEL);

struct dma_silabs_channel {
	enum dma_channel_direction dir;
	uint32_t complete_callback_en;
	atomic_t busy;
	void *user_data;
	dma_callback_t cb;
	LDMA_TransferCfg_t xfer_config;
	LDMA_Descriptor_t *desc;
};

struct dma_silabs_config {
	void (*config_irq)(const struct device *dev);
	const struct device *clock_dev;
};

struct dma_silabs_data {
	struct dma_context dma_ctx;
	struct dma_silabs_channel *dma_chan_table;
	struct sys_mem_blocks *dma_desc_pool;
};

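/*
 * Convert the configured burst length into the LDMA blockSize (arbitration
 * unit) encoding. The source and destination burst lengths must be equal and
 * a multiple of the data size.
 */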
static int dma_silabs_get_blocksize(uint32_t src_blen, uint32_t dst_blen, uint32_t src_dsize)
{
	static const struct {
		int native;
		int efr;
	} ldma_blocksize_map[] = {
		{ 0x0001, ldmaCtrlBlockSizeUnit1 },
		{ 0x0002, ldmaCtrlBlockSizeUnit2 },
		{ 0x0003, ldmaCtrlBlockSizeUnit3 },
		{ 0x0004, ldmaCtrlBlockSizeUnit4 },
		{ 0x0006, ldmaCtrlBlockSizeUnit6 },
		{ 0x0008, ldmaCtrlBlockSizeUnit8 },
		{ 0x0010, ldmaCtrlBlockSizeUnit16 },
		{ 0x0020, ldmaCtrlBlockSizeUnit32 },
		{ 0x0040, ldmaCtrlBlockSizeUnit64 },
		{ 0x0080, ldmaCtrlBlockSizeUnit128 },
		{ 0x0100, ldmaCtrlBlockSizeUnit256 },
		{ 0x0200, ldmaCtrlBlockSizeUnit512 },
		{ 0x0400, ldmaCtrlBlockSizeUnit1024 }
	};
	uint32_t arb_unit;

	if (src_blen != dst_blen) {
		LOG_ERR("Source burst length (%u) and destination burst length (%u) must be equal",
			src_blen, dst_blen);
		return -ENOTSUP;
	}

	if (src_blen % src_dsize) {
		LOG_ERR("Burst length (%u) and data size (%u) mismatch", src_blen, src_dsize);
		return -EINVAL;
	}

	arb_unit = src_blen / src_dsize;

	for (int i = 0; i < ARRAY_SIZE(ldma_blocksize_map); i++) {
		if (ldma_blocksize_map[i].native == arb_unit) {
			return ldma_blocksize_map[i].efr;
		}
	}
	return -EINVAL;
}

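/*
 * Translate one Zephyr dma_block_config into a Silabs LDMA transfer
 * descriptor. Returns a negative errno if the block requests features that
 * the LDMA controller cannot express.
 */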
static int dma_silabs_block_to_descriptor(struct dma_config *config,
					  struct dma_silabs_channel *chan_conf,
					  struct dma_block_config *block, LDMA_Descriptor_t *desc)
{
	int ret, src_size, xfer_count;

	if (block->dest_scatter_count || block->source_gather_count ||
	    block->source_gather_interval || block->dest_scatter_interval ||
	    block->dest_reload_en || block->source_reload_en) {
		return -ENOTSUP;
	}

	if ((block->source_gather_en || block->dest_scatter_en) && config->block_count == 1) {
		LOG_WRN("DMA scatter_gather enabled but there is only one descriptor "
			"configured");
	}

	memset(desc, 0, sizeof(*desc));

	if (config->channel_direction == MEMORY_TO_MEMORY) {
		desc->xfer.structReq = 1;
	}

	if (config->source_data_size != config->dest_data_size) {
		LOG_ERR("Source data size (%u) and destination data size (%u) must be equal",
			config->source_data_size, config->dest_data_size);
		return -ENOTSUP;
	}

	if (config->source_data_size < 1 || config->source_data_size > 4) {
		return -ENOTSUP;
	}

	src_size = config->source_data_size;
	desc->xfer.size = LOG2(src_size);

	if (block->block_size % config->source_data_size) {
		xfer_count = block->block_size / config->source_data_size;
	} else {
		xfer_count = block->block_size / config->source_data_size - 1;
	}

	if (xfer_count > LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
		return -ENOTSUP;
	}

	desc->xfer.xferCnt = xfer_count;

	/* Warning: a high LDMA blockSize (large burst) means a long transfer
	 * without LDMA controller re-arbitration.
	 */
	ret = dma_silabs_get_blocksize(config->source_burst_length, config->dest_burst_length,
				       config->source_data_size);
	if (ret < 0) {
		return ret;
	}

	desc->xfer.blockSize = ret;

	/* If complete_callback_en is set, the callback is called at the end of each
	 * descriptor in the list (each Zephyr block).
	 */
	desc->xfer.doneIfs = config->complete_callback_en;

	if (config->channel_direction == PERIPHERAL_TO_MEMORY ||
	    config->channel_direction == MEMORY_TO_PERIPHERAL) {
		if (block->flow_control_mode) {
			desc->xfer.reqMode = ldmaCtrlReqModeAll;
		} else {
			desc->xfer.reqMode = ldmaCtrlReqModeBlock;
		}
	} else {
		desc->xfer.reqMode = ldmaCtrlReqModeAll;
	}

	/* In Silabs LDMA, the increment sign is managed through the transfer
	 * configuration, which is common to all descriptors of the channel. The Zephyr
	 * DMA API allows the increment sign to be set per block descriptor, which the
	 * Silabs LDMA cannot do. If the increment sign differs between two block
	 * descriptors, an error is returned.
	 */
	if (block->source_addr_adj != DMA_ADDR_ADJ_NO_CHANGE &&
	    block->source_addr_adj != chan_conf->xfer_config.ldmaCfgSrcIncSign) {
		return -ENOTSUP;
	}

	if (block->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		desc->xfer.srcInc = ldmaCtrlSrcIncNone;
	} else {
		desc->xfer.srcInc = ldmaCtrlSrcIncOne;
	}

	if (block->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		desc->xfer.dstInc = ldmaCtrlDstIncNone;
	} else {
		desc->xfer.dstInc = ldmaCtrlDstIncOne;
	}

	desc->xfer.srcAddrMode = ldmaCtrlSrcAddrModeAbs;
	desc->xfer.dstAddrMode = ldmaCtrlDstAddrModeAbs;

	if (block->source_address == 0) {
		LOG_WRN("source_buffer address is null.");
	}
	if (block->dest_address == 0) {
		LOG_WRN("dest_buffer address is null.");
	}

	desc->xfer.srcAddr = block->source_address;
	desc->xfer.dstAddr = block->dest_address;

	return 0;
}

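/*
 * Walk a descriptor list and return every descriptor to the allocation pool.
 * Circular lists are handled by stopping when the head is reached again.
 */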
static int dma_silabs_release_descriptor(struct dma_silabs_data *data, LDMA_Descriptor_t *desc)
{
	LDMA_Descriptor_t *head_desc, *next_desc;
	int ret;

	head_desc = desc;
	while (desc) {
		next_desc = LDMA_DESCRIPTOR_LINKABS_LINKADDR_TO_ADDR(desc->xfer.linkAddr);
		ret = sys_mem_blocks_free(data->dma_desc_pool, 1, (void **)&desc);
		if (ret) {
			return ret;
		}
		desc = next_desc;
		/* Protection against descriptor loops */
		if (desc == head_desc) {
			break;
		}
	}

	return 0;
}

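/*
 * Allocate one LDMA descriptor per Zephyr block in the configuration and chain
 * them with absolute link addresses. A circular block list produces a circular
 * descriptor list. On failure, descriptors allocated so far are released.
 */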
static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma_silabs_data *data,
					   struct dma_silabs_channel *chan_conf)
{
	struct dma_block_config *head_block = config->head_block;
	struct dma_block_config *block = config->head_block;
	LDMA_Descriptor_t *desc, *prev_desc;
	int ret;

	/* Descriptors configuration
	 * block refers to the user configured block (dma_block_config structure from dma.h)
	 * desc refers to the driver configured block (LDMA_Descriptor_t structure from the
	 * Silabs HAL)
	 */
	prev_desc = NULL;
	while (block) {
		ret = sys_mem_blocks_alloc(data->dma_desc_pool, 1, (void **)&desc);
		if (ret) {
			goto err;
		}

		ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc);
		if (ret) {
			goto err;
		}

		if (!prev_desc) {
			chan_conf->desc = desc;
		} else {
			prev_desc->xfer.linkAddr = LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(desc);
			prev_desc->xfer.linkMode = ldmaLinkModeAbs;
			prev_desc->xfer.link = 1;
		}

		prev_desc = desc;
		block = block->next_block;
		if (block == head_block) {
			block = NULL;
			prev_desc->xfer.linkAddr =
				LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
			prev_desc->xfer.linkMode = ldmaLinkModeAbs;
			prev_desc->xfer.link = 1;
		}
	}

	return 0;
err:
	/* Free any descriptors that have already been allocated */
	dma_silabs_release_descriptor(data, chan_conf->desc);

	return ret;
}

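/*
 * Shared interrupt handler for all LDMA channels. Dispatches error and
 * completion events to the per-channel callbacks and reloads a linked
 * descriptor if one was appended while the transfer was finishing.
 */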
static void dma_silabs_irq_handler(const struct device *dev, uint32_t id)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan;
	int status;
	uint32_t pending, chnum;

	pending = LDMA_IntGetEnabled();

	for (chnum = 0; chnum < data->dma_ctx.dma_channels; chnum++) {
		chan = &data->dma_chan_table[chnum];
		status = DMA_STATUS_COMPLETE;

		if (pending & LDMA_IF_ERROR) {
			if (chan->cb) {
				chan->cb(dev, chan->user_data, chnum, -EIO);
			}
		} else if (pending & BIT(chnum)) {
			LDMA_IntClear(BIT(chnum));

			/* Check whether the interrupt only signals the end of a descriptor
			 * rather than a complete transfer.
			 */
			if (chan->complete_callback_en) {
				status = DMA_STATUS_BLOCK;
			} else {
				atomic_clear(&chan->busy);
			}

			/*
			 * In case the transfer is done but a new descriptor has been
			 * appended, the next descriptor must be loaded manually.
			 */
			if (LDMA_TransferDone(chnum) &&
			    LDMA->CH[chnum].LINK & _LDMA_CH_LINK_LINK_MASK) {
				sys_clear_bit((mem_addr_t)&LDMA->CHDONE, chnum);
				LDMA->LINKLOAD = BIT(chnum);
			}

			if (chan->cb) {
				chan->cb(dev, chan->user_data, chnum, status);
			}
		}
	}
}

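/*
 * Implements the dma_config() API call: validate the request, build the
 * channel-wide LDMA transfer configuration and the descriptor list, and mark
 * the channel as configured. The transfer itself is started by dma_start().
 */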
static int dma_silabs_configure(const struct device *dev, uint32_t channel,
				struct dma_config *config)
{
	struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan_conf = &data->dma_chan_table[channel];
	LDMA_TransferCfg_t *xfer_config = &chan_conf->xfer_config;
	int ret;

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	if (!config) {
		return -EINVAL;
	}

	if (atomic_get(&chan_conf->busy)) {
		LOG_ERR("DMA channel %u is busy", channel);
		return -EBUSY;
	}

	/* Release any descriptors previously owned by this channel */
	ret = dma_silabs_release_descriptor(data, chan_conf->desc);
	if (ret) {
		return ret;
	}

	if (config->dest_data_size != config->source_data_size) {
		LOG_ERR("source and dest data size differ");
		return -ENOTSUP;
	}

	if (config->source_handshake || config->dest_handshake || config->source_chaining_en ||
	    config->dest_chaining_en || config->linked_channel) {
		return -ENOTSUP;
	}

	LDMA_StopTransfer(channel);

	chan_conf->user_data = config->user_data;
	chan_conf->cb = config->dma_callback;
	chan_conf->dir = config->channel_direction;
	chan_conf->complete_callback_en = config->complete_callback_en;

	memset(xfer_config, 0, sizeof(*xfer_config));

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		break;
	case PERIPHERAL_TO_MEMORY:
	case MEMORY_TO_PERIPHERAL:
		xfer_config->ldmaReqSel = SILABS_LDMA_SLOT_TO_REQSEL(config->dma_slot);
		break;
	case PERIPHERAL_TO_PERIPHERAL:
	case HOST_TO_MEMORY:
	case MEMORY_TO_HOST:
	default:
		return -ENOTSUP;
	}

	/* Map channel_priority directly onto the EFR32 arbitration-slot priority */
	if (config->channel_priority < ldmaCfgArbSlotsAs1 ||
	    config->channel_priority > ldmaCfgArbSlotsAs8) {
		return -EINVAL;
	}
	xfer_config->ldmaCfgArbSlots = config->channel_priority;

	switch (config->head_block->source_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignPos;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignNeg;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignPos;
		break;
	default:
		LOG_ERR("Addr adjustment error %d", config->head_block->source_addr_adj);
		break;
	}

	switch (config->head_block->dest_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignPos;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignNeg;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignPos;
		break;
	default:
		break;
	}

	ret = dma_silabs_configure_descriptor(config, data, chan_conf);
	if (ret) {
		return ret;
	}

	atomic_set_bit(data->dma_ctx.atomic, channel);

	return 0;
}

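/* Implements dma_start(): launch the transfer previously set up by dma_config(). */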
static int dma_silabs_start(const struct device *dev, uint32_t channel)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan = &data->dma_chan_table[channel];

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	atomic_inc(&chan->busy);

	LDMA_StartTransfer(channel, &chan->xfer_config, chan->desc);

	return 0;
}

static int dma_silabs_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan = &data->dma_chan_table[channel];

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	LDMA_StopTransfer(channel);

	atomic_clear(&chan->busy);

	LDMA_IntClear(BIT(channel));

	return 0;
}

static int dma_silabs_get_status(const struct device *dev, uint32_t channel,
				 struct dma_status *status)
{
	const struct dma_silabs_data *data = dev->data;

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	if (!atomic_test_bit(data->dma_ctx.atomic, channel)) {
		return -EINVAL;
	}

	status->pending_length = LDMA_TransferRemainingCount(channel);
	status->busy = data->dma_chan_table[channel].busy;
	status->dir = data->dma_chan_table[channel].dir;

	return 0;
}

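/*
 * Channel allocation hooks: reserve the channel in the Silabs DMADRV allocator
 * when it is requested through the Zephyr DMA API, and free it again on
 * release, so both allocators agree on which channels are in use.
 */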
bool dma_silabs_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(filter_param);
	return (DMADRV_AllocateChannelById(channel, 0) == ECODE_EMDRV_DMADRV_OK);
}

void dma_silabs_chan_release(const struct device *dev, uint32_t channel)
{
	ARG_UNUSED(dev);
	Ecode_t __maybe_unused err = DMADRV_FreeChannel(channel);

	__ASSERT_NO_MSG(err == ECODE_EMDRV_DMADRV_OK);
}

static int dma_silabs_init(const struct device *dev)
{
	const struct dma_silabs_config *config = dev->config;

	/* Clock is managed by em_ldma */
	DMADRV_Init();

	/* LDMA_Init() configures the IRQ, but we want the IRQ to match the one
	 * configured in the devicetree.
	 */
	config->config_irq(dev);

	return 0;
}

static DEVICE_API(dma, dma_funcs) = {
	.config = dma_silabs_configure,
	.start = dma_silabs_start,
	.stop = dma_silabs_stop,
	.get_status = dma_silabs_get_status,
	.chan_filter = dma_silabs_chan_filter,
	.chan_release = dma_silabs_chan_release
};

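/*
 * Illustrative sketch (not compiled here): how a client would drive this
 * controller through the generic Zephyr DMA API for a memory-to-memory copy.
 * The node label, buffers and sizes below are hypothetical examples.
 *
 *	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(ldma));
 *	static uint32_t src[16], dst[16];
 *	struct dma_block_config block = {
 *		.source_address = (uint32_t)src,
 *		.dest_address = (uint32_t)dst,
 *		.block_size = sizeof(src),
 *		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 4,
 *		.dest_data_size = 4,
 *		.source_burst_length = 4,
 *		.dest_burst_length = 4,
 *		.block_count = 1,
 *		.head_block = &block,
 *	};
 *	int channel = dma_request_channel(dma_dev, NULL);
 *
 *	if (channel >= 0 && dma_config(dma_dev, channel, &cfg) == 0) {
 *		dma_start(dma_dev, channel);
 *	}
 */

/*
 * Silabs-specific extension: append a single block to an already configured
 * channel. If the current transfer is still running, the new descriptor is
 * linked after it; otherwise the transfer is restarted with the new block.
 */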
int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct dma_config *config)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan_conf = &data->dma_chan_table[channel];
	struct dma_block_config *block_config = config->head_block;
	LDMA_Descriptor_t *desc = data->dma_chan_table[channel].desc;
	unsigned int key;
	int ret;

	__ASSERT(!((uintptr_t)desc & ~_LDMA_CH_LINK_LINKADDR_MASK),
		 "DMA descriptor is not 32-bit aligned");

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	if (!atomic_test_bit(data->dma_ctx.atomic, channel)) {
		return -EINVAL;
	}

	/* If the DMA channel has already loaded a descriptor with a link address,
	 * a new block cannot be appended right after the current transfer.
	 * Appending a descriptor list is not supported either.
	 * This check prevents the function from being used in a wrong way.
	 */
	if (desc->xfer.linkAddr || config->head_block->next_block) {
		return -EINVAL;
	}

	/* A link is already set by a previous call to the function */
	if (sys_test_bit((mem_addr_t)&LDMA->CH[channel].LINK, _LDMA_CH_LINK_LINK_SHIFT)) {
		return -EINVAL;
	}

	ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc);
	if (ret) {
		return ret;
	}

	key = irq_lock();
	if (!LDMA_TransferDone(channel)) {
		/*
		 * These two writes are intentionally split so that the link address is
		 * written before the link bit. This way, a valid linkAddr is always in
		 * place when the link bit is set.
		 */
		sys_write32((uintptr_t)desc, (mem_addr_t)&LDMA->CH[channel].LINK);
		sys_set_bit((mem_addr_t)&LDMA->CH[channel].LINK, _LDMA_CH_LINK_LINK_SHIFT);
		irq_unlock(key);
	} else {
		irq_unlock(key);
		LDMA_StartTransfer(channel, &chan_conf->xfer_config, desc);
	}

	return 0;
}

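/* Connect and enable one of the instance's IRQ lines; expanded once per IRQ by
 * CONFIGURE_ALL_IRQS() through LISTIFY().
 */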
#define SILABS_DMA_IRQ_CONNECT(n, inst)                                                        \
	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq), DT_INST_IRQ_BY_IDX(inst, n, priority),   \
		    dma_silabs_irq_handler, DEVICE_DT_INST_GET(inst), 0);                      \
	irq_enable(DT_INST_IRQ_BY_IDX(inst, n, irq));

#define CONFIGURE_ALL_IRQS(inst, n) LISTIFY(n, SILABS_DMA_IRQ_CONNECT, (), inst)

#define DMA_SILABS_LDMA_INIT(inst)                                                             \
                                                                                               \
	static void silabs_dma_irq_configure_##inst(const struct device *dev)                  \
	{                                                                                      \
		ARG_UNUSED(dev);                                                               \
		CONFIGURE_ALL_IRQS(inst, DT_NUM_IRQS(DT_DRV_INST(inst)));                      \
	};                                                                                     \
                                                                                               \
	const struct dma_silabs_config dma_silabs_config_##inst = {                           \
		.config_irq = silabs_dma_irq_configure_##inst                                  \
	};                                                                                     \
                                                                                               \
	static ATOMIC_DEFINE(dma_channels_atomic_##inst, DT_INST_PROP(inst, dma_channels));   \
                                                                                               \
	static struct dma_silabs_channel                                                       \
		dma_silabs_channel_##inst[DT_INST_PROP(inst, dma_channels)];                   \
                                                                                               \
	SYS_MEM_BLOCKS_DEFINE_STATIC(desc_pool_##inst, sizeof(LDMA_Descriptor_t),              \
				     CONFIG_DMA_MAX_DESCRIPTOR, 4);                            \
                                                                                               \
	static struct dma_silabs_data dma_silabs_data_##inst = {                              \
		.dma_ctx.magic = DMA_MAGIC,                                                    \
		.dma_ctx.dma_channels = DT_INST_PROP(inst, dma_channels),                     \
		.dma_ctx.atomic = dma_channels_atomic_##inst,                                  \
		.dma_chan_table = dma_silabs_channel_##inst,                                   \
		.dma_desc_pool = &desc_pool_##inst                                             \
	};                                                                                     \
                                                                                               \
	DEVICE_DT_INST_DEFINE(inst, &dma_silabs_init, NULL, &dma_silabs_data_##inst,          \
			      &dma_silabs_config_##inst, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \
			      &dma_funcs);

DT_INST_FOREACH_STATUS_OKAY(DMA_SILABS_LDMA_INIT);