/*
 * Copyright (c) 2024 Silicon Laboratories Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdbool.h>
#include <stddef.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/mem_blocks.h>

#include "em_ldma.h"

#define DT_DRV_COMPAT silabs_ldma

#define DMA_IRQ_PRIORITY 3

LOG_MODULE_REGISTER(silabs_dma, CONFIG_DMA_LOG_LEVEL);

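/* Per-channel runtime state: transfer direction, busy flag, user callback and
 * the transfer configuration and descriptor chain handed to the LDMA HAL.
 */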
struct dma_silabs_channel {
	enum dma_channel_direction dir;
	uint32_t complete_callback_en;
	atomic_t busy;
	void *user_data;
	dma_callback_t cb;
	LDMA_TransferCfg_t xfer_config;
	LDMA_Descriptor_t *desc;
};

struct dma_silabs_config {
	void (*config_irq)(const struct device *dev);
	const struct device *clock_dev;
};

struct dma_silabs_data {
	struct dma_context dma_ctx;
	struct dma_silabs_channel *dma_chan_table;
	struct sys_mem_blocks *dma_desc_pool;
};

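/* Convert a (burst length, data size) pair into the LDMA blockSize arbitration
 * unit. Both burst lengths must be equal and a multiple of the data size, and
 * the resulting number of units must be one supported by the hardware.
 */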
static int dma_silabs_get_blocksize(uint32_t src_blen, uint32_t dst_blen, uint32_t src_dsize)
{
	static const struct {
		int native;
		int efr;
	} ldma_blocksize_map[] = {
		{ 0x0001, ldmaCtrlBlockSizeUnit1 },
		{ 0x0002, ldmaCtrlBlockSizeUnit2 },
		{ 0x0003, ldmaCtrlBlockSizeUnit3 },
		{ 0x0004, ldmaCtrlBlockSizeUnit4 },
		{ 0x0006, ldmaCtrlBlockSizeUnit6 },
		{ 0x0008, ldmaCtrlBlockSizeUnit8 },
		{ 0x0010, ldmaCtrlBlockSizeUnit16 },
		{ 0x0020, ldmaCtrlBlockSizeUnit32 },
		{ 0x0040, ldmaCtrlBlockSizeUnit64 },
		{ 0x0080, ldmaCtrlBlockSizeUnit128 },
		{ 0x0100, ldmaCtrlBlockSizeUnit256 },
		{ 0x0200, ldmaCtrlBlockSizeUnit512 },
		{ 0x0400, ldmaCtrlBlockSizeUnit1024 }
	};
	uint32_t arb_unit;

	if (src_blen != dst_blen) {
		LOG_ERR("Source burst length (%u) and destination burst length (%u) must be equal",
			src_blen, dst_blen);
		return -ENOTSUP;
	}

	if (src_blen % src_dsize) {
		LOG_ERR("Burst length (%u) must be a multiple of the data size (%u)", src_blen,
			src_dsize);
		return -EINVAL;
	}

	arb_unit = src_blen / src_dsize;

	for (int i = 0; i < ARRAY_SIZE(ldma_blocksize_map); i++) {
		if (ldma_blocksize_map[i].native == arb_unit) {
			return ldma_blocksize_map[i].efr;
		}
	}
	return -EINVAL;
}

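/* Translate one Zephyr dma_block_config into a Silabs LDMA_Descriptor_t.
 * Scatter/gather counts, intervals and reload are not supported, and the
 * source and destination data sizes must be equal.
 */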
static int dma_silabs_block_to_descriptor(struct dma_config *config,
					  struct dma_silabs_channel *chan_conf,
					  struct dma_block_config *block, LDMA_Descriptor_t *desc)
{
	int ret, src_size, xfer_count;

	if (block->dest_scatter_count || block->source_gather_count ||
	    block->source_gather_interval || block->dest_scatter_interval ||
	    block->dest_reload_en || block->source_reload_en) {
		return -ENOTSUP;
	}

	if ((block->source_gather_en || block->dest_scatter_en) && config->block_count == 1) {
		LOG_WRN("DMA scatter-gather enabled but only one descriptor is configured");
	}

	memset(desc, 0, sizeof(*desc));

	desc->xfer.structReq = 1;

	if (config->source_data_size != config->dest_data_size) {
		LOG_ERR("Source data size (%u) and destination data size (%u) must be equal",
			config->source_data_size, config->dest_data_size);
		return -ENOTSUP;
	}

	if (config->source_data_size < 1 || config->source_data_size > 4) {
		return -ENOTSUP;
	}

	src_size = config->source_data_size;
	desc->xfer.size = LOG2(src_size);

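	/* The LDMA XFERCNT field encodes the number of unit transfers minus one,
	 * hence the subtraction when block_size is an exact multiple of the data
	 * size.
	 */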
	if (block->block_size % config->source_data_size) {
		xfer_count = block->block_size / config->source_data_size;
	} else {
		xfer_count = block->block_size / config->source_data_size - 1;
	}

	if (xfer_count > LDMA_DESCRIPTOR_MAX_XFER_SIZE) {
		return -ENOTSUP;
	}

	desc->xfer.xferCnt = xfer_count;

	/* Warning: a high LDMA blockSize (large burst) means a large transfer
	 * without re-arbitration by the LDMA controller.
	 */
	ret = dma_silabs_get_blocksize(config->source_burst_length, config->dest_burst_length,
				       config->source_data_size);
	if (ret < 0) {
		return ret;
	}

	desc->xfer.blockSize = ret;

	/* If complete_callback_en is set, the callback is called at the end of each
	 * descriptor in the list (each Zephyr block).
	 */
	desc->xfer.doneIfs = config->complete_callback_en;
	desc->xfer.reqMode = ldmaCtrlReqModeAll;
	desc->xfer.ignoreSrec = block->flow_control_mode;

	/* In the Silabs LDMA, the increment sign is part of the transfer configuration,
	 * which is shared by all descriptors of a channel. The Zephyr DMA API allows the
	 * increment sign to be set per block descriptor, which the LDMA cannot do, so an
	 * error is returned if two block descriptors request different signs.
	 */
	if (block->source_addr_adj != DMA_ADDR_ADJ_NO_CHANGE &&
	    block->source_addr_adj != chan_conf->xfer_config.ldmaCfgSrcIncSign) {
		return -ENOTSUP;
	}

	if (block->source_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		desc->xfer.srcInc = ldmaCtrlSrcIncNone;
	} else {
		desc->xfer.srcInc = ldmaCtrlSrcIncOne;
	}

	if (block->dest_addr_adj == DMA_ADDR_ADJ_NO_CHANGE) {
		desc->xfer.dstInc = ldmaCtrlDstIncNone;
	} else {
		desc->xfer.dstInc = ldmaCtrlDstIncOne;
	}

	desc->xfer.srcAddrMode = ldmaCtrlSrcAddrModeAbs;
	desc->xfer.dstAddrMode = ldmaCtrlDstAddrModeAbs;

	if (block->source_address == 0) {
		LOG_WRN("source_buffer address is null");
	}
	if (block->dest_address == 0) {
		LOG_WRN("dest_buffer address is null");
	}

	desc->xfer.srcAddr = block->source_address;
	desc->xfer.dstAddr = block->dest_address;

	return 0;
}

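/* Walk the linked descriptor list starting at desc and return every entry to
 * the descriptor pool. Iteration stops when the list ends or wraps back to the
 * first descriptor (cyclic configuration).
 */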
static int dma_silabs_release_descriptor(struct dma_silabs_data *data, LDMA_Descriptor_t *desc)
{
	LDMA_Descriptor_t *head_desc, *next_desc;
	int ret;

	head_desc = desc;
	while (desc) {
		next_desc = LDMA_DESCRIPTOR_LINKABS_LINKADDR_TO_ADDR(desc->xfer.linkAddr);
		ret = sys_mem_blocks_free(data->dma_desc_pool, 1, (void **)&desc);
		if (ret) {
			return ret;
		}
		desc = next_desc;
		/* Protection against a descriptor loop */
		if (desc == head_desc) {
			break;
		}
	}

	return 0;
}

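/* Allocate one LDMA descriptor per Zephyr block and chain them with absolute
 * link addresses. A circular block list (next_block pointing back to the head
 * block) produces a circular descriptor list. On error, every descriptor
 * allocated so far is released.
 */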
static int dma_silabs_configure_descriptor(struct dma_config *config, struct dma_silabs_data *data,
					   struct dma_silabs_channel *chan_conf)
{
	struct dma_block_config *head_block = config->head_block;
	struct dma_block_config *block = config->head_block;
	LDMA_Descriptor_t *desc, *prev_desc;
	int ret;

	/* Descriptor configuration:
	 * "block" refers to a user-configured block (dma_block_config structure from dma.h),
	 * "desc" refers to a driver-configured descriptor (LDMA_Descriptor_t structure from
	 * the Silabs HAL).
	 */
	prev_desc = NULL;
	while (block) {
		ret = sys_mem_blocks_alloc(data->dma_desc_pool, 1, (void **)&desc);
		if (ret) {
			goto err;
		}

		ret = dma_silabs_block_to_descriptor(config, chan_conf, block, desc);
		if (ret) {
			goto err;
		}

		if (!prev_desc) {
			chan_conf->desc = desc;
		} else {
			prev_desc->xfer.linkAddr = LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(desc);
			prev_desc->xfer.linkMode = ldmaLinkModeAbs;
			prev_desc->xfer.link = 1;
		}

		prev_desc = desc;
		block = block->next_block;
		if (block == head_block) {
			block = NULL;
			prev_desc->xfer.linkAddr =
				LDMA_DESCRIPTOR_LINKABS_ADDR_TO_LINKADDR(chan_conf->desc);
			prev_desc->xfer.linkMode = ldmaLinkModeAbs;
			prev_desc->xfer.link = 1;
		}
	}

	return 0;
err:
	/* Free any descriptors that were already allocated */
	(void)dma_silabs_release_descriptor(data, chan_conf->desc);

	return ret;
}

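/* Shared interrupt handler for all LDMA channels. Errors are reported to the
 * channel callback with -EIO. Otherwise, a channel interrupt is reported as
 * DMA_STATUS_BLOCK when per-descriptor callbacks are enabled, or as
 * DMA_STATUS_COMPLETE (clearing the busy flag) when the transfer is done.
 */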
static void dma_silabs_irq_handler(const struct device *dev, uint32_t id)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan;
	int status;
	uint32_t pending, chnum;

	pending = LDMA_IntGetEnabled();

	for (chnum = 0; chnum < data->dma_ctx.dma_channels; chnum++) {
		chan = &data->dma_chan_table[chnum];
		status = DMA_STATUS_COMPLETE;

		if (pending & LDMA_IF_ERROR) {
			if (chan->cb) {
				chan->cb(dev, chan->user_data, chnum, -EIO);
			}
		} else if (pending & BIT(chnum)) {
			LDMA_IntClear(BIT(chnum));

			/* This may be an interrupt for the end of a single descriptor
			 * rather than for the complete transfer.
			 */
			if (chan->complete_callback_en) {
				status = DMA_STATUS_BLOCK;
			} else {
				atomic_clear(&chan->busy);
			}

			if (chan->cb) {
				chan->cb(dev, chan->user_data, chnum, status);
			}
		}
	}
}

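/* Configure a channel from a Zephyr dma_config: validate the request, build
 * the channel-wide LDMA_TransferCfg_t (request signal, arbitration slots,
 * increment signs) and the descriptor chain for the block list.
 */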
static int dma_silabs_configure(const struct device *dev, uint32_t channel,
				struct dma_config *config)
{
	struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan_conf = &data->dma_chan_table[channel];
	LDMA_TransferCfg_t *xfer_config = &chan_conf->xfer_config;
	int ret;

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	if (!config) {
		return -EINVAL;
	}

	if (atomic_get(&chan_conf->busy)) {
		LOG_ERR("DMA channel %u is busy", channel);
		return -EBUSY;
	}

	/* Release descriptors previously owned by this channel */
	ret = dma_silabs_release_descriptor(data, chan_conf->desc);
	if (ret) {
		return ret;
	}

	if (config->dest_data_size != config->source_data_size) {
		LOG_ERR("Source and destination data sizes differ");
		return -ENOTSUP;
	}

	if (config->source_handshake || config->dest_handshake || config->source_chaining_en ||
	    config->dest_chaining_en || config->linked_channel) {
		return -ENOTSUP;
	}

	LDMA_StopTransfer(channel);

	chan_conf->user_data = config->user_data;
	chan_conf->cb = config->dma_callback;
	chan_conf->dir = config->channel_direction;
	chan_conf->complete_callback_en = config->complete_callback_en;

	memset(xfer_config, 0, sizeof(*xfer_config));

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		break;
	case PERIPHERAL_TO_MEMORY:
	case MEMORY_TO_PERIPHERAL:
		xfer_config->ldmaReqSel = config->dma_slot;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
	case HOST_TO_MEMORY:
	case MEMORY_TO_HOST:
	default:
		return -ENOTSUP;
	}

	/* Map channel_priority directly onto the LDMA arbitration slot count */
	if (config->channel_priority < ldmaCfgArbSlotsAs1 ||
	    config->channel_priority > ldmaCfgArbSlotsAs8) {
		return -EINVAL;
	}
	xfer_config->ldmaCfgArbSlots = config->channel_priority;

	switch (config->head_block->source_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignPos;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignNeg;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		xfer_config->ldmaCfgSrcIncSign = ldmaCfgSrcIncSignPos;
		break;
	default:
		LOG_ERR("Address adjustment error %d", config->head_block->source_addr_adj);
		break;
	}

	switch (config->head_block->dest_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignPos;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignNeg;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		xfer_config->ldmaCfgDstIncSign = ldmaCfgDstIncSignPos;
		break;
	default:
		break;
	}

	ret = dma_silabs_configure_descriptor(config, data, chan_conf);
	if (ret) {
		return ret;
	}

	atomic_set_bit(data->dma_ctx.atomic, channel);

	return 0;
}

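/* Mark the channel busy and start the transfer using the previously built
 * transfer configuration and descriptor chain.
 */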
static int dma_silabs_start(const struct device *dev, uint32_t channel)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan = &data->dma_chan_table[channel];

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	atomic_inc(&chan->busy);

	LDMA_StartTransfer(channel, &chan->xfer_config, chan->desc);

	return 0;
}

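/* Stop an ongoing transfer, clear the channel busy flag and any pending
 * interrupt for the channel.
 */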
static int dma_silabs_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_silabs_data *data = dev->data;
	struct dma_silabs_channel *chan = &data->dma_chan_table[channel];

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	LDMA_StopTransfer(channel);

	atomic_clear(&chan->busy);

	LDMA_IntClear(BIT(channel));

	return 0;
}

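/* Report whether the channel is busy and its configured transfer direction.
 * The channel must have been configured beforehand.
 */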
static int dma_silabs_get_status(const struct device *dev, uint32_t channel,
				 struct dma_status *status)
{
	const struct dma_silabs_data *data = dev->data;

	if (channel >= data->dma_ctx.dma_channels) {
		return -EINVAL;
	}

	if (!atomic_test_bit(data->dma_ctx.atomic, channel)) {
		return -EINVAL;
	}

	status->busy = data->dma_chan_table[channel].busy;
	status->dir = data->dma_chan_table[channel].dir;

	return 0;
}

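/* Initialize the LDMA controller through the Silabs HAL, then redo the
 * interrupt setup so it matches the devicetree configuration.
 */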
static int dma_silabs_init(const struct device *dev)
{
	const struct dma_silabs_config *config = dev->config;
	LDMA_Init_t dmaInit = {
		/* 0x7 indicates that the 8 channels have round-robin priority. */
		.ldmaInitCtrlNumFixed = 0x7,
		.ldmaInitIrqPriority = DMA_IRQ_PRIORITY,
	};

	/* The clock is managed by em_ldma */

	LDMA_Init(&dmaInit);

	/* LDMA_Init() configures the IRQ, but we want the IRQ to match the one
	 * configured in the devicetree.
	 */
	config->config_irq(dev);

	return 0;
}

static DEVICE_API(dma, dma_funcs) = {
	.config = dma_silabs_configure,
	.start = dma_silabs_start,
	.stop = dma_silabs_stop,
	.get_status = dma_silabs_get_status
};

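/* Connect and enable every interrupt line listed for this instance in the
 * devicetree.
 */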
#define SILABS_DMA_IRQ_CONNECT(n, inst) \
	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq), DT_INST_IRQ_BY_IDX(inst, n, priority), \
		    dma_silabs_irq_handler, DEVICE_DT_INST_GET(inst), 0); \
	irq_enable(DT_INST_IRQ_BY_IDX(inst, n, irq));

#define CONFIGURE_ALL_IRQS(inst, n) LISTIFY(n, SILABS_DMA_IRQ_CONNECT, (), inst)

#define DMA_SILABS_LDMA_INIT(inst) \
	\
	static void silabs_dma_irq_configure_##inst(const struct device *dev) \
	{ \
		ARG_UNUSED(dev); \
		CONFIGURE_ALL_IRQS(inst, DT_NUM_IRQS(DT_DRV_INST(inst))); \
	}; \
	\
	const struct dma_silabs_config dma_silabs_config_##inst = { \
		.config_irq = silabs_dma_irq_configure_##inst \
	}; \
	\
	static ATOMIC_DEFINE(dma_channels_atomic_##inst, DT_INST_PROP(inst, dma_channels)); \
	\
	static struct dma_silabs_channel \
		dma_silabs_channel_##inst[DT_INST_PROP(inst, dma_channels)]; \
	\
	SYS_MEM_BLOCKS_DEFINE_STATIC(desc_pool_##inst, sizeof(LDMA_Descriptor_t), \
				     CONFIG_DMA_MAX_DESCRIPTOR, 4); \
	\
	static struct dma_silabs_data dma_silabs_data_##inst = { \
		.dma_ctx.magic = DMA_MAGIC, \
		.dma_ctx.dma_channels = DT_INST_PROP(inst, dma_channels), \
		.dma_ctx.atomic = dma_channels_atomic_##inst, \
		.dma_chan_table = dma_silabs_channel_##inst, \
		.dma_desc_pool = &desc_pool_##inst \
	}; \
	\
	DEVICE_DT_INST_DEFINE(inst, &dma_silabs_init, NULL, &dma_silabs_data_##inst, \
			      &dma_silabs_config_##inst, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \
			      &dma_funcs);

DT_INST_FOREACH_STATUS_OKAY(DMA_SILABS_LDMA_INIT);