/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/irq.h>
#include <zephyr/cache.h>
#include <zephyr/logging/log.h>
#include "fsl_sdma.h"

LOG_MODULE_REGISTER(nxp_sdma);

#define DMA_NXP_SDMA_BD_COUNT 2
#define DMA_NXP_SDMA_CHAN_DEFAULT_PRIO 4

#define DT_DRV_COMPAT nxp_sdma

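/* The SDMA engine fetches channel context directly from memory, so the
 * context array must live in a noncacheable section.
 */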
AT_NONCACHEABLE_SECTION_ALIGN(static sdma_context_data_t
			      sdma_contexts[FSL_FEATURE_SDMA_MODULE_CHANNEL], 4);

struct sdma_dev_cfg {
	SDMAARM_Type *base;
	void (*irq_config)(void);
};

struct sdma_channel_data {
	sdma_handle_t handle;
	sdma_transfer_config_t transfer_cfg;
	sdma_peripheral_t peripheral;
	uint32_t direction;
	uint32_t index;
	const struct device *dev;
	sdma_buffer_descriptor_t *bd_pool; /* pre-allocated list of BDs used for transfer */
	uint32_t bd_count; /* number of BDs in the pool */
	uint32_t capacity; /* total transfer capacity for this channel */
	struct dma_config *dma_cfg;
	uint32_t event_source; /* DMA request number that triggers this channel */
	struct dma_status stat;

	void *arg; /* argument passed to user-defined DMA callback */
	dma_callback_t cb; /* user-defined callback for DMA transfer completion */
};

struct sdma_dev_data {
	struct dma_context dma_ctx;
	atomic_t *channels_atomic;
	struct sdma_channel_data chan[FSL_FEATURE_SDMA_MODULE_CHANNEL];
	sdma_buffer_descriptor_t bd_pool[FSL_FEATURE_SDMA_MODULE_CHANNEL][DMA_NXP_SDMA_BD_COUNT]
		__aligned(64);
};

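/*
 * Channel state is tracked as a software ring buffer over the channel's total
 * BD capacity: MEMORY_TO_PERIPHERAL channels start full (everything pending),
 * PERIPHERAL_TO_MEMORY channels start empty (everything free).
 */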
static int dma_nxp_sdma_init_stat(struct sdma_channel_data *chan_data)
{
	chan_data->stat.read_position = 0;
	chan_data->stat.write_position = 0;

	switch (chan_data->direction) {
	case MEMORY_TO_PERIPHERAL:
		/* buffer is full */
		chan_data->stat.pending_length = chan_data->capacity;
		chan_data->stat.free = 0;
		break;
	case PERIPHERAL_TO_MEMORY:
		/* buffer is empty */
		chan_data->stat.pending_length = 0;
		chan_data->stat.free = chan_data->capacity;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

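/* Advance the read position by "bytes" consumed from the buffer and
 * recompute the free/pending counters.
 */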
static int dma_nxp_sdma_consume(struct sdma_channel_data *chan_data, uint32_t bytes)
{
	if (bytes > chan_data->stat.pending_length)
		return -EINVAL;

	chan_data->stat.read_position += bytes;
	chan_data->stat.read_position %= chan_data->capacity;

	if (chan_data->stat.read_position > chan_data->stat.write_position)
		chan_data->stat.free = chan_data->stat.read_position -
			chan_data->stat.write_position;
	else
		chan_data->stat.free = chan_data->capacity -
			(chan_data->stat.write_position - chan_data->stat.read_position);

	chan_data->stat.pending_length = chan_data->capacity - chan_data->stat.free;

	return 0;
}

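/* Advance the write position by "bytes" produced into the buffer and
 * recompute the free/pending counters.
 */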
static int dma_nxp_sdma_produce(struct sdma_channel_data *chan_data, uint32_t bytes)
{
	if (bytes > chan_data->stat.free)
		return -EINVAL;

	chan_data->stat.write_position += bytes;
	chan_data->stat.write_position %= chan_data->capacity;

	if (chan_data->stat.write_position > chan_data->stat.read_position)
		chan_data->stat.pending_length = chan_data->stat.write_position -
			chan_data->stat.read_position;
	else
		chan_data->stat.pending_length = chan_data->capacity -
			(chan_data->stat.read_position - chan_data->stat.write_position);

	chan_data->stat.free = chan_data->capacity - chan_data->stat.pending_length;

	return 0;
}
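
/*
 * Worked example (illustrative numbers only): with capacity = 4096 and a
 * PERIPHERAL_TO_MEMORY channel, the counters start at free = 4096,
 * pending_length = 0. After produce(2048): write_position = 2048,
 * pending_length = 2048, free = 2048. After consume(2048): read_position
 * catches up with write_position, so free = 4096 and pending_length = 0
 * again.
 */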

/* Common ISR: channel 0 is owned by the SDMA engine itself (used only for
 * script/context download), so its status bit is cleared and the walk over
 * the per-channel bits starts at channel 1.
 */
static void dma_nxp_sdma_isr(const void *data)
{
	uint32_t val;
	uint32_t i = 1;
	struct sdma_channel_data *chan_data;
	const struct device *dev = data;
	struct sdma_dev_data *dev_data = dev->data;
	const struct sdma_dev_cfg *dev_cfg = dev->config;

	/* clear channel 0's interrupt status */
	SDMA_ClearChannelInterruptStatus(dev_cfg->base, 1U);

	/* ignore channel 0: it is used only for firmware download */
	val = SDMA_GetChannelInterruptStatus(dev_cfg->base) >> 1U;
	while (val) {
		if ((val & 0x1) != 0) {
			chan_data = &dev_data->chan[i];
			SDMA_ClearChannelInterruptStatus(dev_cfg->base, 1 << i);
			SDMA_HandleIRQ(&chan_data->handle);

			if (chan_data->cb)
				chan_data->cb(chan_data->dev, chan_data->arg, i, DMA_STATUS_BLOCK);
		}
		i++;
		val >>= 1;
	}
}

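/* Map the Zephyr channel direction onto the corresponding SDMA transfer type. */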
static void sdma_set_transfer_type(struct dma_config *config, sdma_transfer_type_t *type)
{
	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		*type = kSDMA_MemoryToMemory;
		break;
	case MEMORY_TO_PERIPHERAL:
		*type = kSDMA_MemoryToPeripheral;
		break;
	case PERIPHERAL_TO_MEMORY:
		*type = kSDMA_PeripheralToMemory;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
		*type = kSDMA_PeripheralToPeripheral;
		break;
	default:
		LOG_ERR("%s: channel direction not supported %d", __func__,
			config->channel_direction);
		return;
	}
	LOG_DBG("%s: dir %d type = %d", __func__, config->channel_direction, *type);
}

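/* Validate the requested dma_slot against the SDMA peripheral types this
 * driver currently supports.
 */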
static int sdma_set_peripheral_type(struct dma_config *config, sdma_peripheral_t *type)
{
	switch (config->dma_slot) {
	case kSDMA_PeripheralNormal_SP:
	case kSDMA_PeripheralMultiFifoPDM:
		*type = config->dma_slot;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

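/* Per-BD completion callback invoked from SDMA_HandleIRQ(): update the ring
 * counters, re-arm the BD that just completed and retrigger the channel.
 */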
static void dma_nxp_sdma_callback(sdma_handle_t *handle, void *userData, bool transferDone,
				  uint32_t bdIndex)
{
	const struct sdma_dev_cfg *dev_cfg;
	struct sdma_channel_data *chan_data = userData;
	sdma_buffer_descriptor_t *bd;
	int xfer_size;

	dev_cfg = chan_data->dev->config;

	xfer_size = chan_data->capacity / chan_data->bd_count;

	switch (chan_data->direction) {
	case MEMORY_TO_PERIPHERAL:
		dma_nxp_sdma_consume(chan_data, xfer_size);
		break;
	case PERIPHERAL_TO_MEMORY:
		dma_nxp_sdma_produce(chan_data, xfer_size);
		break;
	default:
		break;
	}

	/* prepare the completed BD for the next transfer round */
	bd = &chan_data->bd_pool[bdIndex];
	bd->count = xfer_size;
	bd->status |= (uint8_t)kSDMA_BDStatusDone;

	SDMA_StartChannelSoftware(dev_cfg->base, chan_data->index);
}

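/* Create the MCUX SDMA handle for this channel and register the per-BD
 * completion callback.
 */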
static int dma_nxp_sdma_channel_init(const struct device *dev, uint32_t channel)
{
	const struct sdma_dev_cfg *dev_cfg = dev->config;
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	chan_data = &dev_data->chan[channel];
	SDMA_CreateHandle(&chan_data->handle, dev_cfg->base, channel, &sdma_contexts[channel]);

	SDMA_SetCallback(&chan_data->handle, dma_nxp_sdma_callback, chan_data);

	return 0;
}

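/* Build the channel's BD chain from the Zephyr block list. Every BD raises an
 * interrupt on completion and the last one wraps back to the first, forming a
 * cyclic transfer.
 */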
static void dma_nxp_sdma_setup_bd(const struct device *dev, uint32_t channel,
				  struct dma_config *config)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;
	sdma_buffer_descriptor_t *crt_bd;
	struct dma_block_config *block_cfg;
	int i;

	chan_data = &dev_data->chan[channel];

	/* initialize bd pool */
	chan_data->bd_pool = &dev_data->bd_pool[channel][0];
	chan_data->bd_count = config->block_count;

	memset(chan_data->bd_pool, 0, sizeof(sdma_buffer_descriptor_t) * chan_data->bd_count);
	SDMA_InstallBDMemory(&chan_data->handle, chan_data->bd_pool, chan_data->bd_count);

	crt_bd = chan_data->bd_pool;
	block_cfg = config->head_block;

	/* reset the capacity in case the channel is being reconfigured */
	chan_data->capacity = 0;

	for (i = 0; i < config->block_count; i++) {
		bool is_last = false;
		bool is_wrap = false;

		if (i == config->block_count - 1) {
			is_last = true;
			is_wrap = true;
		}

		SDMA_ConfigBufferDescriptor(crt_bd,
					    block_cfg->source_address, block_cfg->dest_address,
					    config->source_data_size, block_cfg->block_size,
					    is_last, true, is_wrap, chan_data->transfer_cfg.type);

		chan_data->capacity += block_cfg->block_size;
		block_cfg = block_cfg->next_block;
		crt_bd++;
	}
}

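/* Configure a channel: resolve transfer/peripheral types, build the BD chain,
 * reset the ring counters and submit the first transfer. The channel is not
 * started here; that happens in dma_nxp_sdma_start().
 */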
static int dma_nxp_sdma_config(const struct device *dev, uint32_t channel,
			       struct dma_config *config)
{
	const struct sdma_dev_cfg *dev_cfg = dev->config;
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;
	struct dma_block_config *block_cfg;
	int ret;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	dma_nxp_sdma_channel_init(dev, channel);

	chan_data = &dev_data->chan[channel];
	chan_data->dev = dev;
	chan_data->direction = config->channel_direction;

	chan_data->cb = config->dma_callback;
	chan_data->arg = config->user_data;

	sdma_set_transfer_type(config, &chan_data->transfer_cfg.type);

	ret = sdma_set_peripheral_type(config, &chan_data->peripheral);
	if (ret < 0) {
		LOG_ERR("%s: failed to set peripheral type", __func__);
		return ret;
	}

	if (chan_data->peripheral == kSDMA_PeripheralMultiFifoPDM) {
		unsigned int n_fifos = 4; /* TODO: make this configurable */

		SDMA_SetMultiFifoConfig(&chan_data->transfer_cfg, n_fifos, 0);
		SDMA_EnableSwDone(dev_cfg->base, &chan_data->transfer_cfg, 0,
				  chan_data->peripheral);
	}

	dma_nxp_sdma_setup_bd(dev, channel, config);
	ret = dma_nxp_sdma_init_stat(chan_data);
	if (ret < 0) {
		LOG_ERR("%s: failed to init stat", __func__);
		return ret;
	}

	block_cfg = config->head_block;

	/* prepare first block for transfer... */
	SDMA_PrepareTransfer(&chan_data->transfer_cfg,
			     block_cfg->source_address,
			     block_cfg->dest_address,
			     config->source_data_size, config->dest_data_size,
			     /* watermark = */ 64,
			     block_cfg->block_size, chan_data->event_source,
			     chan_data->peripheral, chan_data->transfer_cfg.type);

	/* ... and submit it to the SDMA engine. Note that the transfer is
	 * later started manually by dma_nxp_sdma_start().
	 */
	chan_data->transfer_cfg.isEventIgnore = false;
	chan_data->transfer_cfg.isSoftTriggerIgnore = false;
	SDMA_SubmitTransfer(&chan_data->handle, &chan_data->transfer_cfg);

	return 0;
}

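/*
 * Typical usage from a client driver, as a minimal sketch. The names, buffer
 * sizes and event source number below are illustrative only; the event source
 * is the SoC-specific DMA request line of the peripheral being served.
 *
 *	static struct dma_block_config blocks[2] = {
 *		{ .source_address = (uint32_t)buf,
 *		  .dest_address = fifo_addr,
 *		  .block_size = 2048, .next_block = &blocks[1] },
 *		{ .source_address = (uint32_t)buf + 2048,
 *		  .dest_address = fifo_addr,
 *		  .block_size = 2048, .next_block = NULL },
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.dma_slot = kSDMA_PeripheralNormal_SP,
 *		.source_data_size = 2, .dest_data_size = 2,
 *		.block_count = 2, .head_block = &blocks[0],
 *		.dma_callback = my_cb, .user_data = my_data,
 *	};
 *	int event_source = 5;
 *	int chan = dma_request_channel(dev, &event_source);
 *
 *	if (chan >= 0 && dma_config(dev, chan, &cfg) == 0)
 *		dma_start(dev, chan);
 */
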
static int dma_nxp_sdma_start(const struct device *dev, uint32_t channel)
{
	const struct sdma_dev_cfg *dev_cfg = dev->config;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	SDMA_SetChannelPriority(dev_cfg->base, channel, DMA_NXP_SDMA_CHAN_DEFAULT_PRIO);
	SDMA_StartChannelSoftware(dev_cfg->base, channel);

	return 0;
}

static int dma_nxp_sdma_stop(const struct device *dev, uint32_t channel)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	SDMA_StopTransfer(&chan_data->handle);
	return 0;
}

static int dma_nxp_sdma_get_status(const struct device *dev, uint32_t channel,
				   struct dma_status *stat)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	stat->free = chan_data->stat.free;
	stat->pending_length = chan_data->stat.pending_length;

	return 0;
}

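/* reload() only updates the software ring counters: the hardware BDs are
 * re-armed from the completion callback, so no SDMA reprogramming is needed
 * here and src/dst are unused for these cyclic transfers.
 */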
static int dma_nxp_sdma_reload(const struct device *dev, uint32_t channel, uint32_t src,
			       uint32_t dst, size_t size)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	if (!size)
		return 0;

	if (chan_data->direction == MEMORY_TO_PERIPHERAL)
		/* the client has written new data into the buffer */
		dma_nxp_sdma_produce(chan_data, size);
	else
		/* the client has read data out of the buffer */
		dma_nxp_sdma_consume(chan_data, size);

	return 0;
}

static int dma_nxp_sdma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
	switch (type) {
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
		*val = 4;
		break;
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*val = 128; /* should be dcache_align */
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*val = DMA_NXP_SDMA_BD_COUNT;
		break;
	default:
		LOG_ERR("invalid attribute type: %d", type);
		return -EINVAL;
	}
	return 0;
}

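/* Channel filter for dma_request_channel(): reject the reserved boot channel
 * and out-of-range ids, and latch the caller-provided event source (DMA
 * request line) for the selected channel.
 */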
static bool sdma_channel_filter(const struct device *dev, int chan_id, void *param)
{
	struct sdma_dev_data *dev_data = dev->data;

	/* channel 0 is reserved as the boot/download channel */
	if (chan_id == 0)
		return false;

	if (chan_id >= FSL_FEATURE_SDMA_MODULE_CHANNEL)
		return false;

	dev_data->chan[chan_id].event_source = *((int *)param);
	dev_data->chan[chan_id].index = chan_id;

	return true;
}

static DEVICE_API(dma, sdma_api) = {
	.reload = dma_nxp_sdma_reload,
	.config = dma_nxp_sdma_config,
	.start = dma_nxp_sdma_start,
	.stop = dma_nxp_sdma_stop,
	.suspend = dma_nxp_sdma_stop,
	.resume = dma_nxp_sdma_start,
	.get_status = dma_nxp_sdma_get_status,
	.get_attribute = dma_nxp_sdma_get_attribute,
	.chan_filter = sdma_channel_filter,
};

static int dma_nxp_sdma_init(const struct device *dev)
{
	struct sdma_dev_data *data = dev->data;
	const struct sdma_dev_cfg *cfg = dev->config;
	sdma_config_t defconfig;

	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = FSL_FEATURE_SDMA_MODULE_CHANNEL;
	data->dma_ctx.atomic = data->channels_atomic;

	SDMA_GetDefaultConfig(&defconfig);
	defconfig.ratio = kSDMA_ARMClockFreq;

	SDMA_Init(cfg->base, &defconfig);

	/* configure interrupts */
	cfg->irq_config();

	return 0;
}

#define DMA_NXP_SDMA_INIT(inst)						\
	static ATOMIC_DEFINE(dma_nxp_sdma_channels_atomic_##inst,	\
			     FSL_FEATURE_SDMA_MODULE_CHANNEL);		\
	static struct sdma_dev_data sdma_data_##inst = {		\
		.channels_atomic = dma_nxp_sdma_channels_atomic_##inst,	\
	};								\
	static void dma_nxp_sdma_##inst##_irq_config(void);		\
	static const struct sdma_dev_cfg sdma_cfg_##inst = {		\
		.base = (SDMAARM_Type *)DT_INST_REG_ADDR(inst),		\
		.irq_config = dma_nxp_sdma_##inst##_irq_config,		\
	};								\
	static void dma_nxp_sdma_##inst##_irq_config(void)		\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(inst),				\
			    DT_INST_IRQ(inst, priority),		\
			    dma_nxp_sdma_isr, DEVICE_DT_INST_GET(inst), 0);\
		irq_enable(DT_INST_IRQN(inst));				\
	}								\
	DEVICE_DT_INST_DEFINE(inst, &dma_nxp_sdma_init, NULL,		\
			      &sdma_data_##inst, &sdma_cfg_##inst,	\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
			      &sdma_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_NXP_SDMA_INIT);