/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/irq.h>
#include <zephyr/cache.h>
#include <zephyr/logging/log.h>
#include "fsl_sdma.h"

LOG_MODULE_REGISTER(nxp_sdma);

#define DMA_NXP_SDMA_BD_COUNT 2
#define DMA_NXP_SDMA_CHAN_DEFAULT_PRIO 4

#define DT_DRV_COMPAT nxp_sdma

AT_NONCACHEABLE_SECTION_ALIGN(static sdma_context_data_t
			      sdma_contexts[FSL_FEATURE_SDMA_MODULE_CHANNEL], 4);

struct sdma_dev_cfg {
	SDMAARM_Type *base;
	void (*irq_config)(void);
};

struct sdma_channel_data {
	sdma_handle_t handle;
	sdma_transfer_config_t transfer_cfg;
	sdma_peripheral_t peripheral;
	uint32_t direction;
	uint32_t index;
	const struct device *dev;
	sdma_buffer_descriptor_t *bd_pool; /* pre-allocated list of BDs used for transfers */
	uint32_t bd_count; /* number of BDs in the pool */
	uint32_t capacity; /* total transfer capacity for this channel */
	struct dma_config *dma_cfg;
	uint32_t event_source; /* DMA request (REQ) number that triggers this channel */
	struct dma_status stat;

	void *arg; /* argument passed to the user-defined DMA callback */
	dma_callback_t cb; /* user-defined callback invoked on transfer completion */
};

struct sdma_dev_data {
	struct dma_context dma_ctx;
	atomic_t *channels_atomic;
	struct sdma_channel_data chan[FSL_FEATURE_SDMA_MODULE_CHANNEL];
	sdma_buffer_descriptor_t bd_pool[FSL_FEATURE_SDMA_MODULE_CHANNEL][DMA_NXP_SDMA_BD_COUNT]
				__aligned(64);
};

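/*
 * Initialize the software bookkeeping for a channel's circular buffer:
 * for MEMORY_TO_PERIPHERAL the buffer starts full (everything pending),
 * for PERIPHERAL_TO_MEMORY it starts empty (everything free).
 */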
static int dma_nxp_sdma_init_stat(struct sdma_channel_data *chan_data)
{
	chan_data->stat.read_position = 0;
	chan_data->stat.write_position = 0;

	switch (chan_data->direction) {
	case MEMORY_TO_PERIPHERAL:
		/* buffer is full */
		chan_data->stat.pending_length = chan_data->capacity;
		chan_data->stat.free = 0;
		break;
	case PERIPHERAL_TO_MEMORY:
		/* buffer is empty */
		chan_data->stat.pending_length = 0;
		chan_data->stat.free = chan_data->capacity;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

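/*
 * Advance the read pointer by "bytes" and recompute the free and
 * pending counters. Called when data has been drained from the
 * channel's circular buffer.
 */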
static int dma_nxp_sdma_consume(struct sdma_channel_data *chan_data, uint32_t bytes)
{
	if (bytes > chan_data->stat.pending_length) {
		return -EINVAL;
	}

	chan_data->stat.read_position += bytes;
	chan_data->stat.read_position %= chan_data->capacity;

	if (chan_data->stat.read_position > chan_data->stat.write_position) {
		chan_data->stat.free = chan_data->stat.read_position -
			chan_data->stat.write_position;
	} else {
		chan_data->stat.free = chan_data->capacity -
			(chan_data->stat.write_position - chan_data->stat.read_position);
	}

	chan_data->stat.pending_length = chan_data->capacity - chan_data->stat.free;

	return 0;
}

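/*
 * Advance the write pointer by "bytes" and recompute the free and
 * pending counters. Called when new data has been made available for
 * transfer on the channel.
 */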
static int dma_nxp_sdma_produce(struct sdma_channel_data *chan_data, uint32_t bytes)
{
	if (bytes > chan_data->stat.free) {
		return -EINVAL;
	}

	chan_data->stat.write_position += bytes;
	chan_data->stat.write_position %= chan_data->capacity;

	if (chan_data->stat.write_position > chan_data->stat.read_position) {
		chan_data->stat.pending_length = chan_data->stat.write_position -
			chan_data->stat.read_position;
	} else {
		chan_data->stat.pending_length = chan_data->capacity -
			(chan_data->stat.read_position - chan_data->stat.write_position);
	}

	chan_data->stat.free = chan_data->capacity - chan_data->stat.pending_length;

	return 0;
}

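/*
 * Shared interrupt handler. Channel 0 is reserved for script download,
 * so its status bit is cleared and skipped; for every other channel
 * with a pending interrupt, acknowledge it, let the HAL advance the
 * transfer state and notify the user-provided callback.
 */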
static void dma_nxp_sdma_isr(const void *data)
{
	uint32_t val;
	uint32_t i = 1;
	struct sdma_channel_data *chan_data;
	const struct device *dev = data;
	struct sdma_dev_data *dev_data = dev->data;
	const struct sdma_dev_cfg *dev_cfg = dev->config;

	/* Clear channel 0 */
	SDMA_ClearChannelInterruptStatus(dev_cfg->base, 1U);

	/* Ignore channel 0; it is used only for script download */
	val = SDMA_GetChannelInterruptStatus(dev_cfg->base) >> 1U;
	while (val) {
		if ((val & 0x1) != 0) {
			chan_data = &dev_data->chan[i];
			SDMA_ClearChannelInterruptStatus(dev_cfg->base, 1 << i);
			SDMA_HandleIRQ(&chan_data->handle);

			if (chan_data->cb) {
				chan_data->cb(chan_data->dev, chan_data->arg, i,
					      DMA_STATUS_BLOCK);
			}
		}
		i++;
		val >>= 1;
	}
}

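/*
 * Map the Zephyr channel direction onto the corresponding HAL transfer
 * type. Unsupported directions are logged and leave *type unchanged.
 */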
static void sdma_set_transfer_type(struct dma_config *config, sdma_transfer_type_t *type)
{
	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		*type = kSDMA_MemoryToMemory;
		break;
	case MEMORY_TO_PERIPHERAL:
		*type = kSDMA_MemoryToPeripheral;
		break;
	case PERIPHERAL_TO_MEMORY:
		*type = kSDMA_PeripheralToMemory;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
		*type = kSDMA_PeripheralToPeripheral;
		break;
	default:
		LOG_ERR("%s: channel direction not supported %d", __func__,
			config->channel_direction);
		return;
	}
	LOG_DBG("%s: dir %d type = %d", __func__, config->channel_direction, *type);
}

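/*
 * Validate the devicetree-provided dma_slot and map it to a HAL
 * peripheral type. Only kSDMA_PeripheralNormal_SP and
 * kSDMA_PeripheralMultiFifoPDM are currently supported.
 */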
static int sdma_set_peripheral_type(struct dma_config *config, sdma_peripheral_t *type)
{
	switch (config->dma_slot) {
	case kSDMA_PeripheralNormal_SP:
	case kSDMA_PeripheralMultiFifoPDM:
		*type = config->dma_slot;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

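/*
 * HAL completion callback, invoked from dma_nxp_sdma_isr() through
 * SDMA_HandleIRQ() each time a buffer descriptor finishes. Updates the
 * circular-buffer accounting, re-arms the completed descriptor and
 * software-triggers the channel to keep the BD ring running.
 */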
static void dma_nxp_sdma_callback(sdma_handle_t *handle, void *userData, bool TransferDone,
				  uint32_t bdIndex)
{
	const struct sdma_dev_cfg *dev_cfg;
	struct sdma_channel_data *chan_data = userData;
	sdma_buffer_descriptor_t *bd;
	int xfer_size;

	dev_cfg = chan_data->dev->config;

	xfer_size = chan_data->capacity / chan_data->bd_count;

	switch (chan_data->direction) {
	case MEMORY_TO_PERIPHERAL:
		dma_nxp_sdma_consume(chan_data, xfer_size);
		break;
	case PERIPHERAL_TO_MEMORY:
		dma_nxp_sdma_produce(chan_data, xfer_size);
		break;
	default:
		break;
	}

	bd = &chan_data->bd_pool[bdIndex];
	bd->status |= (uint8_t)kSDMA_BDStatusDone;

	SDMA_StartChannelSoftware(dev_cfg->base, chan_data->index);
}

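/*
 * Create the HAL handle for a channel, bind it to its non-cacheable
 * context area and register the per-descriptor completion callback.
 */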
static int dma_nxp_sdma_channel_init(const struct device *dev, uint32_t channel)
{
	const struct sdma_dev_cfg *dev_cfg = dev->config;
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	chan_data = &dev_data->chan[channel];
	SDMA_CreateHandle(&chan_data->handle, dev_cfg->base, channel, &sdma_contexts[channel]);

	SDMA_SetCallback(&chan_data->handle, dma_nxp_sdma_callback, chan_data);

	return 0;
}

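/*
 * Build the channel's buffer-descriptor ring from the Zephyr block
 * list: each dma_block_config maps to one BD, and the last BD is
 * marked as both final and wrapping so the ring loops back to the
 * first descriptor.
 */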
static void dma_nxp_sdma_setup_bd(const struct device *dev, uint32_t channel,
				  struct dma_config *config)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;
	sdma_buffer_descriptor_t *crt_bd;
	struct dma_block_config *block_cfg;
	int i;

	chan_data = &dev_data->chan[channel];

	/* initialize the BD pool; block_count is expected to stay within
	 * DMA_NXP_SDMA_BD_COUNT, as advertised via DMA_ATTR_MAX_BLOCK_COUNT
	 */
	chan_data->bd_pool = &dev_data->bd_pool[channel][0];
	chan_data->bd_count = config->block_count;

	/* reset capacity; it is re-accumulated from the block list below */
	chan_data->capacity = 0;

	memset(chan_data->bd_pool, 0, sizeof(sdma_buffer_descriptor_t) * chan_data->bd_count);
	SDMA_InstallBDMemory(&chan_data->handle, chan_data->bd_pool, chan_data->bd_count);

	crt_bd = chan_data->bd_pool;
	block_cfg = config->head_block;

	for (i = 0; i < config->block_count; i++) {
		bool is_last = false;
		bool is_wrap = false;

		if (i == config->block_count - 1) {
			is_last = true;
			is_wrap = true;
		}

		SDMA_ConfigBufferDescriptor(crt_bd,
				block_cfg->source_address, block_cfg->dest_address,
				config->source_data_size, block_cfg->block_size,
				is_last, true, is_wrap, chan_data->transfer_cfg.type);

		chan_data->capacity += block_cfg->block_size;
		block_cfg = block_cfg->next_block;
		crt_bd++;
	}
}

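/*
 * Implementation of the dma_config() API: validates the channel,
 * resolves the transfer and peripheral types, builds the BD ring and
 * submits the first transfer. The transfer is only started later, from
 * dma_nxp_sdma_start().
 */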
static int dma_nxp_sdma_config(const struct device *dev, uint32_t channel,
			       struct dma_config *config)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;
	struct dma_block_config *block_cfg;
	int ret;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	dma_nxp_sdma_channel_init(dev, channel);

	chan_data = &dev_data->chan[channel];
	chan_data->dev = dev;
	chan_data->direction = config->channel_direction;

	chan_data->cb = config->dma_callback;
	chan_data->arg = config->user_data;

	sdma_set_transfer_type(config, &chan_data->transfer_cfg.type);

	ret = sdma_set_peripheral_type(config, &chan_data->peripheral);
	if (ret < 0) {
		LOG_ERR("%s: failed to set peripheral type", __func__);
		return ret;
	}

	dma_nxp_sdma_setup_bd(dev, channel, config);
	ret = dma_nxp_sdma_init_stat(chan_data);
	if (ret < 0) {
		LOG_ERR("%s: failed to init stat", __func__);
		return ret;
	}

	block_cfg = config->head_block;

	/* prepare the first block for transfer... */
	SDMA_PrepareTransfer(&chan_data->transfer_cfg,
			     block_cfg->source_address,
			     block_cfg->dest_address,
			     config->source_data_size, config->dest_data_size,
			     /* watermark = */ 64,
			     block_cfg->block_size, chan_data->event_source,
			     chan_data->peripheral, chan_data->transfer_cfg.type);

	/* ... and submit it to the SDMA engine.
	 * Note that the transfer is later started manually by dma_nxp_sdma_start().
	 */
	chan_data->transfer_cfg.isEventIgnore = false;
	chan_data->transfer_cfg.isSoftTriggerIgnore = false;
	SDMA_SubmitTransfer(&chan_data->handle, &chan_data->transfer_cfg);

	return 0;
}

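/*
 * A minimal usage sketch from a client's perspective, through the
 * generic Zephyr DMA API. Names and numbers below (tx_buf, periph_fifo,
 * the REQ value, data sizes) are purely illustrative, not taken from a
 * real board configuration:
 *
 *	int req = 5;	<- SDMA event source (REQ) for the peripheral
 *	struct dma_block_config blk = {
 *		.source_address = (uint32_t)tx_buf,
 *		.dest_address = (uint32_t)periph_fifo,
 *		.block_size = sizeof(tx_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.dma_slot = kSDMA_PeripheralNormal_SP,
 *		.source_data_size = 2,
 *		.dest_data_size = 2,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = my_callback,
 *	};
 *	int chan = dma_request_channel(dev, &req);
 *
 *	if (chan >= 0 && dma_config(dev, chan, &cfg) == 0)
 *		dma_start(dev, chan);
 */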
static int dma_nxp_sdma_start(const struct device *dev, uint32_t channel)
{
	const struct sdma_dev_cfg *dev_cfg = dev->config;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	SDMA_SetChannelPriority(dev_cfg->base, channel, DMA_NXP_SDMA_CHAN_DEFAULT_PRIO);
	SDMA_StartChannelSoftware(dev_cfg->base, channel);

	return 0;
}

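/*
 * Implementation of the dma_stop() API: stops the channel's ongoing
 * transfer through the HAL.
 */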
static int dma_nxp_sdma_stop(const struct device *dev, uint32_t channel)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	SDMA_StopTransfer(&chan_data->handle);
	return 0;
}

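/*
 * Report the software-tracked buffer state. The positions are
 * maintained by the produce/consume helpers rather than read back from
 * the hardware.
 */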
static int dma_nxp_sdma_get_status(const struct device *dev, uint32_t channel,
				   struct dma_status *stat)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	stat->free = chan_data->stat.free;
	stat->pending_length = chan_data->stat.pending_length;

	return 0;
}

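/*
 * The BD ring already wraps around the whole buffer, so a reload only
 * needs to update the software accounting: produce for TX (new data
 * queued by the client), consume for RX (data drained by the client).
 */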
static int dma_nxp_sdma_reload(const struct device *dev, uint32_t channel, uint32_t src,
			       uint32_t dst, size_t size)
{
	struct sdma_dev_data *dev_data = dev->data;
	struct sdma_channel_data *chan_data;

	if (channel >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		LOG_ERR("%s: invalid channel %d", __func__, channel);
		return -EINVAL;
	}

	chan_data = &dev_data->chan[channel];

	if (!size) {
		return 0;
	}

	if (chan_data->direction == MEMORY_TO_PERIPHERAL) {
		dma_nxp_sdma_produce(chan_data, size);
	} else {
		dma_nxp_sdma_consume(chan_data, size);
	}

	return 0;
}

static int dma_nxp_sdma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
	switch (type) {
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
		*val = 4;
		break;
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*val = 128; /* should match the platform's D-cache line alignment */
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*val = DMA_NXP_SDMA_BD_COUNT;
		break;
	default:
		LOG_ERR("invalid attribute type: %d", type);
		return -EINVAL;
	}
	return 0;
}

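/*
 * dma_request_channel() filter: rejects channel 0 (reserved for script
 * download at boot) and out-of-range channels, and records the
 * requested event (REQ) source for the granted channel.
 */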
static bool sdma_channel_filter(const struct device *dev, int chan_id, void *param)
{
	struct sdma_dev_data *dev_data = dev->data;

	/* channel 0 is reserved as the boot/download channel */
	if (chan_id == 0) {
		return false;
	}

	if (chan_id >= FSL_FEATURE_SDMA_MODULE_CHANNEL) {
		return false;
	}

	dev_data->chan[chan_id].event_source = *((int *)param);
	dev_data->chan[chan_id].index = chan_id;

	return true;
}

static DEVICE_API(dma, sdma_api) = {
	.reload = dma_nxp_sdma_reload,
	.config = dma_nxp_sdma_config,
	.start = dma_nxp_sdma_start,
	.stop = dma_nxp_sdma_stop,
	.suspend = dma_nxp_sdma_stop,
	.resume = dma_nxp_sdma_start,
	.get_status = dma_nxp_sdma_get_status,
	.get_attribute = dma_nxp_sdma_get_attribute,
	.chan_filter = sdma_channel_filter,
};

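/*
 * Device init: set up the Zephyr DMA context, initialize the SDMA
 * engine with its default configuration (core clock ratio set to the
 * ARM clock frequency) and hook up the interrupt line.
 */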
static int dma_nxp_sdma_init(const struct device *dev)
{
	struct sdma_dev_data *data = dev->data;
	const struct sdma_dev_cfg *cfg = dev->config;
	sdma_config_t defconfig;

	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = FSL_FEATURE_SDMA_MODULE_CHANNEL;
	data->dma_ctx.atomic = data->channels_atomic;

	SDMA_GetDefaultConfig(&defconfig);
	defconfig.ratio = kSDMA_ARMClockFreq;

	SDMA_Init(cfg->base, &defconfig);

	/* configure interrupts */
	cfg->irq_config();

	return 0;
}

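/*
 * Per-instance boilerplate: one atomic channel bitmap, one data/config
 * pair and one IRQ setup helper per enabled devicetree instance.
 */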
#define DMA_NXP_SDMA_INIT(inst)						\
	static ATOMIC_DEFINE(dma_nxp_sdma_channels_atomic_##inst,	\
			     FSL_FEATURE_SDMA_MODULE_CHANNEL);		\
	static struct sdma_dev_data sdma_data_##inst = {		\
		.channels_atomic = dma_nxp_sdma_channels_atomic_##inst, \
	};								\
	static void dma_nxp_sdma_##inst##_irq_config(void);		\
	static const struct sdma_dev_cfg sdma_cfg_##inst = {		\
		.base = (SDMAARM_Type *)DT_INST_REG_ADDR(inst),		\
		.irq_config = dma_nxp_sdma_##inst##_irq_config,		\
	};								\
	static void dma_nxp_sdma_##inst##_irq_config(void)		\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(inst),				\
			    DT_INST_IRQ(inst, priority),		\
			    dma_nxp_sdma_isr, DEVICE_DT_INST_GET(inst), 0); \
		irq_enable(DT_INST_IRQN(inst));				\
	}								\
	DEVICE_DT_INST_DEFINE(inst, &dma_nxp_sdma_init, NULL,		\
			      &sdma_data_##inst, &sdma_cfg_##inst,	\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
			      &sdma_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_NXP_SDMA_INIT);