/*
 * Copyright (c) 2017 comsuisse AG
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam_xdmac

/** @file
 * @brief Atmel SAM MCU family Direct Memory Access (XDMAC) driver.
 */

#include <errno.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <string.h>
#include <soc.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include "dma_sam_xdmac.h"

#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dma_sam_xdmac);

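/* Channel error interrupts: read bus error, write bus error, request overflow error */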
#define XDMAC_INT_ERR (XDMAC_CIE_RBIE | XDMAC_CIE_WBIE | XDMAC_CIE_ROIE)
#define DMA_CHANNELS_NO XDMACCHID_NUMBER

/* DMA channel configuration */
struct sam_xdmac_channel_cfg {
	void *user_data;
	dma_callback_t callback;
	uint32_t data_size;
};

/* Device constant configuration parameters */
struct sam_xdmac_dev_cfg {
	Xdmac *regs;
	void (*irq_config)(void);
	const struct atmel_sam_pmc_config clock_cfg;
	uint8_t irq_id;
};

/* Device run time data */
struct sam_xdmac_dev_data {
	struct sam_xdmac_channel_cfg dma_channels[DMA_CHANNELS_NO];
};

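/*
 * Shared interrupt handler: read the global interrupt status, then invoke the
 * registered callback for every channel with a pending interrupt, passing any
 * error bits latched in that channel's interrupt status register.
 */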
static void sam_xdmac_isr(const struct device *dev)
{
	const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;
	struct sam_xdmac_dev_data *const dev_data = dev->data;

	Xdmac * const xdmac = dev_cfg->regs;
	struct sam_xdmac_channel_cfg *channel_cfg;
	uint32_t isr_status;
	uint32_t err;

	/* Get global interrupt status */
	isr_status = xdmac->XDMAC_GIS;

	for (int channel = 0; channel < DMA_CHANNELS_NO; channel++) {
		if (!(isr_status & (1 << channel))) {
			continue;
		}

		channel_cfg = &dev_data->dma_channels[channel];

		/* Get channel errors */
		err = xdmac->XDMAC_CHID[channel].XDMAC_CIS & XDMAC_INT_ERR;

		/* Execute callback */
		if (channel_cfg->callback) {
			channel_cfg->callback(dev, channel_cfg->user_data,
					      channel, err);
		}
	}
}

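/*
 * Write a raw channel configuration (XDMAC_CC, stride registers and interrupt
 * enables) to an idle channel. Returns -EINVAL for an invalid channel number
 * and -EBUSY if the channel is currently enabled.
 */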
int sam_xdmac_channel_configure(const struct device *dev, uint32_t channel,
				struct sam_xdmac_channel_config *param)
{
	const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;

	Xdmac * const xdmac = dev_cfg->regs;

	if (channel >= DMA_CHANNELS_NO) {
		return -EINVAL;
	}

	/* Check if the channel is enabled */
	if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) {
		return -EBUSY;
	}

	/* Disable all channel interrupts */
	xdmac->XDMAC_CHID[channel].XDMAC_CID = 0xFF;
	/* Clear pending Interrupt Status bit(s) */
	(void)xdmac->XDMAC_CHID[channel].XDMAC_CIS;

	/* NOTE:
	 * Setting channel configuration is not required for linked list view 2
	 * to 3 modes. It is done anyway to keep the code simple. It has no
	 * negative impact on the DMA functionality.
	 */

	/* Set channel configuration */
	xdmac->XDMAC_CHID[channel].XDMAC_CC = param->cfg;

	/* Set data stride memory pattern */
	xdmac->XDMAC_CHID[channel].XDMAC_CDS_MSP = param->ds_msp;
	/* Set source microblock stride */
	xdmac->XDMAC_CHID[channel].XDMAC_CSUS = param->sus;
	/* Set destination microblock stride */
	xdmac->XDMAC_CHID[channel].XDMAC_CDUS = param->dus;

	/* Enable selected channel interrupts */
	xdmac->XDMAC_CHID[channel].XDMAC_CIE = param->cie;

	return 0;
}

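/*
 * Program a single transfer on an idle channel: source/destination addresses
 * plus either the microblock/block lengths (linked list disabled) or the
 * first descriptor address (linked list enabled).
 */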
int sam_xdmac_transfer_configure(const struct device *dev, uint32_t channel,
				 struct sam_xdmac_transfer_config *param)
{
	const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;

	Xdmac * const xdmac = dev_cfg->regs;

	if (channel >= DMA_CHANNELS_NO) {
		return -EINVAL;
	}

	/* Check if the channel is enabled */
	if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) {
		return -EBUSY;
	}

	/* NOTE:
	 * Setting source, destination address is not required for linked list
	 * view 1 to 3 modes. It is done anyway to keep the code simple. It has
	 * no negative impact on the DMA functionality.
	 */

	/* Set source address */
	xdmac->XDMAC_CHID[channel].XDMAC_CSA = param->sa;
	/* Set destination address */
	xdmac->XDMAC_CHID[channel].XDMAC_CDA = param->da;

	if ((param->ndc & XDMAC_CNDC_NDE) == XDMAC_CNDC_NDE_DSCR_FETCH_DIS) {
		/*
		 * Linked List is disabled, configure additional transfer
		 * parameters.
		 */

		/* Set length of data in the microblock */
		xdmac->XDMAC_CHID[channel].XDMAC_CUBC = param->ublen;
		/* Set block length: block length is (blen+1) microblocks */
		xdmac->XDMAC_CHID[channel].XDMAC_CBC = param->blen;
	} else {
		/*
		 * Linked List is enabled, configure additional transfer
		 * parameters.
		 */

		/* Set next descriptor address */
		xdmac->XDMAC_CHID[channel].XDMAC_CNDA = param->nda;
	}

	/* Set next descriptor configuration */
	xdmac->XDMAC_CHID[channel].XDMAC_CNDC = param->ndc;

	return 0;
}

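/*
 * Implementation of the generic Zephyr DMA API 'config' call: translate a
 * struct dma_config into the XDMAC channel and transfer register settings.
 */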
static int sam_xdmac_config(const struct device *dev, uint32_t channel,
			    struct dma_config *cfg)
{
	struct sam_xdmac_dev_data *const dev_data = dev->data;
	struct sam_xdmac_channel_config channel_cfg;
	struct sam_xdmac_transfer_config transfer_cfg;
	uint32_t burst_size;
	uint32_t data_size;
	int ret;

	if (channel >= DMA_CHANNELS_NO) {
		return -EINVAL;
	}

	__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
	__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);

	if (cfg->source_data_size != 1U && cfg->source_data_size != 2U &&
	    cfg->source_data_size != 4U) {
		LOG_ERR("Invalid 'source_data_size' value");
		return -EINVAL;
	}

	if (cfg->block_count != 1U) {
		LOG_ERR("Only single block transfer is currently supported."
			" Please submit a patch.");
		return -EINVAL;
	}

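	/*
	 * The hardware encodes burst length and data width as powers of two,
	 * so convert the byte counts from the API to their log2 values.
	 */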
	burst_size = find_msb_set(cfg->source_burst_length) - 1;
	LOG_DBG("burst_size=%d", burst_size);
	data_size = find_msb_set(cfg->source_data_size) - 1;
	dev_data->dma_channels[channel].data_size = data_size;
	LOG_DBG("data_size=%d", data_size);

	uint32_t xdmac_inc_cfg = 0;

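	/*
	 * Select incrementing or fixed addressing. The source address is only
	 * incremented for memory-to-peripheral transfers and the destination
	 * address only for peripheral-to-memory transfers; memory-to-memory
	 * transfers always increment both (see the switch below).
	 */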
	if (cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT
	    && cfg->channel_direction == MEMORY_TO_PERIPHERAL) {
		xdmac_inc_cfg |= XDMAC_CC_SAM_INCREMENTED_AM;
	} else {
		xdmac_inc_cfg |= XDMAC_CC_SAM_FIXED_AM;
	}

	if (cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT
	    && cfg->channel_direction == PERIPHERAL_TO_MEMORY) {
		xdmac_inc_cfg |= XDMAC_CC_DAM_INCREMENTED_AM;
	} else {
		xdmac_inc_cfg |= XDMAC_CC_DAM_FIXED_AM;
	}

	switch (cfg->channel_direction) {
	case MEMORY_TO_MEMORY:
		channel_cfg.cfg =
			XDMAC_CC_TYPE_MEM_TRAN
			| XDMAC_CC_MBSIZE(burst_size == 0U ? 0 : burst_size - 1)
			| XDMAC_CC_SAM_INCREMENTED_AM
			| XDMAC_CC_DAM_INCREMENTED_AM;
		break;
	case MEMORY_TO_PERIPHERAL:
		channel_cfg.cfg =
			XDMAC_CC_TYPE_PER_TRAN
			| XDMAC_CC_CSIZE(burst_size)
			| XDMAC_CC_DSYNC_MEM2PER
			| xdmac_inc_cfg;
		break;
	case PERIPHERAL_TO_MEMORY:
		channel_cfg.cfg =
			XDMAC_CC_TYPE_PER_TRAN
			| XDMAC_CC_CSIZE(burst_size)
			| XDMAC_CC_DSYNC_PER2MEM
			| xdmac_inc_cfg;
		break;
	default:
		LOG_ERR("'channel_direction' value %d is not supported",
			cfg->channel_direction);
		return -EINVAL;
	}

	channel_cfg.cfg |=
		XDMAC_CC_DWIDTH(data_size)
		| XDMAC_CC_SIF_AHB_IF1
		| XDMAC_CC_DIF_AHB_IF1
		| XDMAC_CC_PERID(cfg->dma_slot);
	channel_cfg.ds_msp = 0U;
	channel_cfg.sus = 0U;
	channel_cfg.dus = 0U;
	channel_cfg.cie =
		(cfg->complete_callback_en ? XDMAC_CIE_BIE : XDMAC_CIE_LIE)
		| (cfg->error_callback_dis ? 0 : XDMAC_INT_ERR);

	ret = sam_xdmac_channel_configure(dev, channel, &channel_cfg);
	if (ret < 0) {
		return ret;
	}

	dev_data->dma_channels[channel].callback = cfg->dma_callback;
	dev_data->dma_channels[channel].user_data = cfg->user_data;

	(void)memset(&transfer_cfg, 0, sizeof(transfer_cfg));
	transfer_cfg.sa = cfg->head_block->source_address;
	transfer_cfg.da = cfg->head_block->dest_address;
	transfer_cfg.ublen = cfg->head_block->block_size >> data_size;

	ret = sam_xdmac_transfer_configure(dev, channel, &transfer_cfg);

	return ret;
}

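/*
 * Implementation of the DMA API 'reload' call: reprogram the addresses and
 * length of a previously configured channel, reusing the data size stored by
 * sam_xdmac_config().
 */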
static int sam_xdmac_transfer_reload(const struct device *dev, uint32_t channel,
				     uint32_t src, uint32_t dst, size_t size)
{
	struct sam_xdmac_dev_data *const dev_data = dev->data;
	struct sam_xdmac_transfer_config transfer_cfg = {
		.sa = src,
		.da = dst,
		.ublen = size >> dev_data->dma_channels[channel].data_size,
	};

	return sam_xdmac_transfer_configure(dev, channel, &transfer_cfg);
}

int sam_xdmac_transfer_start(const struct device *dev, uint32_t channel)
{
	const struct sam_xdmac_dev_cfg *config = dev->config;

	Xdmac * const xdmac = config->regs;

	if (channel >= DMA_CHANNELS_NO) {
		LOG_DBG("Channel %d out of range", channel);
		return -EINVAL;
	}

	/* Check if the channel is enabled */
	if (xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel)) {
		LOG_DBG("Channel %d already enabled", channel);
		return -EBUSY;
	}

	/* Enable channel interrupt */
	xdmac->XDMAC_GIE = XDMAC_GIE_IE0 << channel;
	/* Enable channel */
	xdmac->XDMAC_GE = XDMAC_GE_EN0 << channel;

	return 0;
}

int sam_xdmac_transfer_stop(const struct device *dev, uint32_t channel)
{
	const struct sam_xdmac_dev_cfg *config = dev->config;

	Xdmac * const xdmac = config->regs;

	if (channel >= DMA_CHANNELS_NO) {
		return -EINVAL;
	}

	/* Check if the channel is enabled */
	if (!(xdmac->XDMAC_GS & (XDMAC_GS_ST0 << channel))) {
		return 0;
	}

	/* Disable channel */
	xdmac->XDMAC_GD = XDMAC_GD_DI0 << channel;
	/* Disable channel interrupt */
	xdmac->XDMAC_GID = XDMAC_GID_ID0 << channel;
	/* Disable all channel interrupts */
	xdmac->XDMAC_CHID[channel].XDMAC_CID = 0xFF;
	/* Clear the pending Interrupt Status bit(s) */
	(void)xdmac->XDMAC_CHID[channel].XDMAC_CIS;

	return 0;
}

static int sam_xdmac_initialize(const struct device *dev)
{
	const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;

	Xdmac * const xdmac = dev_cfg->regs;

	/* Configure interrupts */
	dev_cfg->irq_config();

	/* Enable XDMAC clock in PMC */
	(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
			       (clock_control_subsys_t)&dev_cfg->clock_cfg);

	/* Disable all channels */
	xdmac->XDMAC_GD = UINT32_MAX;
	/* Disable all channel interrupts */
	xdmac->XDMAC_GID = UINT32_MAX;

	/* Enable module's IRQ */
	irq_enable(dev_cfg->irq_id);

	LOG_INF("Device %s initialized", dev->name);

	return 0;
}

static int sam_xdmac_get_status(const struct device *dev, uint32_t channel,
				struct dma_status *status)
{
	const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;

	Xdmac * const xdmac = dev_cfg->regs;
	uint32_t chan_cfg = xdmac->XDMAC_CHID[channel].XDMAC_CC;
	uint32_t ublen = xdmac->XDMAC_CHID[channel].XDMAC_CUBC;

	/* Check the XDMAC_CC register fields to determine the DMA transfer direction */
	if ((chan_cfg & XDMAC_CC_TYPE_Msk) == 0) {
		status->dir = MEMORY_TO_MEMORY;
	} else if ((chan_cfg & XDMAC_CC_DSYNC_Msk) == XDMAC_CC_DSYNC_MEM2PER) {
		status->dir = MEMORY_TO_PERIPHERAL;
	} else {
		status->dir = PERIPHERAL_TO_MEMORY;
	}

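	/*
	 * Busy is derived from the channel configuration INITD flag and from
	 * the remaining microblock length.
	 */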
	status->busy = ((chan_cfg & XDMAC_CC_INITD_Msk) != 0) || (ublen > 0);
	status->pending_length = ublen;

	return 0;
}

static DEVICE_API(dma, sam_xdmac_driver_api) = {
	.config = sam_xdmac_config,
	.reload = sam_xdmac_transfer_reload,
	.start = sam_xdmac_transfer_start,
	.stop = sam_xdmac_transfer_stop,
	.get_status = sam_xdmac_get_status,
};

/* DMA0 */

static void dma0_sam_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), sam_xdmac_isr,
		    DEVICE_DT_INST_GET(0), 0);
}

static const struct sam_xdmac_dev_cfg dma0_sam_config = {
	.regs = (Xdmac *)DT_INST_REG_ADDR(0),
	.irq_config = dma0_sam_irq_config,
	.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0),
	.irq_id = DT_INST_IRQN(0),
};

static struct sam_xdmac_dev_data dma0_sam_data;

DEVICE_DT_INST_DEFINE(0, &sam_xdmac_initialize, NULL,
		      &dma0_sam_data, &dma0_sam_config, POST_KERNEL,
		      CONFIG_DMA_INIT_PRIORITY, &sam_xdmac_driver_api);

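/*
 * Usage sketch (not part of the driver): a typical memory-to-peripheral setup
 * through the generic Zephyr DMA API, which ends up in sam_xdmac_config() and
 * sam_xdmac_transfer_start() above. The devicetree label ("dma0"), the
 * peripheral destination register, the buffer and the .dma_slot value are
 * illustrative assumptions, not values defined by this driver.
 *
 *	static void dma_done(const struct device *dev, void *user_data,
 *			     uint32_t channel, int status)
 *	{
 *		// status carries the XDMAC error bits reported by the ISR,
 *		// 0 when the transfer completed without errors
 *	}
 *
 *	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *	static uint8_t tx_buf[64];
 *
 *	struct dma_block_config block = {
 *		.source_address = (uint32_t)tx_buf,
 *		.dest_address = (uint32_t)&periph_tx_reg,	// hypothetical
 *		.block_size = sizeof(tx_buf),
 *		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *		.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.source_data_size = 1,
 *		.dest_data_size = 1,
 *		.source_burst_length = 1,
 *		.dest_burst_length = 1,
 *		.block_count = 1,
 *		.head_block = &block,
 *		.dma_slot = <peripheral XDMAC hardware request ID>,
 *		.dma_callback = dma_done,
 *	};
 *
 *	dma_config(dma, 0, &cfg);
 *	dma_start(dma, 0);
 */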