/*
 * Copyright (c) 2023 Microchip Technology Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_dmac

#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/util_macro.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_mchp_xec, CONFIG_DMA_LOG_LEVEL);

#define XEC_DMA_DEBUG 1
#ifdef XEC_DMA_DEBUG
#include <string.h>
#endif

#define XEC_DMA_ABORT_WAIT_LOOPS 32

#define XEC_DMA_MAIN_REGS_SIZE 0x40
#define XEC_DMA_CHAN_REGS_SIZE 0x40

#define XEC_DMA_CHAN_REGS_ADDR(base, channel) \
	(((uintptr_t)(base) + (XEC_DMA_MAIN_REGS_SIZE)) + \
	 ((uintptr_t)(channel) * XEC_DMA_CHAN_REGS_SIZE))
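
/* For example, with 0x40 bytes of main registers followed by 0x40 bytes per
 * channel, channel 3 registers start at base + 0x40 + (3 * 0x40) = base + 0x100.
 */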

/* main control */
#define XEC_DMA_MAIN_CTRL_REG_MSK 0x3u
#define XEC_DMA_MAIN_CTRL_EN_POS 0
#define XEC_DMA_MAIN_CTRL_SRST_POS 1

/* channel activate register */
#define XEC_DMA_CHAN_ACTV_EN_POS 0
/* channel control register */
#define XEC_DMA_CHAN_CTRL_REG_MSK 0x037fff27u
#define XEC_DMA_CHAN_CTRL_HWFL_RUN_POS 0
#define XEC_DMA_CHAN_CTRL_REQ_POS 1
#define XEC_DMA_CHAN_CTRL_DONE_POS 2
#define XEC_DMA_CHAN_CTRL_BUSY_POS 5
#define XEC_DMA_CHAN_CTRL_M2D_POS 8
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_POS 9
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK 0xfe00u
#define XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK0 0x7fu
#define XEC_DMA_CHAN_CTRL_INCR_MEM_POS 16
#define XEC_DMA_CHAN_CTRL_INCR_DEV_POS 17
#define XEC_DMA_CHAN_CTRL_LOCK_ARB_POS 18
#define XEC_DMA_CHAN_CTRL_DIS_HWFL_POS 19
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_POS 20
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK 0x700000u
#define XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK0 0x7u
#define XEC_DMA_CHAN_CTRL_SWFL_GO_POS 24
#define XEC_DMA_CHAN_CTRL_ABORT_POS 25
/* channel interrupt status and enable registers */
#define XEC_DMA_CHAN_IES_REG_MSK 0xfu
#define XEC_DMA_CHAN_IES_BERR_POS 0
#define XEC_DMA_CHAN_IES_OVFL_ERR_POS 1
#define XEC_DMA_CHAN_IES_DONE_POS 2
#define XEC_DMA_CHAN_IES_DEV_TERM_POS 3
/* channel fsm (RO) */
#define XEC_DMA_CHAN_FSM_REG_MSK 0xffffu
#define XEC_DMA_CHAN_FSM_ARB_STATE_POS 0
#define XEC_DMA_CHAN_FSM_ARB_STATE_MSK 0xffu
#define XEC_DMA_CHAN_FSM_CTRL_STATE_POS 8
#define XEC_DMA_CHAN_FSM_CTRL_STATE_MSK 0xff00u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_IDLE 0
#define XEC_DMA_CHAN_FSM_CTRL_STATE_ARB_REQ 0x100u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_RD_ACT 0x200u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_WR_ACT 0x300u
#define XEC_DMA_CHAN_FSM_CTRL_STATE_WAIT_DONE 0x400u

#define XEC_DMA_HWFL_DEV_VAL(d) \
	(((uint32_t)(d) & XEC_DMA_CHAN_CTRL_HWFL_DEV_MSK0) << XEC_DMA_CHAN_CTRL_HWFL_DEV_POS)

#define XEC_DMA_CHAN_CTRL_UNIT_VAL(u) \
	(((uint32_t)(u) & XEC_DMA_CHAN_CTRL_XFR_UNIT_MSK0) << XEC_DMA_CHAN_CTRL_XFR_UNIT_POS)
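
/* For example, composing a control word for a 4-byte transfer unit on HW flow
 * control device 5 (an illustrative device number, not taken from any binding):
 * XEC_DMA_CHAN_CTRL_UNIT_VAL(4) | XEC_DMA_HWFL_DEV_VAL(5)
 *   = (4 << 20) | (5 << 9) = 0x400a00
 */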

struct dma_xec_chan_regs {
	volatile uint32_t actv;
	volatile uint32_t mem_addr;
	volatile uint32_t mem_addr_end;
	volatile uint32_t dev_addr;
	volatile uint32_t control;
	volatile uint32_t istatus;
	volatile uint32_t ienable;
	volatile uint32_t fsm;
	uint32_t rsvd_20_3f[8];
};

struct dma_xec_regs {
	volatile uint32_t mctrl;
	volatile uint32_t mpkt;
	uint32_t rsvd_08_3f[14];
};

struct dma_xec_irq_info {
	uint8_t gid;  /* GIRQ id [8, 26] */
	uint8_t gpos; /* bit position in GIRQ [0, 31] */
	uint8_t anid; /* aggregated external NVIC input */
	uint8_t dnid; /* direct NVIC input */
};

struct dma_xec_config {
	struct dma_xec_regs *regs;
	uint8_t dma_channels;
	uint8_t dma_requests;
	uint8_t pcr_idx;
	uint8_t pcr_pos;
	int irq_info_size;
	const struct dma_xec_irq_info *irq_info_list;
	void (*irq_connect)(void);
};

struct dma_xec_channel {
	uint32_t control;
	uint32_t mstart;
	uint32_t mend;
	uint32_t dstart;
	uint32_t isr_hw_status;
	uint32_t block_count;
	uint8_t unit_size;
	uint8_t dir;
	uint8_t flags;
	uint8_t rsvd[1];
	struct dma_block_config *head;
	struct dma_block_config *curr;
	dma_callback_t cb;
	void *user_data;
	uint32_t total_req_xfr_len;
	uint32_t total_curr_xfr_len;
};

#define DMA_XEC_CHAN_FLAGS_CB_EOB_POS 0
#define DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS 1

struct dma_xec_data {
	struct dma_context ctx;
	struct dma_xec_channel *channels;
};

#ifdef XEC_DMA_DEBUG
static void xec_dma_debug_clean(void);
#endif

static inline struct dma_xec_chan_regs *xec_chan_regs(struct dma_xec_regs *regs, uint32_t chan)
{
	uint8_t *pregs = (uint8_t *)regs + XEC_DMA_MAIN_REGS_SIZE;

	pregs += (chan * (XEC_DMA_CHAN_REGS_SIZE));

	return (struct dma_xec_chan_regs *)pregs;
}

static inline
struct dma_xec_irq_info const *xec_chan_irq_info(const struct dma_xec_config *devcfg,
						 uint32_t channel)
{
	return &devcfg->irq_info_list[channel];
}

static int is_dma_data_size_valid(uint32_t datasz)
{
	if ((datasz == 1U) || (datasz == 2U) || (datasz == 4U)) {
		return 1;
	}

	return 0;
}

/* If the unit size is 2 or 4 bytes, the HW requires the source and
 * destination addresses to be aligned to at least 2 or 4 bytes respectively.
 */
static int is_data_aligned(uint32_t src, uint32_t dest, uint32_t unitsz)
{
	if (unitsz == 1) {
		return 1;
	}

	if ((src | dest) & (unitsz - 1U)) {
		return 0;
	}

	return 1;
}

static void xec_dma_chan_clr(struct dma_xec_chan_regs * const chregs,
			     const struct dma_xec_irq_info *info)
{
	chregs->actv = 0;
	chregs->control = 0;
	chregs->mem_addr = 0;
	chregs->mem_addr_end = 0;
	chregs->dev_addr = 0;
	chregs->control = 0;
	chregs->ienable = 0;
	chregs->istatus = 0xffu;
	mchp_xec_ecia_girq_src_clr(info->gid, info->gpos);
}

static int is_dma_config_valid(const struct device *dev, struct dma_config *config)
{
	const struct dma_xec_config * const devcfg = dev->config;

	if (config->dma_slot >= (uint32_t)devcfg->dma_requests) {
		LOG_ERR("XEC DMA config dma slot exceeds number of request lines");
		return 0;
	}

	if (config->source_data_size != config->dest_data_size) {
		LOG_ERR("XEC DMA requires source and dest data sizes to be identical");
		return 0;
	}

	if (!((config->channel_direction == MEMORY_TO_MEMORY) ||
	      (config->channel_direction == MEMORY_TO_PERIPHERAL) ||
	      (config->channel_direction == PERIPHERAL_TO_MEMORY))) {
		LOG_ERR("XEC DMA only supports M2M, M2P, P2M");
		return 0;
	}

	if (!is_dma_data_size_valid(config->source_data_size)) {
		LOG_ERR("XEC DMA requires xfr unit size of 1, 2 or 4 bytes");
		return 0;
	}

	if (config->block_count != 1) {
		LOG_ERR("XEC DMA block count != 1");
		return 0;
	}

	return 1;
}

static int check_blocks(struct dma_xec_channel *chdata, struct dma_block_config *block,
			uint32_t block_count, uint32_t unit_size)
{
	if (!block || !chdata) {
		LOG_ERR("bad pointer");
		return -EINVAL;
	}

	chdata->total_req_xfr_len = 0;

	for (uint32_t i = 0; i < block_count; i++) {
		if ((block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT) ||
		    (block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT)) {
			LOG_ERR("XEC DMA HW does not support address decrement. Block index %u", i);
			return -EINVAL;
		}

		if (!is_data_aligned(block->source_address, block->dest_address, unit_size)) {
			LOG_ERR("XEC DMA block at index %u violates source/dest unit size", i);
			return -EINVAL;
		}

		chdata->total_req_xfr_len += block->block_size;
	}

	return 0;
}

/*
 * struct dma_config flags
 * dma_slot - peripheral source/target ID. Not used for Mem2Mem
 * channel_direction - HW supports Mem2Mem, Mem2Periph, and Periph2Mem
 * complete_callback_en - if true invoke callback on completion (no error)
 * error_callback_dis - if true disable callback on error
 * source_handshake - 0=HW, 1=SW
 * dest_handshake - 0=HW, 1=SW
 * channel_priority - 4-bit field. HW implements round-robin only.
 * source_chaining_en - chaining channels together
 * dest_chaining_en - HW does not support channel chaining.
 * linked_channel - HW does not support
 * cyclic - HW does not support cyclic buffer. Would have to emulate with SW.
 * source_data_size - unit size of source data. HW supports 1, 2, or 4 bytes
 * dest_data_size - unit size of dest data. HW requires same as source_data_size
 * source_burst_length - HW does not support
 * dest_burst_length - HW does not support
 * block_count -
 * user_data -
 * dma_callback -
 * head_block - pointer to struct dma_block_config
 *
 * struct dma_block_config
 * source_address -
 * source_gather_interval - N/A
 * dest_address -
 * dest_scatter_interval - N/A
 * dest_scatter_count - N/A
 * source_gather_count - N/A
 * block_size
 * config - flags
 *   source_gather_en - N/A
 *   dest_scatter_en - N/A
 *   source_addr_adj - 0(increment), 1(decrement), 2(no change)
 *   dest_addr_adj - 0(increment), 1(decrement), 2(no change)
 *   source_reload_en - reload source address at end of block
 *   dest_reload_en - reload destination address at end of block
 *   fifo_mode_control - N/A
 *   flow_control_mode - 0(source req service on data available) HW does this
 *                       1(source req postponed until dest req happens) N/A
 *
 *
 * DMA channel implements memory start address, memory end address,
 * and peripheral address registers. No peripheral end address.
 * Transfer ends when memory start address increments and reaches
 * memory end address.
 *
 * Memory to Memory: copy from source_address to dest_address
 *   chan direction = Mem2Dev. chan.control b[8]=1
 *   chan mem_addr = source_address
 *   chan mem_addr_end = source_address + block_size
 *   chan dev_addr = dest_address
 *
 * Memory to Peripheral: copy from source_address(memory) to dest_address(peripheral)
 *   chan direction = Mem2Dev. chan.control b[8]=1
 *   chan mem_addr = source_address
 *   chan mem_addr_end = chan mem_addr + block_size
 *   chan dev_addr = dest_address
 *
 * Peripheral to Memory:
 *   chan direction = Dev2Mem. chan.control b[8]=0
 *   chan mem_addr = dest_address
 *   chan mem_addr_end = chan mem_addr + block_size
 *   chan dev_addr = source_address
 */
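
/* A minimal usage sketch (not part of this driver): a client configures a
 * channel through the generic Zephyr DMA API, which lands in dma_xec_configure()
 * below. The device handle, request line 0, channel 0, buffer name, and the
 * peripheral data register address are illustrative assumptions only.
 *
 *   static void my_dma_done(const struct device *dev, void *user_data,
 *                           uint32_t channel, int status)
 *   {
 *       // status: 0 on success, negative errno (e.g. -EIO on bus error)
 *   }
 *
 *   struct dma_block_config blk = {
 *       .source_address = (uint32_t)tx_buf,
 *       .dest_address = (uint32_t)periph_data_reg_addr,
 *       .block_size = sizeof(tx_buf),
 *       .source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *       .dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE,
 *   };
 *   struct dma_config cfg = {
 *       .dma_slot = 0,                  // HW flow control request line
 *       .channel_direction = MEMORY_TO_PERIPHERAL,
 *       .source_data_size = 1,
 *       .dest_data_size = 1,            // must equal source_data_size
 *       .block_count = 1,               // this HW handles a single block
 *       .head_block = &blk,
 *       .dma_callback = my_dma_done,
 *   };
 *
 *   dma_config(dma_dev, 0, &cfg);
 *   dma_start(dma_dev, 0);
 */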
static int dma_xec_configure(const struct device *dev, uint32_t channel,
			     struct dma_config *config)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	struct dma_xec_data * const data = dev->data;
	uint32_t ctrl, mstart, mend, dstart, unit_size;
	int ret;

	if (!config || (channel >= (uint32_t)devcfg->dma_channels)) {
		return -EINVAL;
	}

#ifdef XEC_DMA_DEBUG
	xec_dma_debug_clean();
#endif

	const struct dma_xec_irq_info *info = xec_chan_irq_info(devcfg, channel);
	struct dma_xec_chan_regs * const chregs = xec_chan_regs(regs, channel);
	struct dma_xec_channel *chdata = &data->channels[channel];

	chdata->total_req_xfr_len = 0;
	chdata->total_curr_xfr_len = 0;

	xec_dma_chan_clr(chregs, info);

	if (!is_dma_config_valid(dev, config)) {
		return -EINVAL;
	}

	struct dma_block_config *block = config->head_block;

	ret = check_blocks(chdata, block, config->block_count, config->source_data_size);
	if (ret) {
		return ret;
	}

	unit_size = config->source_data_size;
	chdata->unit_size = unit_size;
	chdata->head = block;
	chdata->curr = block;
	chdata->block_count = config->block_count;
	chdata->dir = config->channel_direction;

	chdata->flags = 0;
	chdata->cb = config->dma_callback;
	chdata->user_data = config->user_data;

	/* invoke callback on completion of each block instead of all blocks ? */
	if (config->complete_callback_en) {
		chdata->flags |= BIT(DMA_XEC_CHAN_FLAGS_CB_EOB_POS);
	}
	if (config->error_callback_dis) { /* disable callback on errors ? */
		chdata->flags |= BIT(DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS);
	}

	/* Use the control member of struct dma_xec_channel to
	 * store control register value containing fields invariant
	 * for all buffers: HW flow control device, direction, unit size, ...
	 * derived from struct dma_config
	 */
	ctrl = XEC_DMA_CHAN_CTRL_UNIT_VAL(unit_size);
	if (config->channel_direction == MEMORY_TO_MEMORY) {
		ctrl |= BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS);
	} else {
		ctrl |= XEC_DMA_HWFL_DEV_VAL(config->dma_slot);
	}

	if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
		mstart = block->dest_address;
		mend = block->dest_address + block->block_size;
		dstart = block->source_address;
		if (block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_DEV_POS);
		}
		if (block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_MEM_POS);
		}
	} else {
		mstart = block->source_address;
		mend = block->source_address + block->block_size;
		dstart = block->dest_address;
		ctrl |= BIT(XEC_DMA_CHAN_CTRL_M2D_POS);
		if (block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_MEM_POS);
		}
		if (block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
			ctrl |= BIT(XEC_DMA_CHAN_CTRL_INCR_DEV_POS);
		}
	}

	chdata->control = ctrl;
	chdata->mstart = mstart;
	chdata->mend = mend;
	chdata->dstart = dstart;

	chregs->actv &= ~BIT(XEC_DMA_CHAN_ACTV_EN_POS);
	chregs->mem_addr = mstart;
	chregs->mem_addr_end = mend;
	chregs->dev_addr = dstart;

	chregs->control = ctrl;
	chregs->ienable = BIT(XEC_DMA_CHAN_IES_BERR_POS) | BIT(XEC_DMA_CHAN_IES_DONE_POS);
	chregs->actv |= BIT(XEC_DMA_CHAN_ACTV_EN_POS);

	return 0;
}

/* Update a previously configured DMA channel with a new data source address,
 * data destination address, and size in bytes.
 * src = source address for the DMA transfer
 * dst = destination address for the DMA transfer
 * size = size of the DMA transfer in bytes
 * We assume the caller passes src, dst, and size values that match
 * the unit size from the previous configure call.
 */
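
/* A hedged usage sketch: after a dma_config()/dma_start() sequence completes,
 * the same channel can be retargeted without a full reconfigure. The buffer
 * name, register address, and channel number below are illustrative only.
 *
 *   dma_reload(dma_dev, 0, (uint32_t)next_buf, (uint32_t)periph_data_reg_addr,
 *              sizeof(next_buf));
 *   dma_start(dma_dev, 0);
 */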
static int dma_xec_reload(const struct device *dev, uint32_t channel,
			  uint32_t src, uint32_t dst, size_t size)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t ctrl;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_channel *chdata = &data->channels[channel];
	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		return -EBUSY;
	}

	ctrl = chregs->control & ~(BIT(XEC_DMA_CHAN_CTRL_HWFL_RUN_POS)
				   | BIT(XEC_DMA_CHAN_CTRL_SWFL_GO_POS));
	chregs->ienable = 0;
	chregs->control = 0;
	chregs->istatus = 0xffu;

	if (ctrl & BIT(XEC_DMA_CHAN_CTRL_M2D_POS)) { /* Memory to Device */
		chdata->mstart = src;
		chdata->dstart = dst;
	} else {
		chdata->mstart = dst;
		chdata->dstart = src;
	}

	chdata->mend = chdata->mstart + size;
	chdata->total_req_xfr_len = size;
	chdata->total_curr_xfr_len = 0;

	chregs->mem_addr = chdata->mstart;
	chregs->mem_addr_end = chdata->mend;
	chregs->dev_addr = chdata->dstart;
	chregs->control = ctrl;

	return 0;
}

static int dma_xec_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t chan_ctrl = 0U;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		return -EBUSY;
	}

	chregs->ienable = 0u;
	chregs->istatus = 0xffu;
	chan_ctrl = chregs->control;

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS)) {
		chan_ctrl |= BIT(XEC_DMA_CHAN_CTRL_SWFL_GO_POS);
	} else {
		chan_ctrl |= BIT(XEC_DMA_CHAN_CTRL_HWFL_RUN_POS);
	}

	chregs->ienable = BIT(XEC_DMA_CHAN_IES_BERR_POS) | BIT(XEC_DMA_CHAN_IES_DONE_POS);
	chregs->control = chan_ctrl;
	chregs->actv |= BIT(XEC_DMA_CHAN_ACTV_EN_POS);

	return 0;
}

static int dma_xec_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	int wait_loops = XEC_DMA_ABORT_WAIT_LOOPS;

	if (channel >= (uint32_t)devcfg->dma_channels) {
		return -EINVAL;
	}

	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	chregs->ienable = 0;

	if (chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		chregs->ienable = 0;
		chregs->control |= BIT(XEC_DMA_CHAN_CTRL_ABORT_POS);
		/* HW stops on next unit boundary (1, 2, or 4 bytes) */

		do {
			if (!(chregs->control & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS))) {
				break;
			}
		} while (wait_loops--);
	}

	/* set mem_addr to the end address so the remaining length reads as zero */
	chregs->mem_addr = chregs->mem_addr_end;
	chregs->fsm = 0; /* fsm is read-only; dummy write used as an I/O delay */
	chregs->control = 0;
	chregs->istatus = 0xffu;
	chregs->actv = 0;

	return 0;
}

/* Get DMA transfer status.
 * HW supports: MEMORY_TO_MEMORY, MEMORY_TO_PERIPHERAL, or
 * PERIPHERAL_TO_MEMORY
 * Fields of the DMA runtime status structure:
 *
 * busy - is the current DMA transfer busy or idle
 * dir - DMA transfer direction
 * pending_length - data length pending to be transferred, in bytes
 *
 * We don't implement a circular buffer, so the following are not used:
 * free - free buffer space
 * write_position - write position in a circular dma buffer
 * read_position - read position in a circular dma buffer
 */
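
/* A hedged sketch of a client polling this until the channel is idle;
 * the device handle and channel number are illustrative assumptions.
 *
 *   struct dma_status st;
 *
 *   do {
 *       dma_get_status(dma_dev, 0, &st);
 *   } while (st.busy);
 *   // st.pending_length is the byte count not yet transferred
 */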
static int dma_xec_get_status(const struct device *dev, uint32_t channel,
			      struct dma_status *status)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_regs * const regs = devcfg->regs;
	uint32_t chan_ctrl = 0U;

	if ((channel >= (uint32_t)devcfg->dma_channels) || (!status)) {
		LOG_ERR("unsupported channel");
		return -EINVAL;
	}

	struct dma_xec_channel *chan_data = &data->channels[channel];
	struct dma_xec_chan_regs *chregs = xec_chan_regs(regs, channel);

	chan_ctrl = chregs->control;

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_BUSY_POS)) {
		status->busy = true;
		/* number of bytes remaining in channel */
		status->pending_length = chan_data->total_req_xfr_len -
					 (chregs->mem_addr_end - chregs->mem_addr);
	} else {
		status->pending_length = chan_data->total_req_xfr_len -
					 chan_data->total_curr_xfr_len;
		status->busy = false;
	}

	if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_DIS_HWFL_POS)) {
		status->dir = MEMORY_TO_MEMORY;
	} else if (chan_ctrl & BIT(XEC_DMA_CHAN_CTRL_M2D_POS)) {
		status->dir = MEMORY_TO_PERIPHERAL;
	} else {
		status->dir = PERIPHERAL_TO_MEMORY;
	}

	status->total_copied = chan_data->total_curr_xfr_len;

	return 0;
}

int xec_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	if ((type == DMA_ATTR_MAX_BLOCK_COUNT) && value) {
		*value = 1;
		return 0;
	}

	return -EINVAL;
}

/* returns true if filter matched otherwise returns false */
static bool dma_xec_chan_filter(const struct device *dev, int ch, void *filter_param)
{
	const struct dma_xec_config * const devcfg = dev->config;
	uint32_t filter = 0u;

	if (!filter_param && devcfg->dma_channels) {
		filter = GENMASK(devcfg->dma_channels - 1u, 0);
	} else {
		filter = *((uint32_t *)filter_param);
	}

	return (filter & BIT(ch));
}

/* API - HW does not support suspend/resume */
static DEVICE_API(dma, dma_xec_api) = {
	.config = dma_xec_configure,
	.reload = dma_xec_reload,
	.start = dma_xec_start,
	.stop = dma_xec_stop,
	.get_status = dma_xec_get_status,
	.chan_filter = dma_xec_chan_filter,
	.get_attribute = xec_dma_get_attribute,
};

#ifdef CONFIG_PM_DEVICE
/* TODO - The DMA block has one PCR SLP_EN and one CLK_REQ.
 * If any channel is running, the block's CLK_REQ is asserted.
 * CLK_REQ will not clear until all channels are done or disabled.
 * Clearing the DMA Main activate bit would kill in-progress DMA transactions,
 * resulting in possible data corruption and HW flow control device malfunctions.
 */
static int dmac_xec_pm_action(const struct device *dev,
			      enum pm_device_action action)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;
	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		regs->mctrl |= BIT(XEC_DMA_MAIN_CTRL_EN_POS);
		break;

	case PM_DEVICE_ACTION_SUSPEND:
		/* regs->mctrl &= ~BIT(XEC_DMA_MAIN_CTRL_EN_POS); */
		break;

	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

/* DMA channel interrupt handler called by the ISR.
 * Callback flags in struct dma_config
 * complete_callback_en
 *   0 = invoke at completion of all blocks
 *   1 = invoke at completion of each block
 * error_callback_dis
 *   0 = invoke on all errors
 *   1 = disabled, do not invoke on errors
 */
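
/* A hedged sketch of a client callback receiving the status computed below;
 * the callback name is an illustrative assumption.
 *
 *   static void my_dma_done(const struct device *dev, void *user_data,
 *                           uint32_t channel, int status)
 *   {
 *       if (status < 0) {
 *           // -EIO: bus error reported by the channel (not delivered if
 *           // error_callback_dis was set in struct dma_config)
 *       } else {
 *           // transfer completed without error
 *       }
 *   }
 */
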
/* DEBUG */
#ifdef XEC_DMA_DEBUG
static volatile uint8_t channel_isr_idx[16];
static volatile uint8_t channel_isr_sts[16][16];
static volatile uint32_t channel_isr_ctrl[16][16];

static void xec_dma_debug_clean(void)
{
	memset((void *)channel_isr_idx, 0, sizeof(channel_isr_idx));
	memset((void *)channel_isr_sts, 0, sizeof(channel_isr_sts));
	memset((void *)channel_isr_ctrl, 0, sizeof(channel_isr_ctrl));
}
#endif

static void dma_xec_irq_handler(const struct device *dev, uint32_t channel)
{
	const struct dma_xec_config * const devcfg = dev->config;
	const struct dma_xec_irq_info *info = devcfg->irq_info_list;
	struct dma_xec_data * const data = dev->data;
	struct dma_xec_channel *chan_data = &data->channels[channel];
	struct dma_xec_chan_regs * const regs = xec_chan_regs(devcfg->regs, channel);
	uint32_t sts = regs->istatus;
	int cb_status = 0;

#ifdef XEC_DMA_DEBUG
	uint8_t idx = channel_isr_idx[channel];

	if (idx < 16) {
		channel_isr_sts[channel][idx] = sts;
		channel_isr_ctrl[channel][idx] = regs->control;
		channel_isr_idx[channel] = ++idx;
	}
#endif
	LOG_DBG("maddr=0x%08x mend=0x%08x daddr=0x%08x ctrl=0x%08x sts=0x%02x", regs->mem_addr,
		regs->mem_addr_end, regs->dev_addr, regs->control, sts);

	regs->ienable = 0u;
	regs->istatus = 0xffu;
	mchp_xec_ecia_girq_src_clr(info[channel].gid, info[channel].gpos);

	chan_data->isr_hw_status = sts;
	chan_data->total_curr_xfr_len += (regs->mem_addr - chan_data->mstart);

	if (sts & BIT(XEC_DMA_CHAN_IES_BERR_POS)) { /* Bus Error? */
		if (!(chan_data->flags & BIT(DMA_XEC_CHAN_FLAGS_CB_ERR_DIS_POS))) {
			cb_status = -EIO;
		}
	}

	if (chan_data->cb) {
		chan_data->cb(dev, chan_data->user_data, channel, cb_status);
	}
}

static int dma_xec_init(const struct device *dev)
{
	const struct dma_xec_config * const devcfg = dev->config;
	struct dma_xec_regs * const regs = devcfg->regs;

	LOG_DBG("driver init");

	z_mchp_xec_pcr_periph_sleep(devcfg->pcr_idx, devcfg->pcr_pos, 0);

	/* soft reset, self-clearing */
	regs->mctrl = BIT(XEC_DMA_MAIN_CTRL_SRST_POS);
	regs->mpkt = 0u; /* I/O delay, write to read-only register */
	regs->mctrl = BIT(XEC_DMA_MAIN_CTRL_EN_POS);

	devcfg->irq_connect();

	return 0;
}

/* n = node-id, p = property, i = index */
#define DMA_XEC_GID(n, p, i) MCHP_XEC_ECIA_GIRQ(DT_PROP_BY_IDX(n, p, i))
#define DMA_XEC_GPOS(n, p, i) MCHP_XEC_ECIA_GIRQ_POS(DT_PROP_BY_IDX(n, p, i))

#define DMA_XEC_GIRQ_INFO(n, p, i)						\
	{									\
		.gid = DMA_XEC_GID(n, p, i),					\
		.gpos = DMA_XEC_GPOS(n, p, i),					\
		.anid = MCHP_XEC_ECIA_NVIC_AGGR(DT_PROP_BY_IDX(n, p, i)),	\
		.dnid = MCHP_XEC_ECIA_NVIC_DIRECT(DT_PROP_BY_IDX(n, p, i)),	\
	},

/* n = node-id, p = property, i = index(channel?) */
#define DMA_XEC_IRQ_DECLARE(node_id, p, i)				\
	static void dma_xec_chan_##i##_isr(const struct device *dev)	\
	{								\
		dma_xec_irq_handler(dev, i);				\
	}								\

#define DMA_XEC_IRQ_CONNECT_SUB(node_id, p, i)				\
	IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, i, irq),			\
		    DT_IRQ_BY_IDX(node_id, i, priority),		\
		    dma_xec_chan_##i##_isr,				\
		    DEVICE_DT_GET(node_id), 0);				\
	irq_enable(DT_IRQ_BY_IDX(node_id, i, irq));			\
	mchp_xec_ecia_enable(DMA_XEC_GID(node_id, p, i), DMA_XEC_GPOS(node_id, p, i));

/* i = instance number of DMA controller */
#define DMA_XEC_IRQ_CONNECT(inst)					\
	DT_INST_FOREACH_PROP_ELEM(inst, girqs, DMA_XEC_IRQ_DECLARE)	\
	void dma_xec_irq_connect##inst(void)				\
	{								\
		DT_INST_FOREACH_PROP_ELEM(inst, girqs, DMA_XEC_IRQ_CONNECT_SUB) \
	}

#define DMA_XEC_DEVICE(i)							\
	BUILD_ASSERT(DT_INST_PROP(i, dma_channels) <= 16, "XEC DMA dma-channels > 16"); \
	BUILD_ASSERT(DT_INST_PROP(i, dma_requests) <= 16, "XEC DMA dma-requests > 16"); \
										\
	static struct dma_xec_channel						\
		dma_xec_ctrl##i##_chans[DT_INST_PROP(i, dma_channels)];		\
	ATOMIC_DEFINE(dma_xec_atomic##i, DT_INST_PROP(i, dma_channels));	\
										\
	static struct dma_xec_data dma_xec_data##i = {				\
		.ctx.magic = DMA_MAGIC,						\
		.ctx.dma_channels = DT_INST_PROP(i, dma_channels),		\
		.ctx.atomic = dma_xec_atomic##i,				\
		.channels = dma_xec_ctrl##i##_chans,				\
	};									\
										\
	DMA_XEC_IRQ_CONNECT(i)							\
										\
	static const struct dma_xec_irq_info dma_xec_irqi##i[] = {		\
		DT_INST_FOREACH_PROP_ELEM(i, girqs, DMA_XEC_GIRQ_INFO)		\
	};									\
	static const struct dma_xec_config dma_xec_cfg##i = {			\
		.regs = (struct dma_xec_regs *)DT_INST_REG_ADDR(i),		\
		.dma_channels = DT_INST_PROP(i, dma_channels),			\
		.dma_requests = DT_INST_PROP(i, dma_requests),			\
		.pcr_idx = DT_INST_PROP_BY_IDX(i, pcrs, 0),			\
		.pcr_pos = DT_INST_PROP_BY_IDX(i, pcrs, 1),			\
		.irq_info_size = ARRAY_SIZE(dma_xec_irqi##i),			\
		.irq_info_list = dma_xec_irqi##i,				\
		.irq_connect = dma_xec_irq_connect##i,				\
	};									\
	PM_DEVICE_DT_DEFINE(i, dmac_xec_pm_action);				\
	DEVICE_DT_INST_DEFINE(i, &dma_xec_init,					\
			      PM_DEVICE_DT_GET(i),				\
			      &dma_xec_data##i, &dma_xec_cfg##i,		\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,		\
			      &dma_xec_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_XEC_DEVICE)
