/*
 * Copyright (c) 2024 Paul Wedeck <paulwedeck@gmail.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define DT_DRV_COMPAT wch_wch_dma

#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/clock_control.h>

#include <ch32fun.h>

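/*
 * The controller exposes up to DMA_WCH_MAX_CHAN channels. Interrupt flags for
 * the first DMA_WCH_MAX_CHAN_BASE channels live in the base INTFR/INTFCR
 * pair; the remaining channels are flagged through the extended register
 * block.
 */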
#define DMA_WCH_MAX_CHAN      11
#define DMA_WCH_MAX_CHAN_BASE 8

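/*
 * Each channel owns a 4-bit flag field (global, transfer-complete,
 * half-transfer, transfer-error) in its flag register; DMA_WCH_IF_OFF()
 * yields the bit offset of that field for a given channel index.
 * DMA_WCH_MAX_BLOCK bounds the transfer count written to CNTR.
 */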
#define DMA_WCH_AIF        (DMA_GIF1 | DMA_TCIF1 | DMA_HTIF1 | DMA_TEIF1)
#define DMA_WCH_IF_OFF(ch) (4 * (ch))
#define DMA_WCH_MAX_BLOCK  ((UINT32_C(2) << 16) - 1)

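/* Register set of a single channel; the reserved word pads to the 20-byte stride. */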
struct dma_wch_chan_regs {
	volatile uint32_t CFGR;
	volatile uint32_t CNTR;
	volatile uint32_t PADDR;
	volatile uint32_t MADDR;
	volatile uint32_t reserved1;
};

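/*
 * Full register map: the shared flag registers, the per-channel register
 * array, and the extended flag registers serving channels past the base set.
 */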
struct dma_wch_regs {
	DMA_TypeDef base;
	struct dma_wch_chan_regs channels[DMA_WCH_MAX_CHAN];
	DMA_TypeDef ext;
};

struct dma_wch_config {
	struct dma_wch_regs *regs;
	uint32_t num_channels;
	const struct device *clock_dev;
	uint8_t clock_id;
	void (*irq_config_func)(const struct device *dev);
};

struct dma_wch_channel {
	void *user_data;
	dma_callback_t dma_cb;
};

struct dma_wch_data {
	struct dma_context ctx;
	struct dma_wch_channel *channels;
};

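/*
 * Return the pending interrupt flags (a DMA_WCH_AIF-shaped 4-bit field) for a
 * channel, reading the base or extended flag register as appropriate.
 */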
static uint8_t dma_wch_get_ip(const struct device *dev, uint32_t chan)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	uint32_t intfr;

	if (chan >= DMA_WCH_MAX_CHAN_BASE) {
		chan -= DMA_WCH_MAX_CHAN_BASE;
		intfr = regs->ext.INTFR;
		return (intfr >> DMA_WCH_IF_OFF(chan)) & DMA_WCH_AIF;
	}

	intfr = regs->base.INTFR;
	return (intfr >> DMA_WCH_IF_OFF(chan)) & DMA_WCH_AIF;
}

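/* A channel counts as busy while it is enabled and has not yet raised transfer-complete. */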
static bool dma_wch_busy(const struct device *dev, uint32_t ch)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;

	return (regs->channels[ch].CFGR & DMA_CFGR1_EN) > 0 &&
	       !(dma_wch_get_ip(dev, ch) & DMA_TCIF1);
}

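/* Enable the controller clock and hook up the channel interrupts. */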
static int dma_wch_init(const struct device *dev)
{
	const struct dma_wch_config *config = dev->config;
	clock_control_subsys_t clock_sys = (clock_control_subsys_t *)(uintptr_t)config->clock_id;

	if (config->num_channels > DMA_WCH_MAX_CHAN) {
		return -ENOTSUP;
	}

	clock_control_on(config->clock_dev, clock_sys);

	config->irq_config_func(dev);
	return 0;
}

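/*
 * Configure a channel: validate the request (single block, no scatter/gather
 * or reload, incrementing addresses only), translate it into a CFGR value and
 * program the channel registers, rejecting the call while the channel is busy.
 */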
static int dma_wch_config(const struct device *dev, uint32_t ch, struct dma_config *dma_cfg)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_data *data = dev->data;
	struct dma_wch_regs *regs = config->regs;
	unsigned int key;
	int ret = 0;

	uint32_t cfgr = 0;
	uint32_t cntr = 0;
	uint32_t paddr = 0;
	uint32_t maddr = 0;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	if (dma_cfg->block_count != 1) {
		return -ENOTSUP;
	}

	if (dma_cfg->head_block->block_size > DMA_WCH_MAX_BLOCK ||
	    dma_cfg->head_block->source_gather_en || dma_cfg->head_block->dest_scatter_en ||
	    dma_cfg->head_block->source_reload_en || dma_cfg->channel_priority > 3 ||
	    dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT ||
	    dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT ||
	    dma_cfg->head_block->dest_reload_en) {
		return -ENOTSUP;
	}

	cntr = dma_cfg->head_block->block_size;

	switch (dma_cfg->channel_direction) {
	case MEMORY_TO_MEMORY:
		cfgr |= DMA_CFGR1_MEM2MEM;
		paddr = dma_cfg->head_block->source_address;
		maddr = dma_cfg->head_block->dest_address;

		if (dma_cfg->cyclic) {
			return -ENOTSUP;
		}
		break;
	case MEMORY_TO_PERIPHERAL:
		maddr = dma_cfg->head_block->source_address;
		paddr = dma_cfg->head_block->dest_address;
		cfgr |= DMA_CFGR1_DIR;
		break;
	case PERIPHERAL_TO_MEMORY:
		paddr = dma_cfg->head_block->source_address;
		maddr = dma_cfg->head_block->dest_address;
		break;
	default:
		return -ENOTSUP;
	}
	cfgr |= dma_cfg->channel_priority * DMA_CFGR1_PL_0;

	if (dma_cfg->channel_direction == MEMORY_TO_PERIPHERAL) {
		cfgr |= dma_width_index(dma_cfg->source_data_size / BITS_PER_BYTE) *
			DMA_CFGR1_MSIZE_0;
		cfgr |= dma_width_index(dma_cfg->dest_data_size / BITS_PER_BYTE) *
			DMA_CFGR1_PSIZE_0;

		cfgr |= (dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT)
				? DMA_CFGR1_PINC
				: 0;
		cfgr |= (dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT)
				? DMA_CFGR1_MINC
				: 0;
	} else {
		cfgr |= dma_width_index(dma_cfg->source_data_size / BITS_PER_BYTE) *
			DMA_CFGR1_PSIZE_0;
		cfgr |= dma_width_index(dma_cfg->dest_data_size / BITS_PER_BYTE) *
			DMA_CFGR1_MSIZE_0;

		cfgr |= (dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT)
				? DMA_CFGR1_MINC
				: 0;
		cfgr |= (dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_INCREMENT)
				? DMA_CFGR1_PINC
				: 0;
	}

	if (dma_cfg->cyclic) {
		cfgr |= DMA_CFGR1_CIRC;
	}

	if (dma_cfg->dma_callback) {
		if (!dma_cfg->error_callback_dis) {
			cfgr |= DMA_CFGR1_TEIE;
		}

		if (dma_cfg->complete_callback_en) {
			cfgr |= DMA_CFGR1_HTIE;
		}

		cfgr |= DMA_CFGR1_TCIE;
	}

	key = irq_lock();

	if (dma_wch_busy(dev, ch)) {
		ret = -EBUSY;
		goto end;
	}

	data->channels[ch].user_data = dma_cfg->user_data;
	data->channels[ch].dma_cb = dma_cfg->dma_callback;

	regs->channels[ch].CFGR = 0;

	if (ch < DMA_WCH_MAX_CHAN_BASE) {
		regs->base.INTFCR = DMA_WCH_AIF << DMA_WCH_IF_OFF(ch);
	} else {
		regs->ext.INTFCR = DMA_WCH_AIF << DMA_WCH_IF_OFF(ch - DMA_WCH_MAX_CHAN_BASE);
	}

	regs->channels[ch].PADDR = paddr;
	regs->channels[ch].MADDR = maddr;
	regs->channels[ch].CNTR = cntr;
	regs->channels[ch].CFGR = cfgr;
end:
	irq_unlock(key);
	return ret;
}

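/*
 * Reload a stopped channel with fresh addresses and a new transfer count.
 * With CONFIG_DMA_64BIT the API hands in 64-bit addresses, but this
 * controller only takes 32-bit ones, so the upper half is discarded.
 */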
#ifdef CONFIG_DMA_64BIT
static int dma_wch_reload(const struct device *dev, uint32_t ch, uint64_t src, uint64_t dst,
			  size_t size)
#else
static int dma_wch_reload(const struct device *dev, uint32_t ch, uint32_t src, uint32_t dst,
			  size_t size)
#endif
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	uint32_t maddr, paddr;
	int ret = 0;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();

	if (dma_wch_busy(dev, ch)) {
		ret = -EBUSY;
		goto end;
	}

	if (regs->channels[ch].CFGR & DMA_CFGR1_DIR) {
		maddr = src;
		paddr = dst;
	} else {
		maddr = dst;
		paddr = src;
	}

	regs->channels[ch].MADDR = maddr;
	regs->channels[ch].PADDR = paddr;
	regs->channels[ch].CNTR = size;
end:
	irq_unlock(key);
	return ret;
}

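/* Start a transfer by setting the channel enable bit. */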
static int dma_wch_start(const struct device *dev, uint32_t ch)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();
	regs->channels[ch].CFGR |= DMA_CFGR1_EN;
	irq_unlock(key);

	return 0;
}

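/* Stop a transfer by clearing the channel enable bit. */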
static int dma_wch_stop(const struct device *dev, uint32_t ch)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();
	regs->channels[ch].CFGR &= ~DMA_CFGR1_EN;
	irq_unlock(key);

	return 0;
}

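/* Resume is only valid for a channel that is currently disabled. */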
static int dma_wch_resume(const struct device *dev, uint32_t ch)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	int ret = 0;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();

	if (regs->channels[ch].CFGR & DMA_CFGR1_EN) {
		ret = -EINVAL;
		goto end;
	}

	regs->channels[ch].CFGR |= DMA_CFGR1_EN;
end:
	irq_unlock(key);
	return ret;
}

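/* Suspend is only valid for a channel that is currently enabled. */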
static int dma_wch_suspend(const struct device *dev, uint32_t ch)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	int ret = 0;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();

	if (!(regs->channels[ch].CFGR & DMA_CFGR1_EN)) {
		ret = -EINVAL;
		goto end;
	}

	regs->channels[ch].CFGR &= ~DMA_CFGR1_EN;
end:
	irq_unlock(key);
	return ret;
}

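/*
 * Report channel state: busy flag, direction decoded from CFGR, the remaining
 * transfer count, and the current read/write positions.
 */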
static int dma_wch_get_status(const struct device *dev, uint32_t ch, struct dma_status *status)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	uint32_t cfgr;
	unsigned int key;

	if (config->num_channels <= ch) {
		return -EINVAL;
	}

	key = irq_lock();
	cfgr = regs->channels[ch].CFGR;

	status->busy = dma_wch_busy(dev, ch);
	if (cfgr & DMA_CFGR1_MEM2MEM) {
		status->dir = MEMORY_TO_MEMORY;
	} else if (cfgr & DMA_CFGR1_DIR) {
		status->dir = MEMORY_TO_PERIPHERAL;
	} else {
		status->dir = PERIPHERAL_TO_MEMORY;
	}

	status->pending_length = regs->channels[ch].CNTR;
	if (cfgr & DMA_CFGR1_DIR) {
		status->read_position = regs->channels[ch].MADDR;
		status->write_position = regs->channels[ch].PADDR;
	} else {
		status->read_position = regs->channels[ch].PADDR;
		status->write_position = regs->channels[ch].MADDR;
	}
	irq_unlock(key);
	return 0;
}

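/*
 * The controller has no alignment requirements and supports exactly one block
 * per transfer.
 */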
static int dma_wch_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	switch (type) {
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
	case DMA_ATTR_COPY_ALIGNMENT:
	case DMA_ATTR_MAX_BLOCK_COUNT:
		*value = 1;
		return 0;
	default:
		return -EINVAL;
	}
}

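/*
 * Snapshot the callback and its user data under the IRQ lock, then dispatch
 * on the pending flags: complete, error, or half-transfer (block done).
 */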
static void dma_wch_handle_callback(const struct device *dev, uint32_t ch, uint8_t ip)
{
	const struct dma_wch_data *data = dev->data;
	void *cb_data;
	dma_callback_t cb_func;
	unsigned int key = irq_lock();

	cb_data = data->channels[ch].user_data;
	cb_func = data->channels[ch].dma_cb;
	irq_unlock(key);

	if (!cb_func) {
		return;
	}

	if (ip & DMA_TCIF1) {
		cb_func(dev, cb_data, ch, DMA_STATUS_COMPLETE);
	} else if (ip & DMA_TEIF1) {
		cb_func(dev, cb_data, ch, -EIO);
	} else if (ip & DMA_HTIF1) {
		cb_func(dev, cb_data, ch, DMA_STATUS_BLOCK);
	}
}

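/*
 * ISR for channels flagged in the base register block: disable the channel on
 * transfer completion, clear the handled flags and run the user callback.
 */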
static void dma_wch_isr(const struct device *dev, uint32_t chan)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	uint32_t intfr = regs->base.INTFR;

	intfr &= (DMA_WCH_AIF << DMA_WCH_IF_OFF(chan));
	if (intfr & DMA_TCIF1 << DMA_WCH_IF_OFF(chan)) {
		regs->channels[chan].CFGR &= ~DMA_CFGR1_EN;
	}
	regs->base.INTFCR = intfr;

	dma_wch_handle_callback(dev, chan, intfr >> DMA_WCH_IF_OFF(chan));
}

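/* ISR for channels flagged in the extended register block. */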
static void dma_wch_isr_ext(const struct device *dev, uint32_t chan)
{
	const struct dma_wch_config *config = dev->config;
	struct dma_wch_regs *regs = config->regs;
	uint32_t chan_idx = chan - DMA_WCH_MAX_CHAN_BASE;
	uint32_t intfr = regs->ext.INTFR;

	intfr &= (DMA_WCH_AIF << DMA_WCH_IF_OFF(chan_idx));
	regs->ext.INTFCR = intfr;

	dma_wch_handle_callback(dev, chan, intfr >> DMA_WCH_IF_OFF(chan_idx));
}

static DEVICE_API(dma, dma_wch_driver_api) = {
	.config = dma_wch_config,
	.reload = dma_wch_reload,
	.start = dma_wch_start,
	.stop = dma_wch_stop,
	.resume = dma_wch_resume,
	.suspend = dma_wch_suspend,
	.get_status = dma_wch_get_status,
	.get_attribute = dma_wch_get_attribute,
};

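/*
 * Emit one ISR trampoline per channel, routed to the base or extended flag
 * handler depending on the channel index.
 */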
#define GENERATE_ISR(ch, _)                                                                        \
	static void dma_wch_isr##ch(const struct device *dev)                                      \
	{                                                                                          \
		if (ch < DMA_WCH_MAX_CHAN_BASE) {                                                  \
			dma_wch_isr(dev, ch);                                                      \
		} else {                                                                           \
			dma_wch_isr_ext(dev, ch);                                                  \
		}                                                                                  \
	}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
LISTIFY(DMA_WCH_MAX_CHAN, GENERATE_ISR, ())
#pragma GCC diagnostic pop

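/* Connect and enable one device-tree interrupt, wiring it to its per-channel ISR. */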
#define IRQ_CONFIGURE(n, idx)                                                                      \
	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(idx, n, irq), DT_INST_IRQ_BY_IDX(idx, n, priority),         \
		    dma_wch_isr##n, DEVICE_DT_INST_GET(idx), 0);                                   \
	irq_enable(DT_INST_IRQ_BY_IDX(idx, n, irq));

#define CONFIGURE_ALL_IRQS(idx, n) LISTIFY(n, IRQ_CONFIGURE, (), idx)

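/*
 * Per-instance boilerplate: the IRQ setup function, config/data structures
 * sized from the dma-channels property, and the device definition itself.
 */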
#define WCH_DMA_INIT(idx)                                                                          \
	static void dma_wch##idx##_irq_config(const struct device *dev)                            \
	{                                                                                          \
		CONFIGURE_ALL_IRQS(idx, DT_NUM_IRQS(DT_DRV_INST(idx)));                            \
	}                                                                                          \
	static const struct dma_wch_config dma_wch##idx##_config = {                               \
		.regs = (struct dma_wch_regs *)DT_INST_REG_ADDR(idx),                              \
		.num_channels = DT_INST_PROP(idx, dma_channels),                                   \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),                              \
		.clock_id = DT_INST_CLOCKS_CELL(idx, id),                                          \
		.irq_config_func = dma_wch##idx##_irq_config,                                      \
	};                                                                                         \
	static struct dma_wch_channel dma_wch##idx##_channels[DT_INST_PROP(idx, dma_channels)];    \
	ATOMIC_DEFINE(dma_wch_atomic##idx, DT_INST_PROP(idx, dma_channels));                       \
	static struct dma_wch_data dma_wch##idx##_data = {                                         \
		.ctx =                                                                             \
			{                                                                          \
				.magic = DMA_MAGIC,                                                \
				.atomic = dma_wch_atomic##idx,                                     \
				.dma_channels = DT_INST_PROP(idx, dma_channels),                   \
			},                                                                         \
		.channels = dma_wch##idx##_channels,                                               \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(idx, &dma_wch_init, NULL, &dma_wch##idx##_data,                      \
			      &dma_wch##idx##_config, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,      \
			      &dma_wch_driver_api);

DT_INST_FOREACH_STATUS_OKAY(WCH_DMA_INIT)