1 /*
2  * Copyright (c) 2016 Linaro Limited.
3  * Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
4  * Copyright (c) 2022 STMicroelectronics
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 /**
10  * @brief Common part of DMA drivers for stm32U5.
11  * @note  Functions named with stm32_dma_* are SoCs related functions
12  *
13  */
14 
15 #include "dma_stm32.h"
16 
17 #include <zephyr/init.h>
18 #include <zephyr/drivers/clock_control.h>
19 #include <zephyr/drivers/dma/dma_stm32.h>
20 
21 #include <zephyr/logging/log.h>
22 #include <zephyr/irq.h>
23 LOG_MODULE_REGISTER(dma_stm32, CONFIG_DMA_LOG_LEVEL);
24 
25 #define DT_DRV_COMPAT st_stm32u5_dma
26 
/* LL source data-width constants, indexed by log2(source_data_size) in bytes
 * (0 -> byte, 1 -> half-word, 2 -> word).
 */
static const uint32_t table_src_size[] = {
	LL_DMA_SRC_DATAWIDTH_BYTE,
	LL_DMA_SRC_DATAWIDTH_HALFWORD,
	LL_DMA_SRC_DATAWIDTH_WORD,
};

/* LL destination data-width constants, same indexing as table_src_size. */
static const uint32_t table_dst_size[] = {
	LL_DMA_DEST_DATAWIDTH_BYTE,
	LL_DMA_DEST_DATAWIDTH_HALFWORD,
	LL_DMA_DEST_DATAWIDTH_WORD,
};

/* LL priority constants, indexed by the Zephyr channel_priority (0..3). */
static const uint32_t table_priority[4] = {
	LL_DMA_LOW_PRIORITY_LOW_WEIGHT,
	LL_DMA_LOW_PRIORITY_MID_WEIGHT,
	LL_DMA_LOW_PRIORITY_HIGH_WEIGHT,
	LL_DMA_HIGH_PRIORITY,
};
45 
/* Log the IRQ flag state of stream @id on this device's DMA instance. */
static void dma_stm32_dump_stream_irq(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *cfg = dev->config;

	stm32_dma_dump_stream_irq((DMA_TypeDef *)(cfg->base), id);
}
53 
/* Clear every pending IRQ flag (TC, HT, errors, TO, SUSP) of stream @id. */
static void dma_stm32_clear_stream_irq(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *cfg = dev->config;
	DMA_TypeDef *dma_inst = (DMA_TypeDef *)(cfg->base);

	dma_stm32_clear_tc(dma_inst, id);
	dma_stm32_clear_ht(dma_inst, id);
	stm32_dma_clear_stream_irq(dma_inst, id);
}
63 
64 
dma_stm32_id_to_stream(uint32_t id)65 uint32_t dma_stm32_id_to_stream(uint32_t id)
66 {
67 	static const uint32_t stream_nr[] = {
68 		LL_DMA_CHANNEL_0,
69 		LL_DMA_CHANNEL_1,
70 		LL_DMA_CHANNEL_2,
71 		LL_DMA_CHANNEL_3,
72 		LL_DMA_CHANNEL_4,
73 		LL_DMA_CHANNEL_5,
74 		LL_DMA_CHANNEL_6,
75 		LL_DMA_CHANNEL_7,
76 		LL_DMA_CHANNEL_8,
77 		LL_DMA_CHANNEL_9,
78 		LL_DMA_CHANNEL_10,
79 		LL_DMA_CHANNEL_11,
80 		LL_DMA_CHANNEL_12,
81 		LL_DMA_CHANNEL_13,
82 		LL_DMA_CHANNEL_14,
83 		LL_DMA_CHANNEL_15,
84 	};
85 
86 	__ASSERT_NO_MSG(id < ARRAY_SIZE(stream_nr));
87 
88 	return stream_nr[id];
89 }
90 
/* Transfer-complete flag state of stream @id. */
bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	return LL_DMA_IsActiveFlag_TC(DMAx, ch);
}
95 
/* Clear the transfer-complete flag of stream @id. */
void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	LL_DMA_ClearFlag_TC(DMAx, ch);
}
100 
101 /* data transfer error */
dma_stm32_is_dte_active(DMA_TypeDef * dma,uint32_t id)102 static inline bool dma_stm32_is_dte_active(DMA_TypeDef *dma, uint32_t id)
103 {
104 	return LL_DMA_IsActiveFlag_DTE(dma, dma_stm32_id_to_stream(id));
105 }
106 
107 /* link transfer error */
dma_stm32_is_ule_active(DMA_TypeDef * dma,uint32_t id)108 static inline bool dma_stm32_is_ule_active(DMA_TypeDef *dma, uint32_t id)
109 {
110 	return LL_DMA_IsActiveFlag_ULE(dma, dma_stm32_id_to_stream(id));
111 }
112 
113 /* user setting error */
dma_stm32_is_use_active(DMA_TypeDef * dma,uint32_t id)114 static inline bool dma_stm32_is_use_active(DMA_TypeDef *dma, uint32_t id)
115 {
116 	return LL_DMA_IsActiveFlag_USE(dma, dma_stm32_id_to_stream(id));
117 }
118 
119 /* transfer error either a data or user or link error */
dma_stm32_is_te_active(DMA_TypeDef * DMAx,uint32_t id)120 bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id)
121 {
122 	return (
123 	LL_DMA_IsActiveFlag_DTE(DMAx, dma_stm32_id_to_stream(id)) ||
124 		LL_DMA_IsActiveFlag_ULE(DMAx, dma_stm32_id_to_stream(id)) ||
125 		LL_DMA_IsActiveFlag_USE(DMAx, dma_stm32_id_to_stream(id))
126 	);
127 }
128 /* clear transfer error either a data or user or link error */
dma_stm32_clear_te(DMA_TypeDef * DMAx,uint32_t id)129 void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id)
130 {
131 	LL_DMA_ClearFlag_DTE(DMAx, dma_stm32_id_to_stream(id));
132 	LL_DMA_ClearFlag_ULE(DMAx, dma_stm32_id_to_stream(id));
133 	LL_DMA_ClearFlag_USE(DMAx, dma_stm32_id_to_stream(id));
134 }
135 
/* Half-transfer flag state of stream @id. */
bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	return LL_DMA_IsActiveFlag_HT(DMAx, ch);
}
140 
/* Clear the half-transfer flag of stream @id. */
void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	LL_DMA_ClearFlag_HT(DMAx, ch);
}
145 
/* Log the state of all IRQ flags of stream @id for debug purposes. */
void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id)
{
	bool tc = dma_stm32_is_tc_active(dma, id);
	bool ht = dma_stm32_is_ht_active(dma, id);
	bool dte = dma_stm32_is_dte_active(dma, id);
	bool ule = dma_stm32_is_ule_active(dma, id);
	bool use = dma_stm32_is_use_active(dma, id);

	LOG_INF("tc: %d, ht: %d, dte: %d, ule: %d, use: %d",
		tc, ht, dte, ule, use);
}
156 
157 /* Check if nsecure masked interrupt is active on channel */
stm32_dma_is_tc_irq_active(DMA_TypeDef * dma,uint32_t id)158 bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id)
159 {
160 	return (LL_DMA_IsEnabledIT_TC(dma, dma_stm32_id_to_stream(id)) &&
161 		LL_DMA_IsActiveFlag_TC(dma, dma_stm32_id_to_stream(id)));
162 }
163 
/* True when the HT interrupt is enabled AND its flag is pending. */
bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	return LL_DMA_IsEnabledIT_HT(dma, ch) &&
	       LL_DMA_IsActiveFlag_HT(dma, ch);
}
169 
/* True when any enabled transfer-error interrupt (DTE/ULE/USE) is pending. */
static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);
	bool dte = LL_DMA_IsEnabledIT_DTE(dma, ch) &&
		   LL_DMA_IsActiveFlag_DTE(dma, ch);
	bool ule = LL_DMA_IsEnabledIT_ULE(dma, ch) &&
		   LL_DMA_IsActiveFlag_ULE(dma, ch);
	bool use = LL_DMA_IsEnabledIT_USE(dma, ch) &&
		   LL_DMA_IsActiveFlag_USE(dma, ch);

	return dte || ule || use;
}
181 
/* Check if an irq of any type occurred on the channel (masked status) */
#define stm32_dma_is_irq_active LL_DMA_IsActiveFlag_MIS
184 
/* Clear error, trigger-overrun and suspend flags of stream @id. */
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	dma_stm32_clear_te(dma, id);
	LL_DMA_ClearFlag_TO(dma, ch);
	LL_DMA_ClearFlag_SUSP(dma, ch);
}
192 
/* Report whether a transfer-error irq condition occurred on stream @id. */
bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id)
{
	return dma_stm32_is_te_active(dma, id);
}
201 
/* Enable the GPDMA channel mapped to stream index @id. */
void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	LL_DMA_EnableChannel(dma, ch);
}
206 
/* True when the GPDMA channel mapped to @id is currently enabled. */
bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id)
{
	return LL_DMA_IsEnabledChannel(dma, dma_stm32_id_to_stream(id)) == 1;
}
214 
/*
 * Abort the GPDMA channel mapped to @id: suspend it, then reset it
 * (resetting the channel also disables it).
 *
 * @return 0 when the channel ended up disabled, -EAGAIN if it is still
 *	   enabled and the caller should retry.
 */
int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id)
{
	uint32_t ch = dma_stm32_id_to_stream(id);

	LL_DMA_SuspendChannel(dma, ch);
	LL_DMA_ResetChannel(dma, ch);

	return stm32_dma_is_enabled_stream(dma, id) ? -EAGAIN : 0;
}
229 
/* Program source and destination addresses of @channel (mem -> periph). */
void stm32_dma_set_mem_periph_address(DMA_TypeDef *dma,
				      uint32_t channel,
				      uint32_t src_addr,
				      uint32_t dest_addr)
{
	LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr);
}
237 
238 /* same function to set periph/mem addresses */
stm32_dma_set_periph_mem_address(DMA_TypeDef * dma,uint32_t channel,uint32_t src_addr,uint32_t dest_addr)239 void stm32_dma_set_periph_mem_address(DMA_TypeDef *dma,
240 					     uint32_t channel,
241 					     uint32_t src_addr,
242 					     uint32_t dest_addr)
243 {
244 	LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr);
245 }
246 
/*
 * Per-stream interrupt handler.
 *
 * Dispatches to the registered user callback with:
 *  - DMA_STATUS_BLOCK on half-transfer,
 *  - DMA_STATUS_COMPLETE on transfer-complete,
 *  - -EIO on any other (error) condition.
 *
 * The callback channel argument is the public stream id, i.e. the
 * zero-based index shifted back by STM32_DMA_STREAM_OFFSET.
 */
static void dma_stm32_irq_handler(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;
	uint32_t callback_arg;

	__ASSERT_NO_MSG(id < config->max_streams);

	stream = &config->streams[id];
	/* The busy channel is pertinent if not overridden by the HAL */
	if ((stream->hal_override != true) && (stream->busy == false)) {
		/*
		 * When DMA channel is not overridden by HAL,
		 * ignore irq if the channel is not busy anymore
		 */
		dma_stm32_clear_stream_irq(dev, id);
		return;
	}
	callback_arg = id + STM32_DMA_STREAM_OFFSET;

	/* The dma stream id is in range from STM32_DMA_STREAM_OFFSET..<dma-requests> */
	if (stm32_dma_is_ht_irq_active(dma, id)) {
		/* Let HAL DMA handle flags on its own */
		if (!stream->hal_override) {
			dma_stm32_clear_ht(dma, id);
		}
		stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_BLOCK);
	} else if (stm32_dma_is_tc_irq_active(dma, id)) {
		/* Assuming not cyclic transfer */
		stream->busy = false;
		/* Let HAL DMA handle flags on its own */
		if (!stream->hal_override) {
			dma_stm32_clear_tc(dma, id);
		}
		stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_COMPLETE);
	} else {
		/* Neither HT nor TC: treat as a transfer error */
		LOG_ERR("Transfer Error.");
		stream->busy = false;
		dma_stm32_dump_stream_irq(dev, id);
		dma_stm32_clear_stream_irq(dev, id);
		stream->dma_callback(dev, stream->user_data,
				     callback_arg, -EIO);
	}
}
292 
dma_stm32_get_priority(uint8_t priority,uint32_t * ll_priority)293 static int dma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority)
294 {
295 	if (priority > ARRAY_SIZE(table_priority)) {
296 		LOG_ERR("Priority error. %d", priority);
297 		return -EINVAL;
298 	}
299 
300 	*ll_priority = table_priority[priority];
301 	return 0;
302 }
303 
dma_stm32_get_direction(enum dma_channel_direction direction,uint32_t * ll_direction)304 static int dma_stm32_get_direction(enum dma_channel_direction direction,
305 				   uint32_t *ll_direction)
306 {
307 	switch (direction) {
308 	case MEMORY_TO_MEMORY:
309 		*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_MEMORY;
310 		break;
311 	case MEMORY_TO_PERIPHERAL:
312 		*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
313 		break;
314 	case PERIPHERAL_TO_MEMORY:
315 		*ll_direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY;
316 		break;
317 	default:
318 		LOG_ERR("Direction error. %d", direction);
319 		return -EINVAL;
320 	}
321 
322 	return 0;
323 }
324 
/*
 * Disable stream @id, retrying every 1 ms until it succeeds.
 * Gives up after ~5 seconds.
 *
 * Fix: the original ended with an unreachable 'return 0;' after an
 * infinite for(;;) loop; rewritten as a bounded loop with the same
 * retry budget (> 5000 attempts, 1 ms apart).
 *
 * @return 0 when disabled, -EBUSY on timeout
 */
static int dma_stm32_disable_stream(DMA_TypeDef *dma, uint32_t id)
{
	for (int count = 0; count <= 5 * 1000; count++) {
		if (stm32_dma_disable_stream(dma, id) == 0) {
			return 0;
		}
		k_sleep(K_MSEC(1));
	}

	/* After trying for 5 seconds, give up */
	return -EBUSY;
}
342 
/*
 * Configure a DMA stream (dma_config API entry).
 *
 * @param dev    DMA device instance
 * @param id     stream id, offset by STM32_DMA_STREAM_OFFSET
 * @param config Zephyr transfer description (head block, data widths,
 *		 direction, priority, dma_slot as the request id)
 * @return 0 on success, negative errno on invalid parameters or busy stream
 */
static int dma_stm32_configure(const struct device *dev,
					     uint32_t id,
					     struct dma_config *config)
{
	const struct dma_stm32_config *dev_config = dev->config;
	/* NOTE(review): stream is derived from id before the range check
	 * below; the pointer is only dereferenced after the check, but the
	 * computation itself uses the unvalidated id — worth confirming.
	 */
	struct dma_stm32_stream *stream =
				&dev_config->streams[id - STM32_DMA_STREAM_OFFSET];
	DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base;
	LL_DMA_InitTypeDef DMA_InitStruct;
	int ret;

	LL_DMA_StructInit(&DMA_InitStruct);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= dev_config->max_streams) {
		LOG_ERR("cannot configure the dma stream %d.", id);
		return -EINVAL;
	}

	if (stream->busy) {
		LOG_ERR("dma stream %d is busy.", id);
		return -EBUSY;
	}

	if (dma_stm32_disable_stream(dma, id) != 0) {
		LOG_ERR("could not disable dma stream %d.", id);
		return -EBUSY;
	}

	dma_stm32_clear_stream_irq(dev, id);

	/* Check potential DMA override (if id parameters and stream are valid) */
	if (config->linked_channel == STM32_DMA_HAL_OVERRIDE) {
		/* DMA channel is overridden by HAL DMA
		 * Retain that the channel is busy and proceed to the minimal
		 * configuration to properly route the IRQ
		 */
		stream->busy = true;
		stream->hal_override = true;
		stream->dma_callback = config->dma_callback;
		stream->user_data = config->user_data;
		return 0;
	}

	if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
		LOG_ERR("Data size too big: %d\n",
		       config->head_block->block_size);
		return -EINVAL;
	}

	/* Support only the same data width for source and dest */
	if (config->dest_data_size != config->source_data_size) {
		LOG_ERR("source and dest data size differ.");
		return -EINVAL;
	}

	/* Only byte/half-word/word transfers are representable in the tables */
	if (config->source_data_size != 4U &&
	    config->source_data_size != 2U &&
	    config->source_data_size != 1U) {
		LOG_ERR("source and dest unit size error, %d",
			config->source_data_size);
		return -EINVAL;
	}

	/* Continuous transfers are supported by hardware but not implemented
	 * by this driver
	 */
	if (config->head_block->source_reload_en ||
		config->head_block->dest_reload_en) {
		LOG_ERR("source_reload_en and dest_reload_en not "
			"implemented.");
		return -EINVAL;
	}

	/* Record the accepted configuration in the per-stream state */
	stream->busy		= true;
	stream->dma_callback	= config->dma_callback;
	stream->direction	= config->channel_direction;
	stream->user_data       = config->user_data;
	stream->src_size	= config->source_data_size;
	stream->dst_size	= config->dest_data_size;

	/* Check dest or source memory address, warn if 0 */
	if (config->head_block->source_address == 0) {
		LOG_WRN("source_buffer address is null.");
	}

	if (config->head_block->dest_address == 0) {
		LOG_WRN("dest_buffer address is null.");
	}

	DMA_InitStruct.SrcAddress = config->head_block->source_address;
	DMA_InitStruct.DestAddress = config->head_block->dest_address;
	DMA_InitStruct.BlkHWRequest = LL_DMA_HWREQUEST_SINGLEBURST;
	DMA_InitStruct.DataAlignment = LL_DMA_DATA_ALIGN_ZEROPADD;

	ret = dma_stm32_get_priority(config->channel_priority,
				     &DMA_InitStruct.Priority);
	if (ret < 0) {
		return ret;
	}

	ret = dma_stm32_get_direction(config->channel_direction,
				      &DMA_InitStruct.Direction);
	if (ret < 0) {
		return ret;
	}

	/* This part is for source */
	switch (config->head_block->source_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		DMA_InitStruct.SrcIncMode = LL_DMA_SRC_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		DMA_InitStruct.SrcIncMode = LL_DMA_SRC_FIXED;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		/* Address decrement is not supported by this driver */
		return -ENOTSUP;
	default:
		LOG_ERR("Memory increment error. %d",
			config->head_block->source_addr_adj);
		return -EINVAL;
	}
	LOG_DBG("Channel (%d) src inc (%x).",
				id, DMA_InitStruct.SrcIncMode);

	/* This part is for dest */
	switch (config->head_block->dest_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		DMA_InitStruct.DestIncMode = LL_DMA_DEST_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		DMA_InitStruct.DestIncMode = LL_DMA_DEST_FIXED;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		/* Address decrement is not supported by this driver */
		return -ENOTSUP;
	default:
		LOG_ERR("Periph increment error. %d",
			config->head_block->dest_addr_adj);
		return -EINVAL;
	}
	LOG_DBG("Channel (%d) dest inc (%x).",
				id, DMA_InitStruct.DestIncMode);

	stream->source_periph = (stream->direction == PERIPHERAL_TO_MEMORY);

	/* Set the data width, when source_data_size equals dest_data_size */
	int index = find_lsb_set(config->source_data_size) - 1;

	DMA_InitStruct.SrcDataWidth = table_src_size[index];

	index = find_lsb_set(config->dest_data_size) - 1;
	DMA_InitStruct.DestDataWidth = table_dst_size[index];

	DMA_InitStruct.BlkDataLength = config->head_block->block_size;

	/* The request ID is stored in the dma_slot */
	DMA_InitStruct.Request = config->dma_slot;

	LL_DMA_Init(dma, dma_stm32_id_to_stream(id), &DMA_InitStruct);

	LL_DMA_EnableIT_TC(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_USE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_ULE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_DTE(dma, dma_stm32_id_to_stream(id));

	/* Enable Half-Transfer irq if circular mode is enabled */
	/* NOTE(review): source_reload_en was rejected with -EINVAL earlier
	 * in this function, so this branch appears unreachable — confirm
	 * whether circular mode was meant to be allowed here.
	 */
	if (config->head_block->source_reload_en) {
		LL_DMA_EnableIT_HT(dma, dma_stm32_id_to_stream(id));
	}

	return ret;
}
517 
/*
 * Reload a previously configured stream with new addresses and size,
 * then re-enable it (dma_reload API entry).
 *
 * @param dev  DMA device instance
 * @param id   stream id, offset by STM32_DMA_STREAM_OFFSET
 * @param src  new source address
 * @param dst  new destination address
 * @param size new block length, in bytes
 * @return 0 on success, -EINVAL on bad id/direction, -EBUSY if the
 *	   stream cannot be disabled first
 */
static int dma_stm32_reload(const struct device *dev, uint32_t id,
					  uint32_t src, uint32_t dst,
					  size_t size)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	stream = &config->streams[id];

	/* The stream must be stopped before its registers can be rewritten */
	if (dma_stm32_disable_stream(dma, id) != 0) {
		return -EBUSY;
	}

	/* Only M2M, M2P and P2M directions are configured by this driver */
	if (stream->direction > PERIPHERAL_TO_MEMORY) {
		return -EINVAL;
	}

	LL_DMA_ConfigAddresses(dma,
				dma_stm32_id_to_stream(id),
				src, dst);

	LL_DMA_SetBlkDataLength(dma, dma_stm32_id_to_stream(id), size);

	/* When reloading the dma, the stream is busy again before enabling */
	stream->busy = true;

	stm32_dma_enable_stream(dma, id);

	return 0;
}
556 
dma_stm32_start(const struct device * dev,uint32_t id)557 static int dma_stm32_start(const struct device *dev, uint32_t id)
558 {
559 	const struct dma_stm32_config *config = dev->config;
560 	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
561 	struct dma_stm32_stream *stream;
562 
563 	/* Give channel from index 0 */
564 	id = id - STM32_DMA_STREAM_OFFSET;
565 
566 	/* Only M2P or M2M mode can be started manually. */
567 	if (id >= config->max_streams) {
568 		return -EINVAL;
569 	}
570 
571 	/* Repeated start : return now if channel is already started */
572 	if (stm32_dma_is_enabled_stream(dma, id)) {
573 		return 0;
574 	}
575 
576 	/* When starting the dma, the stream is busy before enabling */
577 	stream = &config->streams[id];
578 	stream->busy = true;
579 
580 	dma_stm32_clear_stream_irq(dev, id);
581 
582 	stm32_dma_enable_stream(dma, id);
583 
584 	return 0;
585 }
586 
/*
 * Suspend a running stream (dma_suspend API entry).
 *
 * The channel is suspended and the function busy-waits (1 ms sleep per
 * poll) until the hardware reports the suspend flag. The channel is NOT
 * reset, so it can be resumed later by dma_stm32_resume().
 *
 * @return 0 on success, -EINVAL on bad id
 */
static int dma_stm32_suspend(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Suspend the channel and wait for suspend Flag set */
	LL_DMA_SuspendChannel(dma, dma_stm32_id_to_stream(id));
	/* It's not enough to wait for the SUSPF bit with LL_DMA_IsActiveFlag_SUSP */
	/* NOTE(review): this loop has no timeout; it spins forever if the
	 * hardware never sets SUSPF — confirm that this cannot happen.
	 */
	do {
		k_msleep(1); /* A delay is needed (1ms is valid) */
	} while (LL_DMA_IsActiveFlag_SUSP(dma, dma_stm32_id_to_stream(id)) != 1);

	/* Do not Reset the channel to allow resuming later */
	return 0;
}
609 
dma_stm32_resume(const struct device * dev,uint32_t id)610 static int dma_stm32_resume(const struct device *dev, uint32_t id)
611 {
612 	const struct dma_stm32_config *config = dev->config;
613 	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
614 
615 	/* Give channel from index 0 */
616 	id = id - STM32_DMA_STREAM_OFFSET;
617 
618 	if (id >= config->max_streams) {
619 		return -EINVAL;
620 	}
621 
622 	/* Resume the channel : it's enough after suspend */
623 	LL_DMA_ResumeChannel(dma, dma_stm32_id_to_stream(id));
624 
625 	return 0;
626 }
627 
/*
 * Stop a stream (dma_stop API entry): disable its interrupts, clear
 * pending flags, abort the channel and mark the stream free.
 *
 * @return 0 on success (including already stopped), -EINVAL on bad id
 */
static int dma_stm32_stop(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	/* NOTE(review): stream is derived from id before the range check
	 * below; the pointer is not dereferenced until after the check.
	 */
	struct dma_stm32_stream *stream = &config->streams[id - STM32_DMA_STREAM_OFFSET];
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* When overridden by HAL DMA, only release the busy flag */
	if (stream->hal_override) {
		stream->busy = false;
		return 0;
	}

	/* Repeated stop : return now if channel is already stopped */
	if (!stm32_dma_is_enabled_stream(dma, id)) {
		return 0;
	}

	/* Mask all stream interrupts before aborting the transfer */
	LL_DMA_DisableIT_TC(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_USE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_ULE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_DTE(dma, dma_stm32_id_to_stream(id));

	dma_stm32_clear_stream_irq(dev, id);
	dma_stm32_disable_stream(dma, id);

	/* Finally, flag stream as free */
	stream->busy = false;

	return 0;
}
664 
dma_stm32_init(const struct device * dev)665 static int dma_stm32_init(const struct device *dev)
666 {
667 	const struct dma_stm32_config *config = dev->config;
668 	const struct device *clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
669 
670 	if (clock_control_on(clk,
671 		(clock_control_subsys_t) &config->pclken) != 0) {
672 		LOG_ERR("clock op failed\n");
673 		return -EIO;
674 	}
675 
676 	config->config_irq(dev);
677 
678 	for (uint32_t i = 0; i < config->max_streams; i++) {
679 		config->streams[i].busy = false;
680 	}
681 
682 	((struct dma_stm32_data *)dev->data)->dma_ctx.magic = 0;
683 	((struct dma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0;
684 	((struct dma_stm32_data *)dev->data)->dma_ctx.atomic = 0;
685 
686 	return 0;
687 }
688 
dma_stm32_get_status(const struct device * dev,uint32_t id,struct dma_status * stat)689 static int dma_stm32_get_status(const struct device *dev,
690 				uint32_t id, struct dma_status *stat)
691 {
692 	const struct dma_stm32_config *config = dev->config;
693 	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
694 	struct dma_stm32_stream *stream;
695 
696 	/* Give channel from index 0 */
697 	id = id - STM32_DMA_STREAM_OFFSET;
698 	if (id >= config->max_streams) {
699 		return -EINVAL;
700 	}
701 
702 	stream = &config->streams[id];
703 	stat->pending_length = LL_DMA_GetBlkDataLength(dma, dma_stm32_id_to_stream(id));
704 	stat->dir = stream->direction;
705 	stat->busy = stream->busy;
706 
707 	return 0;
708 }
709 
/* Zephyr DMA driver API vtable; bound to each instance in
 * DEVICE_DT_INST_DEFINE below.
 */
static DEVICE_API(dma, dma_funcs) = {
	.reload		 = dma_stm32_reload,
	.config		 = dma_stm32_configure,
	.start		 = dma_stm32_start,
	.stop		 = dma_stm32_stop,
	.get_status	 = dma_stm32_get_status,
	.suspend	 = dma_stm32_suspend,
	.resume		 = dma_stm32_resume,
};
719 
/*
 * Macro to CONNECT and enable each irq (order is given by the 'listify')
 * chan: channel of the DMA instance (assuming one irq per channel)
 *       stm32U5x has 16 channels
 * dma : dma instance (one GPDMA instance on stm32U5x)
 * Each expansion registers dma_stm32_irq_<dma>_<chan> as the handler for
 * the devicetree IRQ at index <chan> and enables it.
 */
#define DMA_STM32_IRQ_CONNECT_CHANNEL(chan, dma)			\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq),		\
			    DT_INST_IRQ_BY_IDX(dma, chan, priority),	\
			    dma_stm32_irq_##dma##_##chan,		\
			    DEVICE_DT_INST_GET(dma), 0);		\
		irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq));		\
	} while (0)
734 
/*
 * Macro to configure the irq for each dma instance (index)
 * Loop to CONNECT and enable each irq for each channel
 * Expecting as many irq as property <dma_channels>
 * Generates dma_stm32_config_irq_<index>(), referenced from the instance
 * config struct in DMA_STM32_INIT_DEV.
 */
#define DMA_STM32_IRQ_CONNECT(index) \
static void dma_stm32_config_irq_##index(const struct device *dev)	\
{									\
	ARG_UNUSED(dev);						\
									\
	LISTIFY(DT_INST_PROP(index, dma_channels),			\
		DMA_STM32_IRQ_CONNECT_CHANNEL, (;), index);		\
}
748 
/*
 * Macro to instantiate the irq handler (order is given by the 'listify')
 * chan: channel of the DMA instance (assuming one irq per channel)
 *       stm32U5x has 16 channels
 * dma : dma instance (one GPDMA instance on stm32U5x)
 * Each generated handler simply forwards to dma_stm32_irq_handler().
 */
#define DMA_STM32_DEFINE_IRQ_HANDLER(chan, dma)				\
static void dma_stm32_irq_##dma##_##chan(const struct device *dev)	\
{									\
	dma_stm32_irq_handler(dev, chan);				\
}
760 
/*
 * Per-instance instantiation: IRQ handlers, IRQ connect function,
 * stream array, config/data structs and the Zephyr device definition.
 * The number of channels must match the number of devicetree IRQs.
 */
#define DMA_STM32_INIT_DEV(index)					\
BUILD_ASSERT(DT_INST_PROP(index, dma_channels)				\
	== DT_NUM_IRQS(DT_DRV_INST(index)),				\
	"Nb of Channels and IRQ mismatch");				\
									\
LISTIFY(DT_INST_PROP(index, dma_channels),				\
	DMA_STM32_DEFINE_IRQ_HANDLER, (;), index);			\
									\
DMA_STM32_IRQ_CONNECT(index);						\
									\
static struct dma_stm32_stream						\
	dma_stm32_streams_##index[DT_INST_PROP_OR(index, dma_channels,	\
		DT_NUM_IRQS(DT_DRV_INST(index)))];	\
									\
const struct dma_stm32_config dma_stm32_config_##index = {		\
	.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus),		\
		    .enr = DT_INST_CLOCKS_CELL(index, bits) },		\
	.config_irq = dma_stm32_config_irq_##index,			\
	.base = DT_INST_REG_ADDR(index),				\
	.max_streams = DT_INST_PROP_OR(index, dma_channels,		\
		DT_NUM_IRQS(DT_DRV_INST(index))				\
	),		\
	.streams = dma_stm32_streams_##index,				\
};									\
									\
static struct dma_stm32_data dma_stm32_data_##index = {			\
};									\
									\
DEVICE_DT_INST_DEFINE(index,						\
		    &dma_stm32_init,					\
		    NULL,						\
		    &dma_stm32_data_##index, &dma_stm32_config_##index,	\
		    PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,		\
		    &dma_funcs);

/* Instantiate the driver for every enabled st,stm32u5-dma node */
DT_INST_FOREACH_STATUS_OKAY(DMA_STM32_INIT_DEV)
797