1 /*
2  * Copyright (c) 2016 Linaro Limited.
3  * Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
4  * Copyright (c) 2022 STMicroelectronics
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 /**
10  * @brief Common part of DMA drivers for stm32U5.
11  * @note  Functions named with stm32_dma_* are SoCs related functions
12  *
13  */
14 
15 #include "dma_stm32.h"
16 
17 #include <zephyr/init.h>
18 #include <zephyr/drivers/clock_control.h>
19 #include <zephyr/drivers/dma/dma_stm32.h>
20 
21 #include <zephyr/logging/log.h>
22 #include <zephyr/irq.h>
23 LOG_MODULE_REGISTER(dma_stm32, CONFIG_DMA_LOG_LEVEL);
24 
25 #define DT_DRV_COMPAT st_stm32u5_dma
26 
/* LL source data-width values, indexed by log2 of the data size in bytes */
static const uint32_t table_src_size[] = {
	LL_DMA_SRC_DATAWIDTH_BYTE,
	LL_DMA_SRC_DATAWIDTH_HALFWORD,
	LL_DMA_SRC_DATAWIDTH_WORD,
};

/* LL destination data-width values, indexed by log2 of the data size in bytes */
static const uint32_t table_dst_size[] = {
	LL_DMA_DEST_DATAWIDTH_BYTE,
	LL_DMA_DEST_DATAWIDTH_HALFWORD,
	LL_DMA_DEST_DATAWIDTH_WORD,
};

/* LL channel priority values, indexed by Zephyr channel_priority (0..3) */
static const uint32_t table_priority[4] = {
	LL_DMA_LOW_PRIORITY_LOW_WEIGHT,
	LL_DMA_LOW_PRIORITY_MID_WEIGHT,
	LL_DMA_LOW_PRIORITY_HIGH_WEIGHT,
	LL_DMA_HIGH_PRIORITY,
};
45 
/* Log the interrupt flags of one GPDMA channel (debug helper). */
static void dma_stm32_dump_stream_irq(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *cfg = dev->config;

	stm32_dma_dump_stream_irq((DMA_TypeDef *)(cfg->base), id);
}
53 
dma_stm32_clear_stream_irq(const struct device * dev,uint32_t id)54 static void dma_stm32_clear_stream_irq(const struct device *dev, uint32_t id)
55 {
56 	const struct dma_stm32_config *config = dev->config;
57 	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
58 
59 	dma_stm32_clear_tc(dma, id);
60 	dma_stm32_clear_ht(dma, id);
61 	stm32_dma_clear_stream_irq(dma, id);
62 }
63 
64 
/* Map a zero-based driver channel index onto the matching LL channel id. */
uint32_t dma_stm32_id_to_stream(uint32_t id)
{
	static const uint32_t channel_nr[] = {
		LL_DMA_CHANNEL_0,  LL_DMA_CHANNEL_1,
		LL_DMA_CHANNEL_2,  LL_DMA_CHANNEL_3,
		LL_DMA_CHANNEL_4,  LL_DMA_CHANNEL_5,
		LL_DMA_CHANNEL_6,  LL_DMA_CHANNEL_7,
		LL_DMA_CHANNEL_8,  LL_DMA_CHANNEL_9,
		LL_DMA_CHANNEL_10, LL_DMA_CHANNEL_11,
		LL_DMA_CHANNEL_12, LL_DMA_CHANNEL_13,
		LL_DMA_CHANNEL_14, LL_DMA_CHANNEL_15,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(channel_nr));

	return channel_nr[id];
}
90 
/* Return true when the transfer-complete flag is set for channel @p id */
bool dma_stm32_is_tc_active(DMA_TypeDef *DMAx, uint32_t id)
{
	return LL_DMA_IsActiveFlag_TC(DMAx, dma_stm32_id_to_stream(id));
}
95 
/* Clear the transfer-complete flag of channel @p id */
void dma_stm32_clear_tc(DMA_TypeDef *DMAx, uint32_t id)
{
	LL_DMA_ClearFlag_TC(DMAx, dma_stm32_id_to_stream(id));
}
100 
/* Return true when the data transfer error (DTE) flag is set for channel @p id */
static inline bool dma_stm32_is_dte_active(DMA_TypeDef *dma, uint32_t id)
{
	return LL_DMA_IsActiveFlag_DTE(dma, dma_stm32_id_to_stream(id));
}
106 
/* Return true when the update link error (ULE) flag is set for channel @p id */
static inline bool dma_stm32_is_ule_active(DMA_TypeDef *dma, uint32_t id)
{
	return LL_DMA_IsActiveFlag_ULE(dma, dma_stm32_id_to_stream(id));
}
112 
/* Return true when the user setting error (USE) flag is set for channel @p id */
static inline bool dma_stm32_is_use_active(DMA_TypeDef *dma, uint32_t id)
{
	return LL_DMA_IsActiveFlag_USE(dma, dma_stm32_id_to_stream(id));
}
118 
119 /* transfer error either a data or user or link error */
dma_stm32_is_te_active(DMA_TypeDef * DMAx,uint32_t id)120 bool dma_stm32_is_te_active(DMA_TypeDef *DMAx, uint32_t id)
121 {
122 	return (
123 	LL_DMA_IsActiveFlag_DTE(DMAx, dma_stm32_id_to_stream(id)) ||
124 		LL_DMA_IsActiveFlag_ULE(DMAx, dma_stm32_id_to_stream(id)) ||
125 		LL_DMA_IsActiveFlag_USE(DMAx, dma_stm32_id_to_stream(id))
126 	);
127 }
128 /* clear transfer error either a data or user or link error */
dma_stm32_clear_te(DMA_TypeDef * DMAx,uint32_t id)129 void dma_stm32_clear_te(DMA_TypeDef *DMAx, uint32_t id)
130 {
131 	LL_DMA_ClearFlag_DTE(DMAx, dma_stm32_id_to_stream(id));
132 	LL_DMA_ClearFlag_ULE(DMAx, dma_stm32_id_to_stream(id));
133 	LL_DMA_ClearFlag_USE(DMAx, dma_stm32_id_to_stream(id));
134 }
135 
/* Return true when the half-transfer flag is set for channel @p id */
bool dma_stm32_is_ht_active(DMA_TypeDef *DMAx, uint32_t id)
{
	return LL_DMA_IsActiveFlag_HT(DMAx, dma_stm32_id_to_stream(id));
}
140 
/* Clear the half-transfer flag of channel @p id */
void dma_stm32_clear_ht(DMA_TypeDef *DMAx, uint32_t id)
{
	LL_DMA_ClearFlag_HT(DMAx, dma_stm32_id_to_stream(id));
}
145 
stm32_dma_dump_stream_irq(DMA_TypeDef * dma,uint32_t id)146 void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id)
147 {
148 	LOG_INF("tc: %d, ht: %d, dte: %d, ule: %d, use: %d",
149 		dma_stm32_is_tc_active(dma, id),
150 		dma_stm32_is_ht_active(dma, id),
151 		dma_stm32_is_dte_active(dma, id),
152 		dma_stm32_is_ule_active(dma, id),
153 		dma_stm32_is_use_active(dma, id)
154 	);
155 }
156 
157 /* Check if nsecure masked interrupt is active on channel */
stm32_dma_is_tc_irq_active(DMA_TypeDef * dma,uint32_t id)158 bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id)
159 {
160 	return (LL_DMA_IsEnabledIT_TC(dma, dma_stm32_id_to_stream(id)) &&
161 		LL_DMA_IsActiveFlag_TC(dma, dma_stm32_id_to_stream(id)));
162 }
163 
stm32_dma_is_ht_irq_active(DMA_TypeDef * dma,uint32_t id)164 bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id)
165 {
166 	return (LL_DMA_IsEnabledIT_HT(dma, dma_stm32_id_to_stream(id)) &&
167 		LL_DMA_IsActiveFlag_HT(dma, dma_stm32_id_to_stream(id)));
168 }
169 
stm32_dma_is_te_irq_active(DMA_TypeDef * dma,uint32_t id)170 static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id)
171 {
172 	return (
173 		(LL_DMA_IsEnabledIT_DTE(dma, dma_stm32_id_to_stream(id)) &&
174 		LL_DMA_IsActiveFlag_DTE(dma, dma_stm32_id_to_stream(id))) ||
175 		(LL_DMA_IsEnabledIT_ULE(dma, dma_stm32_id_to_stream(id)) &&
176 		LL_DMA_IsActiveFlag_ULE(dma, dma_stm32_id_to_stream(id))) ||
177 		(LL_DMA_IsEnabledIT_USE(dma, dma_stm32_id_to_stream(id)) &&
178 		LL_DMA_IsActiveFlag_USE(dma, dma_stm32_id_to_stream(id)))
179 		);
180 }
181 
/* Check if an irq of any type occurred on the channel (masked interrupt status) */
#define stm32_dma_is_irq_active LL_DMA_IsActiveFlag_MIS
184 
stm32_dma_clear_stream_irq(DMA_TypeDef * dma,uint32_t id)185 void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id)
186 {
187 	dma_stm32_clear_te(dma, id);
188 
189 	LL_DMA_ClearFlag_TO(dma, dma_stm32_id_to_stream(id));
190 	LL_DMA_ClearFlag_SUSP(dma, dma_stm32_id_to_stream(id));
191 }
192 
stm32_dma_is_irq_happened(DMA_TypeDef * dma,uint32_t id)193 bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id)
194 {
195 	if (dma_stm32_is_te_active(dma, id)) {
196 		return true;
197 	}
198 
199 	return false;
200 }
201 
/* Enable (start) the GPDMA channel @p id */
void stm32_dma_enable_stream(DMA_TypeDef *dma, uint32_t id)
{
	LL_DMA_EnableChannel(dma, dma_stm32_id_to_stream(id));
}
206 
stm32_dma_is_enabled_stream(DMA_TypeDef * dma,uint32_t id)207 bool stm32_dma_is_enabled_stream(DMA_TypeDef *dma, uint32_t id)
208 {
209 	if (LL_DMA_IsEnabledChannel(dma, dma_stm32_id_to_stream(id)) == 1) {
210 		return true;
211 	}
212 	return false;
213 }
214 
/*
 * Abort one GPDMA channel: suspend it, then reset it (reset also disables).
 *
 * @retval 0 when the channel is disabled, -EAGAIN when it is still enabled.
 */
int stm32_dma_disable_stream(DMA_TypeDef *dma, uint32_t id)
{
	const uint32_t channel = dma_stm32_id_to_stream(id);

	/* GPDMA channel abort sequence */
	LL_DMA_SuspendChannel(dma, channel);

	/* reset the channel will disable it */
	LL_DMA_ResetChannel(dma, channel);

	return stm32_dma_is_enabled_stream(dma, id) ? -EAGAIN : 0;
}
229 
/* Program source and destination addresses of @p channel (src then dest) */
void stm32_dma_set_mem_periph_address(DMA_TypeDef *dma,
					     uint32_t channel,
					     uint32_t src_addr,
					     uint32_t dest_addr)
{
	LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr);
}
237 
/* Same as stm32_dma_set_mem_periph_address: GPDMA uses one src/dest address pair
 * regardless of direction
 */
void stm32_dma_set_periph_mem_address(DMA_TypeDef *dma,
					     uint32_t channel,
					     uint32_t src_addr,
					     uint32_t dest_addr)
{
	LL_DMA_ConfigAddresses(dma, channel, src_addr, dest_addr);
}
246 
/*
 * Per-channel interrupt service routine.
 *
 * Dispatches half-transfer (DMA_STATUS_BLOCK), transfer-complete
 * (DMA_STATUS_COMPLETE) and error (-EIO) events to the user callback.
 * @p id is the zero-based channel index; the callback receives it shifted
 * back by STM32_DMA_STREAM_OFFSET.
 */
static void dma_stm32_irq_handler(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;
	uint32_t callback_arg;

	__ASSERT_NO_MSG(id < config->max_streams);

	stream = &config->streams[id];
	/* The busy channel is pertinent if not overridden by the HAL */
	if ((stream->hal_override != true) && (stream->busy == false)) {
		/*
		 * When DMA channel is not overridden by HAL,
		 * ignore irq if the channel is not busy anymore
		 */
		dma_stm32_clear_stream_irq(dev, id);
		return;
	}
	callback_arg = id + STM32_DMA_STREAM_OFFSET;

	/* The dma stream id is in range from STM32_DMA_STREAM_OFFSET..<dma-requests> */
	if (stm32_dma_is_ht_irq_active(dma, id)) {
		/* Let HAL DMA handle flags on its own */
		if (!stream->hal_override) {
			dma_stm32_clear_ht(dma, id);
		}
		stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_BLOCK);
	} else if (stm32_dma_is_tc_irq_active(dma, id)) {
		/* Assuming not cyclic transfer */
		if (stream->cyclic == false) {
			/* One-shot transfer finished: release the channel */
			stream->busy = false;
		}
		/* Let HAL DMA handle flags on its own */
		if (!stream->hal_override) {
			dma_stm32_clear_tc(dma, id);
		}
		stream->dma_callback(dev, stream->user_data, callback_arg, DMA_STATUS_COMPLETE);
	} else {
		/* Neither HT nor TC: must be one of the error interrupts */
		LOG_ERR("Transfer Error.");
		stream->busy = false;
		dma_stm32_dump_stream_irq(dev, id);
		dma_stm32_clear_stream_irq(dev, id);
		stream->dma_callback(dev, stream->user_data,
				     callback_arg, -EIO);
	}
}
294 
/*
 * Translate a Zephyr channel priority (0..3) into the matching LL value.
 *
 * @param priority    Zephyr channel priority, valid indices into table_priority.
 * @param ll_priority Output: LL_DMA_*_PRIORITY_* value.
 * @retval 0 on success, -EINVAL when @p priority is out of range.
 */
static int dma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority)
{
	/* >= : table_priority holds ARRAY_SIZE() entries, indexed 0..N-1.
	 * The previous '>' check let priority == ARRAY_SIZE() read out of bounds.
	 */
	if (priority >= ARRAY_SIZE(table_priority)) {
		LOG_ERR("Priority error. %d", priority);
		return -EINVAL;
	}

	*ll_priority = table_priority[priority];
	return 0;
}
305 
/*
 * Translate a Zephyr transfer direction into the matching LL value.
 *
 * @retval 0 on success, -EINVAL for unsupported directions.
 */
static int dma_stm32_get_direction(enum dma_channel_direction direction,
				   uint32_t *ll_direction)
{
	switch (direction) {
	case MEMORY_TO_MEMORY:
		*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_MEMORY;
		return 0;
	case MEMORY_TO_PERIPHERAL:
		*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
		return 0;
	case PERIPHERAL_TO_MEMORY:
		*ll_direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY;
		return 0;
	default:
		LOG_ERR("Direction error. %d", direction);
		return -EINVAL;
	}
}
326 
/*
 * Disable a channel, retrying for up to ~5 seconds (1 ms per attempt).
 *
 * @retval 0 once the channel is disabled, -EBUSY after the retry budget
 *         is exhausted.
 */
static int dma_stm32_disable_stream(DMA_TypeDef *dma, uint32_t id)
{
	/* Bounded loop replaces the previous infinite for(;;) whose trailing
	 * 'return 0' was unreachable dead code.
	 */
	for (int count = 0; count <= (5 * 1000); count++) {
		if (stm32_dma_disable_stream(dma, id) == 0) {
			return 0;
		}
		k_sleep(K_MSEC(1));
	}

	/* After trying for 5 seconds, give up */
	return -EBUSY;
}
344 
/*
 * Configure a GPDMA channel for one transfer (dma_config API entry).
 *
 * @param dev    DMA controller device.
 * @param id     Channel in range STM32_DMA_STREAM_OFFSET..<dma-requests>;
 *               converted to a zero-based index internally.
 * @param config Zephyr DMA transfer description.
 * @retval 0 on success, -EINVAL on bad parameters, -EBUSY when the channel
 *         is in use or cannot be disabled.
 */
static int dma_stm32_configure(const struct device *dev,
					     uint32_t id,
					     struct dma_config *config)
{
	const struct dma_stm32_config *dev_config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base;
	struct dma_stm32_stream *stream;
	LL_DMA_InitTypeDef DMA_InitStruct;
	int ret;

	/* Linked list node and structure initialization (used in cyclic mode).
	 * NOTE(review): a single static node is shared by all channels, so only
	 * one channel at a time can safely run in cyclic mode — verify callers.
	 */
	static LL_DMA_LinkNodeTypeDef Node_GPDMA_Channel;
	LL_DMA_InitLinkedListTypeDef DMA_InitLinkedListStruct;
	LL_DMA_InitNodeTypeDef NodeConfig;

	LL_DMA_ListStructInit(&DMA_InitLinkedListStruct);
	LL_DMA_NodeStructInit(&NodeConfig);
	LL_DMA_StructInit(&DMA_InitStruct);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= dev_config->max_streams) {
		LOG_ERR("cannot configure the dma stream %d.", id);
		return -EINVAL;
	}

	/* Index into streams[] only after id has been validated (the previous
	 * code computed the pointer from the raw, unchecked id).
	 */
	stream = &dev_config->streams[id];

	if (stream->busy) {
		LOG_ERR("dma stream %d is busy.", id);
		return -EBUSY;
	}

	if (dma_stm32_disable_stream(dma, id) != 0) {
		LOG_ERR("could not disable dma stream %d.", id);
		return -EBUSY;
	}

	dma_stm32_clear_stream_irq(dev, id);

	/* Check potential DMA override (if id parameters and stream are valid) */
	if (config->linked_channel == STM32_DMA_HAL_OVERRIDE) {
		/* DMA channel is overridden by HAL DMA
		 * Retain that the channel is busy and proceed to the minimal
		 * configuration to properly route the IRQ
		 */
		stream->busy = true;
		stream->hal_override = true;
		stream->dma_callback = config->dma_callback;
		stream->user_data = config->user_data;
		return 0;
	}

	if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
		LOG_ERR("Data size too big: %d\n",
		       config->head_block->block_size);
		return -EINVAL;
	}

	/* Support only the same data width for source and dest */
	if (config->dest_data_size != config->source_data_size) {
		LOG_ERR("source and dest data size differ.");
		return -EINVAL;
	}

	if (config->source_data_size != 4U &&
	    config->source_data_size != 2U &&
	    config->source_data_size != 1U) {
		LOG_ERR("source and dest unit size error, %d",
			config->source_data_size);
		return -EINVAL;
	}

	stream->busy		= true;
	stream->dma_callback	= config->dma_callback;
	stream->direction	= config->channel_direction;
	stream->user_data       = config->user_data;
	stream->src_size	= config->source_data_size;
	stream->dst_size	= config->dest_data_size;
	/* Reset here so a channel previously configured cyclic does not keep
	 * cyclic behaviour when reconfigured for a one-shot transfer.
	 */
	stream->cyclic		= false;

	/* Check dest or source memory address, warn if 0 */
	if (config->head_block->source_address == 0) {
		LOG_WRN("source_buffer address is null.");
	}

	if (config->head_block->dest_address == 0) {
		LOG_WRN("dest_buffer address is null.");
	}

	DMA_InitStruct.SrcAddress = config->head_block->source_address;
	DMA_InitStruct.DestAddress = config->head_block->dest_address;
	NodeConfig.SrcAddress = config->head_block->source_address;
	NodeConfig.DestAddress = config->head_block->dest_address;
	NodeConfig.BlkDataLength = config->head_block->block_size;

	ret = dma_stm32_get_priority(config->channel_priority,
				     &DMA_InitStruct.Priority);
	if (ret < 0) {
		return ret;
	}

	ret = dma_stm32_get_direction(config->channel_direction,
				      &DMA_InitStruct.Direction);
	if (ret < 0) {
		return ret;
	}

	/* This part is for source */
	switch (config->head_block->source_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		DMA_InitStruct.SrcIncMode = LL_DMA_SRC_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		DMA_InitStruct.SrcIncMode = LL_DMA_SRC_FIXED;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		/* GPDMA has no decrementing address mode */
		return -ENOTSUP;
	default:
		LOG_ERR("Memory increment error. %d",
			config->head_block->source_addr_adj);
		return -EINVAL;
	}
	LOG_DBG("Channel (%d) src inc (%x).",
				id, DMA_InitStruct.SrcIncMode);

	/* This part is for dest */
	switch (config->head_block->dest_addr_adj) {
	case DMA_ADDR_ADJ_INCREMENT:
		DMA_InitStruct.DestIncMode = LL_DMA_DEST_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		DMA_InitStruct.DestIncMode = LL_DMA_DEST_FIXED;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		/* GPDMA has no decrementing address mode */
		return -ENOTSUP;
	default:
		LOG_ERR("Periph increment error. %d",
			config->head_block->dest_addr_adj);
		return -EINVAL;
	}
	LOG_DBG("Channel (%d) dest inc (%x).",
				id, DMA_InitStruct.DestIncMode);

	stream->source_periph = (stream->direction == PERIPHERAL_TO_MEMORY);

	/* Set the data width, when source_data_size equals dest_data_size */
	int index = find_lsb_set(config->source_data_size) - 1;

	DMA_InitStruct.SrcDataWidth = table_src_size[index];

	index = find_lsb_set(config->dest_data_size) - 1;
	DMA_InitStruct.DestDataWidth = table_dst_size[index];

	DMA_InitStruct.BlkDataLength = config->head_block->block_size;

	/* The request ID is stored in the dma_slot */
	DMA_InitStruct.Request = config->dma_slot;

	if (config->head_block->source_reload_en == 0) {
		/* Initialize the DMA structure in non-cyclic mode only */
		LL_DMA_Init(dma, dma_stm32_id_to_stream(id), &DMA_InitStruct);
	} else {/* cyclic mode */
		/* Setting GPDMA request */
		NodeConfig.DestDataWidth = DMA_InitStruct.DestDataWidth;
		NodeConfig.SrcDataWidth = DMA_InitStruct.SrcDataWidth;
		NodeConfig.DestIncMode = DMA_InitStruct.DestIncMode;
		NodeConfig.SrcIncMode = DMA_InitStruct.SrcIncMode;
		NodeConfig.Direction = DMA_InitStruct.Direction;
		NodeConfig.Request = DMA_InitStruct.Request;

		/* Continuous transfers with Linked List: one node linked to itself */
		stream->cyclic = true;
		LL_DMA_List_Init(dma, dma_stm32_id_to_stream(id), &DMA_InitLinkedListStruct);
		LL_DMA_CreateLinkNode(&NodeConfig, &Node_GPDMA_Channel);
		LL_DMA_ConnectLinkNode(&Node_GPDMA_Channel, LL_DMA_CLLR_OFFSET5,
				       &Node_GPDMA_Channel, LL_DMA_CLLR_OFFSET5);
		LL_DMA_SetLinkedListBaseAddr(dma, dma_stm32_id_to_stream(id),
					     (uint32_t)&Node_GPDMA_Channel);
		LL_DMA_ConfigLinkUpdate(dma, dma_stm32_id_to_stream(id),
					(LL_DMA_UPDATE_CTR1 | LL_DMA_UPDATE_CTR2 |
					 LL_DMA_UPDATE_CBR1 | LL_DMA_UPDATE_CSAR |
					 LL_DMA_UPDATE_CDAR | LL_DMA_UPDATE_CLLR),
					(uint32_t)&Node_GPDMA_Channel);

		/* Half-transfer interrupt only makes sense for cyclic transfers */
		LL_DMA_EnableIT_HT(dma, dma_stm32_id_to_stream(id));
	}

#ifdef CONFIG_ARM_SECURE_FIRMWARE
	LL_DMA_ConfigChannelSecure(dma, dma_stm32_id_to_stream(id),
		LL_DMA_CHANNEL_SEC | LL_DMA_CHANNEL_SRC_SEC | LL_DMA_CHANNEL_DEST_SEC);
	LL_DMA_EnableChannelPrivilege(dma, dma_stm32_id_to_stream(id));
#endif

	LL_DMA_EnableIT_TC(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_USE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_ULE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_EnableIT_DTE(dma, dma_stm32_id_to_stream(id));

	return ret;
}
545 
/*
 * Reprogram addresses and length of an already-configured channel, then
 * re-enable it (dma_reload API entry).
 *
 * @param id   Channel in range STM32_DMA_STREAM_OFFSET..<dma-requests>.
 * @param src  New source address.
 * @param dst  New destination address.
 * @param size New block length in bytes.
 * @retval 0 on success, -EINVAL on bad id/direction, -EBUSY if the channel
 *         cannot be disabled first.
 */
static int dma_stm32_reload(const struct device *dev, uint32_t id,
					  uint32_t src, uint32_t dst,
					  size_t size)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	stream = &config->streams[id];

	/* The channel must be stopped before its registers can be rewritten */
	if (dma_stm32_disable_stream(dma, id) != 0) {
		return -EBUSY;
	}

	if (stream->direction > PERIPHERAL_TO_MEMORY) {
		return -EINVAL;
	}

	LL_DMA_ConfigAddresses(dma,
				dma_stm32_id_to_stream(id),
				src, dst);

	LL_DMA_SetBlkDataLength(dma, dma_stm32_id_to_stream(id), size);

	/* When reloading the dma, the stream is busy again before enabling */
	stream->busy = true;

	stm32_dma_enable_stream(dma, id);

	return 0;
}
584 
/*
 * Start a previously configured channel (dma_start API entry).
 * Idempotent: returns 0 without side effects when the channel already runs.
 */
static int dma_stm32_start(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	/* Only M2P or M2M mode can be started manually. */
	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Repeated start : return now if channel is already started */
	if (stm32_dma_is_enabled_stream(dma, id)) {
		return 0;
	}

	/* When starting the dma, the stream is busy before enabling */
	stream = &config->streams[id];
	stream->busy = true;

	/* Drop any stale flags before the channel raises new interrupts */
	dma_stm32_clear_stream_irq(dev, id);

	stm32_dma_enable_stream(dma, id);

	return 0;
}
614 
/*
 * Suspend an ongoing transfer without losing channel state
 * (dma_suspend API entry); resume with dma_stm32_resume().
 *
 * NOTE(review): the wait loop below has no timeout — it blocks until SUSPF
 * is set, which presumably requires the channel to be active; confirm callers
 * never suspend an idle channel.
 */
static int dma_stm32_suspend(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Suspend the channel and wait for suspend Flag set */
	LL_DMA_SuspendChannel(dma, dma_stm32_id_to_stream(id));
	/* It's not enough to wait for the SUSPF bit with LL_DMA_IsActiveFlag_SUSP */
	do {
		k_msleep(1); /* A delay is needed (1ms is valid) */
	} while (LL_DMA_IsActiveFlag_SUSP(dma, dma_stm32_id_to_stream(id)) != 1);

	/* Do not Reset the channel to allow resuming later */
	return 0;
}
637 
/*
 * Resume a channel previously stopped by dma_stm32_suspend()
 * (dma_resume API entry).
 */
static int dma_stm32_resume(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *cfg = dev->config;
	DMA_TypeDef *dma_regs = (DMA_TypeDef *)(cfg->base);

	/* Give channel from index 0 */
	id -= STM32_DMA_STREAM_OFFSET;

	if (id >= cfg->max_streams) {
		return -EINVAL;
	}

	/* After a suspend, resuming the channel is sufficient on its own */
	LL_DMA_ResumeChannel(dma_regs, dma_stm32_id_to_stream(id));

	return 0;
}
655 
/*
 * Stop a channel: mask its interrupts, clear flags and disable it
 * (dma_stop API entry). Idempotent when the channel is already stopped.
 */
static int dma_stm32_stop(const struct device *dev, uint32_t id)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id = id - STM32_DMA_STREAM_OFFSET;

	if (id >= config->max_streams) {
		return -EINVAL;
	}

	/* Index into streams[] only after validating id (the previous code
	 * computed the pointer from the raw, unchecked id).
	 */
	stream = &config->streams[id];

	if (stream->hal_override) {
		/* HAL owns the channel: just release the driver-side busy flag */
		stream->busy = false;
		return 0;
	}

	/* Repeated stop : return now if channel is already stopped */
	if (!stm32_dma_is_enabled_stream(dma, id)) {
		return 0;
	}

	LL_DMA_DisableIT_TC(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_USE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_ULE(dma, dma_stm32_id_to_stream(id));
	LL_DMA_DisableIT_DTE(dma, dma_stm32_id_to_stream(id));

	dma_stm32_clear_stream_irq(dev, id);
	dma_stm32_disable_stream(dma, id);

	/* Finally, flag stream as free */
	stream->busy = false;

	return 0;
}
692 
dma_stm32_init(const struct device * dev)693 static int dma_stm32_init(const struct device *dev)
694 {
695 	const struct dma_stm32_config *config = dev->config;
696 	const struct device *clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
697 
698 	if (clock_control_on(clk,
699 		(clock_control_subsys_t) &config->pclken) != 0) {
700 		LOG_ERR("clock op failed\n");
701 		return -EIO;
702 	}
703 
704 	config->config_irq(dev);
705 
706 	for (uint32_t i = 0; i < config->max_streams; i++) {
707 		config->streams[i].busy = false;
708 	}
709 
710 	((struct dma_stm32_data *)dev->data)->dma_ctx.magic = 0;
711 	((struct dma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0;
712 	((struct dma_stm32_data *)dev->data)->dma_ctx.atomic = 0;
713 
714 	return 0;
715 }
716 
/*
 * Report direction, busy state and remaining block length of a channel
 * (dma_get_status API entry).
 */
static int dma_stm32_get_status(const struct device *dev,
				uint32_t id, struct dma_status *stat)
{
	const struct dma_stm32_config *config = dev->config;
	DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
	const struct dma_stm32_stream *stream;

	/* Give channel from index 0 */
	id -= STM32_DMA_STREAM_OFFSET;
	if (id >= config->max_streams) {
		return -EINVAL;
	}

	stream = &config->streams[id];
	stat->dir = stream->direction;
	stat->busy = stream->busy;
	stat->pending_length = LL_DMA_GetBlkDataLength(dma, dma_stm32_id_to_stream(id));

	return 0;
}
737 
/* Zephyr DMA driver API bindings for the STM32U5 GPDMA */
static DEVICE_API(dma, dma_funcs) = {
	.reload		 = dma_stm32_reload,
	.config		 = dma_stm32_configure,
	.start		 = dma_stm32_start,
	.stop		 = dma_stm32_stop,
	.get_status	 = dma_stm32_get_status,
	.suspend	 = dma_stm32_suspend,
	.resume		 = dma_stm32_resume,
};
747 
748 /*
749  * Macro to CONNECT and enable each irq (order is given by the 'listify')
750  * chan: channel of the DMA instance (assuming one irq per channel)
751  *       stm32U5x has 16 channels
752  * dma : dma instance (one GPDMA instance on stm32U5x)
753  */
754 #define DMA_STM32_IRQ_CONNECT_CHANNEL(chan, dma)			\
755 	do {								\
756 		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq),		\
757 			    DT_INST_IRQ_BY_IDX(dma, chan, priority),	\
758 			    dma_stm32_irq_##dma##_##chan,		\
759 			    DEVICE_DT_INST_GET(dma), 0);		\
760 		irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq));		\
761 	} while (0)
762 
763 /*
764  * Macro to configure the irq for each dma instance (index)
765  * Loop to CONNECT and enable each irq for each channel
766  * Expecting as many irq as property <dma_channels>
767  */
768 #define DMA_STM32_IRQ_CONNECT(index) \
769 static void dma_stm32_config_irq_##index(const struct device *dev)	\
770 {									\
771 	ARG_UNUSED(dev);						\
772 									\
773 	LISTIFY(DT_INST_PROP(index, dma_channels),			\
774 		DMA_STM32_IRQ_CONNECT_CHANNEL, (;), index);		\
775 }
776 
777 /*
778  * Macro to instanciate the irq handler (order is given by the 'listify')
779  * chan: channel of the DMA instance (assuming one irq per channel)
780  *       stm32U5x has 16 channels
781  * dma : dma instance (one GPDMA instance on stm32U5x)
782  */
783 #define DMA_STM32_DEFINE_IRQ_HANDLER(chan, dma)				\
784 static void dma_stm32_irq_##dma##_##chan(const struct device *dev)	\
785 {									\
786 	dma_stm32_irq_handler(dev, chan);				\
787 }
788 
/*
 * Per-instance definition: IRQ handlers, stream array, config/data structs
 * and the device itself, for every devicetree node with status "okay".
 */
#define DMA_STM32_INIT_DEV(index)					\
BUILD_ASSERT(DT_INST_PROP(index, dma_channels)				\
	== DT_NUM_IRQS(DT_DRV_INST(index)),				\
	"Nb of Channels and IRQ mismatch");				\
									\
LISTIFY(DT_INST_PROP(index, dma_channels),				\
	DMA_STM32_DEFINE_IRQ_HANDLER, (;), index);			\
									\
DMA_STM32_IRQ_CONNECT(index);						\
									\
static struct dma_stm32_stream						\
	dma_stm32_streams_##index[DT_INST_PROP_OR(index, dma_channels,	\
		DT_NUM_IRQS(DT_DRV_INST(index)))];	\
									\
const struct dma_stm32_config dma_stm32_config_##index = {		\
	.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus),		\
		    .enr = DT_INST_CLOCKS_CELL(index, bits) },		\
	.config_irq = dma_stm32_config_irq_##index,			\
	.base = DT_INST_REG_ADDR(index),				\
	.max_streams = DT_INST_PROP_OR(index, dma_channels,		\
		DT_NUM_IRQS(DT_DRV_INST(index))				\
	),		\
	.streams = dma_stm32_streams_##index,				\
};									\
									\
static struct dma_stm32_data dma_stm32_data_##index = {			\
};									\
									\
DEVICE_DT_INST_DEFINE(index,						\
		    &dma_stm32_init,					\
		    NULL,						\
		    &dma_stm32_data_##index, &dma_stm32_config_##index,	\
		    PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,		\
		    &dma_funcs);

DT_INST_FOREACH_STATUS_OKAY(DMA_STM32_INIT_DEV)
825