/*
 * Copyright (c) 2023 Jeroen van Dooren, Nobleo Technology
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Common part of BDMA drivers for stm32.
 */
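
/*
 * Note: the BDMA ("basic DMA") is the low-bandwidth DMA controller of the
 * STM32H7 series. It sits in the D3 power domain, which is why its memory
 * side is restricted to SRAM4 (see bdma_stm32_is_valid_memory_address()
 * below).
 */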

#include "dma_stm32_bdma.h"

#include <zephyr/init.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/dma/dma_stm32.h>
#include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dma_stm32_bdma, CONFIG_DMA_LOG_LEVEL);

#define DT_DRV_COMPAT st_stm32_bdma

#define BDMA_STM32_0_CHANNEL_COUNT 8

static const uint32_t table_m_size[] = {
	LL_BDMA_MDATAALIGN_BYTE,
	LL_BDMA_MDATAALIGN_HALFWORD,
	LL_BDMA_MDATAALIGN_WORD,
};

static const uint32_t table_p_size[] = {
	LL_BDMA_PDATAALIGN_BYTE,
	LL_BDMA_PDATAALIGN_HALFWORD,
	LL_BDMA_PDATAALIGN_WORD,
};
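
/*
 * The tables above are indexed by find_lsb_set(data_size) - 1, so a data
 * size of 1/2/4 bytes selects index 0/1/2 and thus the matching
 * BYTE/HALFWORD/WORD alignment constant (see bdma_stm32_configure()).
 */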

uint32_t bdma_stm32_id_to_channel(uint32_t id)
{
	static const uint32_t channel_nr[] = {
		LL_BDMA_CHANNEL_0,
		LL_BDMA_CHANNEL_1,
		LL_BDMA_CHANNEL_2,
		LL_BDMA_CHANNEL_3,
		LL_BDMA_CHANNEL_4,
		LL_BDMA_CHANNEL_5,
		LL_BDMA_CHANNEL_6,
		LL_BDMA_CHANNEL_7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(channel_nr));

	return channel_nr[id];
}

#if !defined(CONFIG_DMAMUX_STM32)
uint32_t bdma_stm32_slot_to_channel(uint32_t slot)
{
	static const uint32_t channel_nr[] = {
		LL_BDMA_CHANNEL_0,
		LL_BDMA_CHANNEL_1,
		LL_BDMA_CHANNEL_2,
		LL_BDMA_CHANNEL_3,
		LL_BDMA_CHANNEL_4,
		LL_BDMA_CHANNEL_5,
		LL_BDMA_CHANNEL_6,
		LL_BDMA_CHANNEL_7,
	};

	__ASSERT_NO_MSG(slot < ARRAY_SIZE(channel_nr));

	return channel_nr[slot];
}
#endif

void bdma_stm32_clear_ht(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_clear_flag_func func[] = {
		LL_BDMA_ClearFlag_HT0,
		LL_BDMA_ClearFlag_HT1,
		LL_BDMA_ClearFlag_HT2,
		LL_BDMA_ClearFlag_HT3,
		LL_BDMA_ClearFlag_HT4,
		LL_BDMA_ClearFlag_HT5,
		LL_BDMA_ClearFlag_HT6,
		LL_BDMA_ClearFlag_HT7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	func[id](DMAx);
}

void bdma_stm32_clear_tc(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_clear_flag_func func[] = {
		LL_BDMA_ClearFlag_TC0,
		LL_BDMA_ClearFlag_TC1,
		LL_BDMA_ClearFlag_TC2,
		LL_BDMA_ClearFlag_TC3,
		LL_BDMA_ClearFlag_TC4,
		LL_BDMA_ClearFlag_TC5,
		LL_BDMA_ClearFlag_TC6,
		LL_BDMA_ClearFlag_TC7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	func[id](DMAx);
}

bool bdma_stm32_is_ht_active(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_check_flag_func func[] = {
		LL_BDMA_IsActiveFlag_HT0,
		LL_BDMA_IsActiveFlag_HT1,
		LL_BDMA_IsActiveFlag_HT2,
		LL_BDMA_IsActiveFlag_HT3,
		LL_BDMA_IsActiveFlag_HT4,
		LL_BDMA_IsActiveFlag_HT5,
		LL_BDMA_IsActiveFlag_HT6,
		LL_BDMA_IsActiveFlag_HT7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	return func[id](DMAx);
}

bool bdma_stm32_is_tc_active(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_check_flag_func func[] = {
		LL_BDMA_IsActiveFlag_TC0,
		LL_BDMA_IsActiveFlag_TC1,
		LL_BDMA_IsActiveFlag_TC2,
		LL_BDMA_IsActiveFlag_TC3,
		LL_BDMA_IsActiveFlag_TC4,
		LL_BDMA_IsActiveFlag_TC5,
		LL_BDMA_IsActiveFlag_TC6,
		LL_BDMA_IsActiveFlag_TC7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	return func[id](DMAx);
}

void bdma_stm32_clear_te(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_clear_flag_func func[] = {
		LL_BDMA_ClearFlag_TE0,
		LL_BDMA_ClearFlag_TE1,
		LL_BDMA_ClearFlag_TE2,
		LL_BDMA_ClearFlag_TE3,
		LL_BDMA_ClearFlag_TE4,
		LL_BDMA_ClearFlag_TE5,
		LL_BDMA_ClearFlag_TE6,
		LL_BDMA_ClearFlag_TE7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	func[id](DMAx);
}

void bdma_stm32_clear_gi(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_clear_flag_func func[] = {
		LL_BDMA_ClearFlag_GI0,
		LL_BDMA_ClearFlag_GI1,
		LL_BDMA_ClearFlag_GI2,
		LL_BDMA_ClearFlag_GI3,
		LL_BDMA_ClearFlag_GI4,
		LL_BDMA_ClearFlag_GI5,
		LL_BDMA_ClearFlag_GI6,
		LL_BDMA_ClearFlag_GI7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	func[id](DMAx);
}

bool bdma_stm32_is_te_active(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_check_flag_func func[] = {
		LL_BDMA_IsActiveFlag_TE0,
		LL_BDMA_IsActiveFlag_TE1,
		LL_BDMA_IsActiveFlag_TE2,
		LL_BDMA_IsActiveFlag_TE3,
		LL_BDMA_IsActiveFlag_TE4,
		LL_BDMA_IsActiveFlag_TE5,
		LL_BDMA_IsActiveFlag_TE6,
		LL_BDMA_IsActiveFlag_TE7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	return func[id](DMAx);
}

bool bdma_stm32_is_gi_active(BDMA_TypeDef *DMAx, uint32_t id)
{
	static const bdma_stm32_check_flag_func func[] = {
		LL_BDMA_IsActiveFlag_GI0,
		LL_BDMA_IsActiveFlag_GI1,
		LL_BDMA_IsActiveFlag_GI2,
		LL_BDMA_IsActiveFlag_GI3,
		LL_BDMA_IsActiveFlag_GI4,
		LL_BDMA_IsActiveFlag_GI5,
		LL_BDMA_IsActiveFlag_GI6,
		LL_BDMA_IsActiveFlag_GI7,
	};

	__ASSERT_NO_MSG(id < ARRAY_SIZE(func));

	return func[id](DMAx);
}

void stm32_bdma_dump_channel_irq(BDMA_TypeDef *dma, uint32_t id)
{
	LOG_INF("te: %d, ht: %d, tc: %d, gi: %d",
		bdma_stm32_is_te_active(dma, id),
		bdma_stm32_is_ht_active(dma, id),
		bdma_stm32_is_tc_active(dma, id),
		bdma_stm32_is_gi_active(dma, id));
}

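/*
 * The helpers below report an event only when its status flag is set and
 * the matching interrupt is enabled, so stale flags of masked events are
 * ignored by the IRQ handler.
 */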
stm32_bdma_is_tc_irq_active(BDMA_TypeDef * dma,uint32_t id)229 inline bool stm32_bdma_is_tc_irq_active(BDMA_TypeDef *dma, uint32_t id)
230 {
231 return LL_BDMA_IsEnabledIT_TC(dma, bdma_stm32_id_to_channel(id)) &&
232 bdma_stm32_is_tc_active(dma, id);
233 }
234
stm32_bdma_is_ht_irq_active(BDMA_TypeDef * dma,uint32_t id)235 inline bool stm32_bdma_is_ht_irq_active(BDMA_TypeDef *dma, uint32_t id)
236 {
237 return LL_BDMA_IsEnabledIT_HT(dma, bdma_stm32_id_to_channel(id)) &&
238 bdma_stm32_is_ht_active(dma, id);
239 }
240
stm32_bdma_is_te_irq_active(BDMA_TypeDef * dma,uint32_t id)241 static inline bool stm32_bdma_is_te_irq_active(BDMA_TypeDef *dma, uint32_t id)
242 {
243 return LL_BDMA_IsEnabledIT_TE(dma, bdma_stm32_id_to_channel(id)) &&
244 bdma_stm32_is_te_active(dma, id);
245 }
246
stm32_bdma_is_irq_active(BDMA_TypeDef * dma,uint32_t id)247 bool stm32_bdma_is_irq_active(BDMA_TypeDef *dma, uint32_t id)
248 {
249 return stm32_bdma_is_tc_irq_active(dma, id) ||
250 stm32_bdma_is_ht_irq_active(dma, id) ||
251 stm32_bdma_is_te_irq_active(dma, id);
252 }

void stm32_bdma_clear_channel_irq(BDMA_TypeDef *dma, uint32_t id)
{
	bdma_stm32_clear_gi(dma, id);
	bdma_stm32_clear_tc(dma, id);
	bdma_stm32_clear_ht(dma, id);
	bdma_stm32_clear_te(dma, id);
}

bool stm32_bdma_is_enabled_channel(BDMA_TypeDef *dma, uint32_t id)
{
	if (LL_BDMA_IsEnabledChannel(dma, bdma_stm32_id_to_channel(id)) == 1) {
		return true;
	}
	return false;
}

int stm32_bdma_disable_channel(BDMA_TypeDef *dma, uint32_t id)
{
	LL_BDMA_DisableChannel(dma, bdma_stm32_id_to_channel(id));

	if (!LL_BDMA_IsEnabledChannel(dma, bdma_stm32_id_to_channel(id))) {
		return 0;
	}

	return -EAGAIN;
}

void stm32_bdma_enable_channel(BDMA_TypeDef *dma, uint32_t id)
{
	LL_BDMA_EnableChannel(dma, bdma_stm32_id_to_channel(id));
}

static void bdma_stm32_dump_channel_irq(const struct device *dev, uint32_t id)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base);

	stm32_bdma_dump_channel_irq(dma, id);
}

static void bdma_stm32_clear_channel_irq(const struct device *dev, uint32_t id)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base);

	bdma_stm32_clear_tc(dma, id);
	bdma_stm32_clear_ht(dma, id);
	stm32_bdma_clear_channel_irq(dma, id);
}

static void bdma_stm32_irq_handler(const struct device *dev, uint32_t id)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *dma = (BDMA_TypeDef *)(config->base);
	struct bdma_stm32_channel *channel;
	uint32_t callback_arg;

	__ASSERT_NO_MSG(id < config->max_channels);

	channel = &config->channels[id];

	/* The busy flag is only relevant when the HAL has not taken over */
	if ((channel->hal_override != true) && (channel->busy == false)) {
		/*
		 * When the DMA channel is not overridden by the HAL,
		 * ignore the irq if the channel is not busy anymore
		 */
		bdma_stm32_clear_channel_irq(dev, id);
		return;
	}

#ifdef CONFIG_DMAMUX_STM32
	callback_arg = channel->mux_channel;
#else
	callback_arg = id;
#endif /* CONFIG_DMAMUX_STM32 */

	/* the dma channel id is in range from 0..<dma-requests> */
	if (stm32_bdma_is_ht_irq_active(dma, id)) {
		/* Let HAL DMA handle flags on its own */
		if (!channel->hal_override) {
			bdma_stm32_clear_ht(dma, id);
		}
		channel->bdma_callback(dev, channel->user_data, callback_arg, 0);
	} else if (stm32_bdma_is_tc_irq_active(dma, id)) {
		/* A circular buffer keeps receiving as long as the peripheral is enabled */
		if (!channel->cyclic) {
			channel->busy = false;
		}
		/* Let HAL DMA handle flags on its own */
		if (!channel->hal_override) {
			bdma_stm32_clear_tc(dma, id);
		}
		channel->bdma_callback(dev, channel->user_data, callback_arg, 0);
	} else {
		LOG_ERR("Transfer Error.");
		channel->busy = false;
		bdma_stm32_dump_channel_irq(dev, id);
		bdma_stm32_clear_channel_irq(dev, id);
		channel->bdma_callback(dev, channel->user_data,
				       callback_arg, -EIO);
	}
}

static int bdma_stm32_get_priority(uint8_t priority, uint32_t *ll_priority)
{
	switch (priority) {
	case 0x0:
		*ll_priority = LL_BDMA_PRIORITY_LOW;
		break;
	case 0x1:
		*ll_priority = LL_BDMA_PRIORITY_MEDIUM;
		break;
	case 0x2:
		*ll_priority = LL_BDMA_PRIORITY_HIGH;
		break;
	case 0x3:
		*ll_priority = LL_BDMA_PRIORITY_VERYHIGH;
		break;
	default:
		LOG_ERR("Priority error. %d", priority);
		return -EINVAL;
	}

	return 0;
}

static int bdma_stm32_get_direction(enum dma_channel_direction direction,
				    uint32_t *ll_direction)
{
	switch (direction) {
	case MEMORY_TO_MEMORY:
		*ll_direction = LL_BDMA_DIRECTION_MEMORY_TO_MEMORY;
		break;
	case MEMORY_TO_PERIPHERAL:
		*ll_direction = LL_BDMA_DIRECTION_MEMORY_TO_PERIPH;
		break;
	case PERIPHERAL_TO_MEMORY:
		*ll_direction = LL_BDMA_DIRECTION_PERIPH_TO_MEMORY;
		break;
	default:
		LOG_ERR("Direction error. %d", direction);
		return -EINVAL;
	}

	return 0;
}

static int bdma_stm32_get_memory_increment(enum dma_addr_adj increment,
					   uint32_t *ll_increment)
{
	switch (increment) {
	case DMA_ADDR_ADJ_INCREMENT:
		*ll_increment = LL_BDMA_MEMORY_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		*ll_increment = LL_BDMA_MEMORY_NOINCREMENT;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		return -ENOTSUP;
	default:
		LOG_ERR("Memory increment error. %d", increment);
		return -EINVAL;
	}

	return 0;
}

static int bdma_stm32_get_periph_increment(enum dma_addr_adj increment,
					   uint32_t *ll_increment)
{
	switch (increment) {
	case DMA_ADDR_ADJ_INCREMENT:
		*ll_increment = LL_BDMA_PERIPH_INCREMENT;
		break;
	case DMA_ADDR_ADJ_NO_CHANGE:
		*ll_increment = LL_BDMA_PERIPH_NOINCREMENT;
		break;
	case DMA_ADDR_ADJ_DECREMENT:
		return -ENOTSUP;
	default:
		LOG_ERR("Periph increment error. %d", increment);
		return -EINVAL;
	}

	return 0;
}

static int bdma_stm32_disable_channel(BDMA_TypeDef *bdma, uint32_t id)
{
	int count = 0;

	for (;;) {
		if (stm32_bdma_disable_channel(bdma, id) == 0) {
			return 0;
		}
		/* After trying for 5 seconds, give up */
		if (count++ > (5 * 1000)) {
			return -EBUSY;
		}
		k_sleep(K_MSEC(1));
	}
}

static bool bdma_stm32_is_valid_memory_address(const uint32_t address, const uint32_t size)
{
	/* The BDMA can only access memory addresses in SRAM4 */

	const uint32_t sram4_start = DT_REG_ADDR(DT_NODELABEL(sram4));
	const uint32_t sram4_end = sram4_start + DT_REG_SIZE(DT_NODELABEL(sram4));

	if (address < sram4_start) {
		return false;
	}

	if (address + size > sram4_end) {
		return false;
	}

	return true;
}
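
/*
 * This means DMA buffers must be placed in SRAM4 by the application. A
 * minimal sketch, assuming the build provides an "SRAM4" output section
 * for the region (the exact section name and mechanism are board and
 * linker script specific, and rx_buf is illustrative):
 *
 *   static uint8_t rx_buf[64] __attribute__((__section__("SRAM4")));
 */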

BDMA_STM32_EXPORT_API int bdma_stm32_configure(const struct device *dev,
					       uint32_t id,
					       struct dma_config *config)
{
	const struct bdma_stm32_config *dev_config = dev->config;
	struct bdma_stm32_channel *channel =
		&dev_config->channels[id];
	BDMA_TypeDef *bdma = (BDMA_TypeDef *)dev_config->base;
	LL_BDMA_InitTypeDef BDMA_InitStruct;
	int index;
	int ret;

	LL_BDMA_StructInit(&BDMA_InitStruct);

	if (id >= dev_config->max_channels) {
		LOG_ERR("cannot configure the bdma channel %d.", id);
		return -EINVAL;
	}

	if (channel->busy) {
		LOG_ERR("bdma channel %d is busy.", id);
		return -EBUSY;
	}

	if (bdma_stm32_disable_channel(bdma, id) != 0) {
		LOG_ERR("could not disable bdma channel %d.", id);
		return -EBUSY;
	}

	bdma_stm32_clear_channel_irq(dev, id);

	if (config->head_block->block_size > BDMA_STM32_MAX_DATA_ITEMS) {
		LOG_ERR("Data size too big: %d",
			config->head_block->block_size);
		return -EINVAL;
	}

	if ((config->channel_direction == MEMORY_TO_MEMORY) &&
	    (!dev_config->support_m2m)) {
		LOG_ERR("Memcopy not supported for device %s",
			dev->name);
		return -ENOTSUP;
	}

	/* support only the same data width for source and dest */
	if (config->dest_data_size != config->source_data_size) {
		LOG_ERR("source and dest data size differ.");
		return -EINVAL;
	}

	if (config->source_data_size != 4U &&
	    config->source_data_size != 2U &&
	    config->source_data_size != 1U) {
		LOG_ERR("source and dest unit size error, %d",
			config->source_data_size);
		return -EINVAL;
	}

	/*
	 * STM32's circular mode will auto reset both source address
	 * counter and destination address counter.
	 */
	if (config->head_block->source_reload_en !=
	    config->head_block->dest_reload_en) {
		LOG_ERR("source_reload_en and dest_reload_en must "
			"be the same.");
		return -EINVAL;
	}

	channel->busy = true;
	channel->bdma_callback = config->dma_callback;
	channel->direction = config->channel_direction;
	channel->user_data = config->user_data;
	channel->src_size = config->source_data_size;
	channel->dst_size = config->dest_data_size;
	channel->cyclic = config->head_block->source_reload_en;

	/* check dest or source memory address, warn if 0 */
	if (config->head_block->source_address == 0) {
		LOG_WRN("source_buffer address is null.");
	}

	if (config->head_block->dest_address == 0) {
		LOG_WRN("dest_buffer address is null.");
	}

	/* ensure all memory addresses are in SRAM4 */
	if (channel->direction == MEMORY_TO_PERIPHERAL || channel->direction == MEMORY_TO_MEMORY) {
		if (!bdma_stm32_is_valid_memory_address(config->head_block->source_address,
							config->head_block->block_size)) {
			LOG_ERR("invalid source address");
			return -EINVAL;
		}
	}
	if (channel->direction == PERIPHERAL_TO_MEMORY || channel->direction == MEMORY_TO_MEMORY) {
		if (!bdma_stm32_is_valid_memory_address(config->head_block->dest_address,
							config->head_block->block_size)) {
			LOG_ERR("invalid destination address");
			return -EINVAL;
		}
	}

	if (channel->direction == MEMORY_TO_PERIPHERAL) {
		BDMA_InitStruct.MemoryOrM2MDstAddress =
			config->head_block->source_address;
		BDMA_InitStruct.PeriphOrM2MSrcAddress =
			config->head_block->dest_address;
	} else {
		BDMA_InitStruct.PeriphOrM2MSrcAddress =
			config->head_block->source_address;
		BDMA_InitStruct.MemoryOrM2MDstAddress =
			config->head_block->dest_address;
	}

	uint16_t memory_addr_adj = 0, periph_addr_adj = 0;

	ret = bdma_stm32_get_priority(config->channel_priority,
				      &BDMA_InitStruct.Priority);
	if (ret < 0) {
		return ret;
	}

	ret = bdma_stm32_get_direction(config->channel_direction,
				       &BDMA_InitStruct.Direction);
	if (ret < 0) {
		return ret;
	}

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
	case PERIPHERAL_TO_MEMORY:
		memory_addr_adj = config->head_block->dest_addr_adj;
		periph_addr_adj = config->head_block->source_addr_adj;
		break;
	case MEMORY_TO_PERIPHERAL:
		memory_addr_adj = config->head_block->source_addr_adj;
		periph_addr_adj = config->head_block->dest_addr_adj;
		break;
	/* Direction has been asserted in bdma_stm32_get_direction. */
	default:
		LOG_ERR("Channel direction error (%d).",
			config->channel_direction);
		return -EINVAL;
	}

	ret = bdma_stm32_get_memory_increment(memory_addr_adj,
					      &BDMA_InitStruct.MemoryOrM2MDstIncMode);
	if (ret < 0) {
		return ret;
	}
	ret = bdma_stm32_get_periph_increment(periph_addr_adj,
					      &BDMA_InitStruct.PeriphOrM2MSrcIncMode);
	if (ret < 0) {
		return ret;
	}

	if (channel->cyclic) {
		BDMA_InitStruct.Mode = LL_BDMA_MODE_CIRCULAR;
	} else {
		BDMA_InitStruct.Mode = LL_BDMA_MODE_NORMAL;
	}

	channel->source_periph = (channel->direction == PERIPHERAL_TO_MEMORY);

	/* set the data width, when source_data_size equals dest_data_size */
	index = find_lsb_set(config->source_data_size) - 1;
	BDMA_InitStruct.PeriphOrM2MSrcDataSize = table_p_size[index];
	index = find_lsb_set(config->dest_data_size) - 1;
	BDMA_InitStruct.MemoryOrM2MDstDataSize = table_m_size[index];

	if (channel->source_periph) {
		BDMA_InitStruct.NbData = config->head_block->block_size /
					 config->source_data_size;
	} else {
		BDMA_InitStruct.NbData = config->head_block->block_size /
					 config->dest_data_size;
	}
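
	/*
	 * NbData counts transfers, not bytes: block_size is given in bytes,
	 * so e.g. a 64-byte block moved in 2-byte units yields NbData = 32.
	 */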

#if defined(CONFIG_DMAMUX_STM32)
	/*
	 * with bdma mux,
	 * the request ID is stored in the dma_slot
	 */
	BDMA_InitStruct.PeriphRequest = config->dma_slot;
#endif
	LL_BDMA_Init(bdma, bdma_stm32_id_to_channel(id), &BDMA_InitStruct);

	LL_BDMA_EnableIT_TC(bdma, bdma_stm32_id_to_channel(id));

	/* Enable Half-Transfer irq if circular mode is enabled */
	if (channel->cyclic) {
		LL_BDMA_EnableIT_HT(bdma, bdma_stm32_id_to_channel(id));
	}

	return ret;
}

BDMA_STM32_EXPORT_API int bdma_stm32_reload(const struct device *dev, uint32_t id,
					    uint32_t src, uint32_t dst,
					    size_t size)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base);
	struct bdma_stm32_channel *channel;

	if (id >= config->max_channels) {
		return -EINVAL;
	}

	channel = &config->channels[id];

	if (bdma_stm32_disable_channel(bdma, id) != 0) {
		return -EBUSY;
	}

	switch (channel->direction) {
	case MEMORY_TO_PERIPHERAL:
		LL_BDMA_SetMemoryAddress(bdma, bdma_stm32_id_to_channel(id), src);
		LL_BDMA_SetPeriphAddress(bdma, bdma_stm32_id_to_channel(id), dst);
		break;
	case MEMORY_TO_MEMORY:
	case PERIPHERAL_TO_MEMORY:
		LL_BDMA_SetPeriphAddress(bdma, bdma_stm32_id_to_channel(id), src);
		LL_BDMA_SetMemoryAddress(bdma, bdma_stm32_id_to_channel(id), dst);
		break;
	default:
		return -EINVAL;
	}

	if (channel->source_periph) {
		LL_BDMA_SetDataLength(bdma, bdma_stm32_id_to_channel(id),
				      size / channel->src_size);
	} else {
		LL_BDMA_SetDataLength(bdma, bdma_stm32_id_to_channel(id),
				      size / channel->dst_size);
	}

	/* When reloading the dma, the channel is busy again before enabling */
	channel->busy = true;

	stm32_bdma_enable_channel(bdma, id);

	return 0;
}

BDMA_STM32_EXPORT_API int bdma_stm32_start(const struct device *dev, uint32_t id)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base);
	struct bdma_stm32_channel *channel;

	/* Only M2P or M2M mode can be started manually. */
	if (id >= config->max_channels) {
		return -EINVAL;
	}

	/* Repeated start: return now if channel is already started */
	if (stm32_bdma_is_enabled_channel(bdma, id)) {
		return 0;
	}

	/* When starting the dma, the channel is busy before enabling */
	channel = &config->channels[id];
	channel->busy = true;

	bdma_stm32_clear_channel_irq(dev, id);
	stm32_bdma_enable_channel(bdma, id);

	return 0;
}

BDMA_STM32_EXPORT_API int bdma_stm32_stop(const struct device *dev, uint32_t id)
{
	const struct bdma_stm32_config *config = dev->config;
	struct bdma_stm32_channel *channel = &config->channels[id];
	BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base);

	if (id >= config->max_channels) {
		return -EINVAL;
	}

	if (channel->hal_override) {
		channel->busy = false;
		return 0;
	}

	/* Repeated stop: return now if channel is already stopped */
	if (!stm32_bdma_is_enabled_channel(bdma, id)) {
		return 0;
	}

	/* in bdma_stm32_configure, enabling is done regardless of defines */
	LL_BDMA_DisableIT_TC(bdma, bdma_stm32_id_to_channel(id));
	LL_BDMA_DisableIT_HT(bdma, bdma_stm32_id_to_channel(id));

	bdma_stm32_disable_channel(bdma, id);
	bdma_stm32_clear_channel_irq(dev, id);

	/* Finally, flag channel as free */
	channel->busy = false;

	return 0;
}

static int bdma_stm32_init(const struct device *dev)
{
	const struct bdma_stm32_config *config = dev->config;
	const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(clk)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	if (clock_control_on(clk,
		(clock_control_subsys_t) &config->pclken) != 0) {
		LOG_ERR("clock op failed");
		return -EIO;
	}

	config->config_irq(dev);

	for (uint32_t i = 0; i < config->max_channels; i++) {
		config->channels[i].busy = false;
#ifdef CONFIG_DMAMUX_STM32
		/* each further channel->mux_channel is fixed here */
		config->channels[i].mux_channel = i + config->offset;
#endif /* CONFIG_DMAMUX_STM32 */
	}

	((struct bdma_stm32_data *)dev->data)->dma_ctx.magic = 0;
	((struct bdma_stm32_data *)dev->data)->dma_ctx.dma_channels = 0;
	((struct bdma_stm32_data *)dev->data)->dma_ctx.atomic = 0;

	/* The BDMA can only access SRAM4 and assumes it is uncached.
	 * This check verifies that the nocache memory attribute is set in the devicetree.
	 * For example:
	 * &sram4 {
	 *	zephyr,memory-attr = <DT_MEM_ARM_MPU_RAM_NOCACHE>;
	 * };
	 */
#if DT_NODE_HAS_PROP(DT_NODELABEL(sram4), zephyr_memory_attr)
	if ((DT_PROP(DT_NODELABEL(sram4), zephyr_memory_attr) & DT_MEM_ARM_MPU_RAM_NOCACHE) == 0) {
		LOG_ERR("SRAM4 is not set as uncached.");
		return -EIO;
	}
#else
#error "BDMA driver expects SRAM4 to be set as uncached in devicetree"
#endif

	return 0;
}

BDMA_STM32_EXPORT_API int bdma_stm32_get_status(const struct device *dev,
						uint32_t id, struct dma_status *stat)
{
	const struct bdma_stm32_config *config = dev->config;
	BDMA_TypeDef *bdma = (BDMA_TypeDef *)(config->base);
	struct bdma_stm32_channel *channel;

	if (id >= config->max_channels) {
		return -EINVAL;
	}

	channel = &config->channels[id];
	stat->pending_length = LL_BDMA_GetDataLength(bdma, bdma_stm32_id_to_channel(id));
	stat->dir = channel->direction;
	stat->busy = channel->busy;

	return 0;
}

static DEVICE_API(dma, dma_funcs) = {
	.reload = bdma_stm32_reload,
	.config = bdma_stm32_configure,
	.start = bdma_stm32_start,
	.stop = bdma_stm32_stop,
	.get_status = bdma_stm32_get_status,
};
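
/*
 * These entries back the generic Zephyr DMA API, so applications drive the
 * controller through dma_config()/dma_start()/dma_stop()/dma_reload(). A
 * minimal sketch (the node label, channel number, buffer and callback names
 * are illustrative, and the buffer is assumed to live in SRAM4):
 *
 *   struct dma_block_config blk = {
 *           .source_address = (uint32_t)tx_buf,
 *           .dest_address = (uint32_t)periph_reg_addr,
 *           .block_size = sizeof(tx_buf),
 *   };
 *   struct dma_config cfg = {
 *           .channel_direction = MEMORY_TO_PERIPHERAL,
 *           .source_data_size = 1,
 *           .dest_data_size = 1,
 *           .head_block = &blk,
 *           .dma_callback = my_callback,
 *   };
 *   dma_config(DEVICE_DT_GET(DT_NODELABEL(bdma1)), 0, &cfg);
 *   dma_start(DEVICE_DT_GET(DT_NODELABEL(bdma1)), 0);
 */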

#ifdef CONFIG_DMAMUX_STM32
#define BDMA_STM32_OFFSET_INIT(index)			\
	.offset = DT_INST_PROP(index, dma_offset),
#else
#define BDMA_STM32_OFFSET_INIT(index)
#endif /* CONFIG_DMAMUX_STM32 */

#define BDMA_STM32_INIT_DEV(index)					\
static struct bdma_stm32_channel					\
	bdma_stm32_channels_##index[BDMA_STM32_##index##_CHANNEL_COUNT];\
									\
const struct bdma_stm32_config bdma_stm32_config_##index = {		\
	.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus),		\
		    .enr = DT_INST_CLOCKS_CELL(index, bits) },		\
	.config_irq = bdma_stm32_config_irq_##index,			\
	.base = DT_INST_REG_ADDR(index),				\
	.support_m2m = DT_INST_PROP(index, st_mem2mem),			\
	.max_channels = BDMA_STM32_##index##_CHANNEL_COUNT,		\
	.channels = bdma_stm32_channels_##index,			\
	BDMA_STM32_OFFSET_INIT(index)					\
};									\
									\
static struct bdma_stm32_data bdma_stm32_data_##index = {		\
};									\
									\
DEVICE_DT_INST_DEFINE(index,						\
		    &bdma_stm32_init,					\
		    NULL,						\
		    &bdma_stm32_data_##index, &bdma_stm32_config_##index,\
		    PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,	\
		    &dma_funcs)

#define BDMA_STM32_DEFINE_IRQ_HANDLER(bdma, chan)			\
static void bdma_stm32_irq_##bdma##_##chan(const struct device *dev)	\
{									\
	bdma_stm32_irq_handler(dev, chan);				\
}
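
/*
 * On parts with a BDMA (e.g. STM32H7), every BDMA channel has its own IRQ
 * line, so a dedicated thin handler is generated per channel and wired up
 * below via LISTIFY over the instance's interrupts.
 */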

#define BDMA_STM32_IRQ_CONNECT(bdma, chan)				\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(bdma, chan, irq),	\
			    DT_INST_IRQ_BY_IDX(bdma, chan, priority),	\
			    bdma_stm32_irq_##bdma##_##chan,		\
			    DEVICE_DT_INST_GET(bdma), 0);		\
		irq_enable(DT_INST_IRQ_BY_IDX(bdma, chan, irq));	\
	} while (false)

#if DT_NODE_HAS_STATUS_OKAY(DT_DRV_INST(0))

#define BDMA_STM32_DEFINE_IRQ_HANDLER_GEN(i, _) \
	BDMA_STM32_DEFINE_IRQ_HANDLER(0, i)
LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), BDMA_STM32_DEFINE_IRQ_HANDLER_GEN, (;));

static void bdma_stm32_config_irq_0(const struct device *dev)
{
	ARG_UNUSED(dev);

#define BDMA_STM32_IRQ_CONNECT_GEN(i, _) \
	BDMA_STM32_IRQ_CONNECT(0, i);
	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(0)), BDMA_STM32_IRQ_CONNECT_GEN, (;));
}

BDMA_STM32_INIT_DEV(0);

#endif /* DT_NODE_HAS_STATUS_OKAY(DT_DRV_INST(0)) */