/**
 * @file
 *
 * @brief Public APIs for the DMA drivers.
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_DMA_H_
#define ZEPHYR_INCLUDE_DRIVERS_DMA_H_

#include <zephyr/kernel.h>
#include <zephyr/device.h>

#ifdef __cplusplus
extern "C" {
#endif


/**
 * @brief DMA Interface
 * @defgroup dma_interface DMA Interface
 * @since 1.5
 * @version 1.0.0
 * @ingroup io_interfaces
 * @{
 */

/**
 * @brief DMA channel direction
 */
enum dma_channel_direction {
        /** Memory to memory */
        MEMORY_TO_MEMORY = 0x0,
        /** Memory to peripheral */
        MEMORY_TO_PERIPHERAL,
        /** Peripheral to memory */
        PERIPHERAL_TO_MEMORY,
        /** Peripheral to peripheral */
        PERIPHERAL_TO_PERIPHERAL,
        /** Host to memory */
        HOST_TO_MEMORY,
        /** Memory to host */
        MEMORY_TO_HOST,

        /**
         * Number of all common channel directions.
         */
        DMA_CHANNEL_DIRECTION_COMMON_COUNT,

        /**
         * This and higher values are DMA controller or SoC specific.
         * Refer to the specific DMA driver header file.
         */
        DMA_CHANNEL_DIRECTION_PRIV_START = DMA_CHANNEL_DIRECTION_COMMON_COUNT,

        /**
         * Maximum allowed value (3-bit field!)
         */
        DMA_CHANNEL_DIRECTION_MAX = 0x7
};

/**
 * @brief DMA address adjustment
 *
 * Valid values for @a source_addr_adj and @a dest_addr_adj
 */
enum dma_addr_adj {
        /** Increment the address */
        DMA_ADDR_ADJ_INCREMENT,
        /** Decrement the address */
        DMA_ADDR_ADJ_DECREMENT,
        /** Do not change the address */
        DMA_ADDR_ADJ_NO_CHANGE,
};

/**
 * @brief DMA channel attributes
 */
enum dma_channel_filter {
        DMA_CHANNEL_NORMAL,   /**< Normal DMA channel */
        DMA_CHANNEL_PERIODIC, /**< Channel that can be triggered by periodic sources */
};

/**
 * @brief DMA attributes
 */
enum dma_attribute_type {
        DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT, /**< Required buffer address alignment, in bytes */
        DMA_ATTR_BUFFER_SIZE_ALIGNMENT,    /**< Required buffer size alignment, in bytes */
        DMA_ATTR_COPY_ALIGNMENT,           /**< Minimal chunk of data that can be copied, in bytes */
        DMA_ATTR_MAX_BLOCK_COUNT,          /**< Maximum number of blocks in a transfer list */
};

/**
 * @struct dma_block_config
 * @brief DMA block configuration structure.
 *
 * Aside from the source address, destination address, and block size, many of
 * these options are hardware and driver dependent.
 */
struct dma_block_config {
#ifdef CONFIG_DMA_64BIT
        /** block starting address at source */
        uint64_t source_address;
        /** block starting address at destination */
        uint64_t dest_address;
#else
        /** block starting address at source */
        uint32_t source_address;
        /** block starting address at destination */
        uint32_t dest_address;
#endif
        /** Address adjustment at gather boundary */
        uint32_t source_gather_interval;
        /** Address adjustment at scatter boundary */
        uint32_t dest_scatter_interval;
        /** Continuous transfer count between scatter boundaries */
        uint16_t dest_scatter_count;
        /** Continuous transfer count between gather boundaries */
        uint16_t source_gather_count;
        /** Number of bytes to be transferred for this block */
        uint32_t block_size;
        /** Pointer to next block in a transfer list */
        struct dma_block_config *next_block;
        /** Enable source gathering when set to 1 */
        uint16_t source_gather_en : 1;
        /** Enable destination scattering when set to 1 */
        uint16_t dest_scatter_en : 1;
        /**
         * Source address adjustment option
         *
         * - 0b00 increment
         * - 0b01 decrement
         * - 0b10 no change
         */
        uint16_t source_addr_adj : 2;
        /**
         * Destination address adjustment
         *
         * - 0b00 increment
         * - 0b01 decrement
         * - 0b10 no change
         */
        uint16_t dest_addr_adj : 2;
        /** Reload source address at the end of block transfer */
        uint16_t source_reload_en : 1;
        /** Reload destination address at the end of block transfer */
        uint16_t dest_reload_en : 1;
        /** FIFO fill before starting transfer, HW specific meaning */
        uint16_t fifo_mode_control : 4;
        /**
         * Transfer flow control mode
         *
         * - 0b0 source request service upon data availability
         * - 0b1 source request postponed until destination request happens
         */
        uint16_t flow_control_mode : 1;

        uint16_t _reserved : 3;
};

/** The DMA callback event has occurred at the completion of a transfer list */
#define DMA_STATUS_COMPLETE 0
/** The DMA callback has occurred at the completion of a single transfer block in a transfer list */
#define DMA_STATUS_BLOCK 1

/**
 * @typedef dma_callback_t
 * @brief Callback function for DMA transfer completion
 *
 * If enabled, the callback function will be invoked at transfer or block completion,
 * or when an error happens.
 * In circular mode, @p status indicates that the DMA device has reached either
 * the end of the buffer (DMA_STATUS_COMPLETE) or a water mark (DMA_STATUS_BLOCK).
 *
 * @param dev Pointer to the DMA device calling the callback.
 * @param user_data A pointer to some user data or NULL
 * @param channel The channel number
 * @param status Status of the transfer
 *               - DMA_STATUS_COMPLETE buffer fully consumed
 *               - DMA_STATUS_BLOCK buffer consumption reached a configured block
 *                 or water mark
 *               - A negative errno otherwise
 */
typedef void (*dma_callback_t)(const struct device *dev, void *user_data,
                               uint32_t channel, int status);
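
/*
 * Example: a minimal completion callback, given as an illustrative sketch
 * only. The names my_dma_callback, dma_done_sem and dma_last_status are not
 * part of this API; the callback may run in ISR context, so only ISR-safe
 * calls (such as k_sem_give()) are used.
 *
 *      static struct k_sem dma_done_sem;
 *      static volatile int dma_last_status;
 *
 *      static void my_dma_callback(const struct device *dev, void *user_data,
 *                                  uint32_t channel, int status)
 *      {
 *              ARG_UNUSED(dev);
 *              ARG_UNUSED(user_data);
 *              ARG_UNUSED(channel);
 *
 *              if (status < 0 || status == DMA_STATUS_COMPLETE) {
 *                      dma_last_status = status;
 *                      k_sem_give(&dma_done_sem);
 *              }
 *              // DMA_STATUS_BLOCK notifications are ignored in this sketch.
 *      }
 */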

/**
 * @struct dma_config
 * @brief DMA configuration structure.
 */
struct dma_config {
        /** Which peripheral and direction, HW specific */
        uint32_t dma_slot : 8;
        /**
         * Direction the transfers are occurring
         *
         * - 0b000 memory to memory,
         * - 0b001 memory to peripheral,
         * - 0b010 peripheral to memory,
         * - 0b011 peripheral to peripheral,
         * - 0b100 host to memory
         * - 0b101 memory to host
         * - others hardware specific
         */
        uint32_t channel_direction : 3;
        /**
         * Completion callback enable
         *
         * - 0b0 callback invoked at transfer list completion only
         * - 0b1 callback invoked at completion of each block
         */
        uint32_t complete_callback_en : 1;
        /**
         * Error callback disable
         *
         * - 0b0 error callback enabled
         * - 0b1 error callback disabled
         */
        uint32_t error_callback_dis : 1;
        /**
         * Source handshake, HW specific
         *
         * - 0b0 HW
         * - 0b1 SW
         */
        uint32_t source_handshake : 1;
        /**
         * Destination handshake, HW specific
         *
         * - 0b0 HW
         * - 0b1 SW
         */
        uint32_t dest_handshake : 1;
        /**
         * Channel priority for arbitration, HW specific
         */
        uint32_t channel_priority : 4;
        /** Source chaining enable, HW specific */
        uint32_t source_chaining_en : 1;
        /** Destination chaining enable, HW specific */
        uint32_t dest_chaining_en : 1;
        /** Linked channel, HW specific */
        uint32_t linked_channel : 7;
        /** Cyclic transfer list, HW specific */
        uint32_t cyclic : 1;

        uint32_t _reserved : 3;
        /** Width of source data (in bytes) */
        uint32_t source_data_size : 16;
        /** Width of destination data (in bytes) */
        uint32_t dest_data_size : 16;
        /** Source burst length in bytes */
        uint32_t source_burst_length : 16;
        /** Destination burst length in bytes */
        uint32_t dest_burst_length : 16;
        /** Number of blocks in transfer list */
        uint32_t block_count;
        /** Pointer to the first block in the transfer list */
        struct dma_block_config *head_block;
        /** Optional attached user data for callbacks */
        void *user_data;
        /** Optional callback for completion and error events */
        dma_callback_t dma_callback;
};
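
/*
 * Example: a minimal memory-to-memory configuration sketch. Only direction,
 * data/burst sizes, block count and the head block are filled in; everything
 * else is left at its default of zero. The buffers, sizes and my_dma_callback
 * (see the callback sketch above) are illustrative assumptions, not
 * requirements of this API.
 *
 *      static uint8_t src_buf[64];
 *      static uint8_t dst_buf[64];
 *
 *      struct dma_block_config block_cfg = {
 *              .source_address = (uintptr_t)src_buf,
 *              .dest_address = (uintptr_t)dst_buf,
 *              .block_size = sizeof(src_buf),
 *      };
 *
 *      struct dma_config dma_cfg = {
 *              .channel_direction = MEMORY_TO_MEMORY,
 *              .source_data_size = 1,
 *              .dest_data_size = 1,
 *              .source_burst_length = 1,
 *              .dest_burst_length = 1,
 *              .block_count = 1,
 *              .head_block = &block_cfg,
 *              .dma_callback = my_dma_callback,
 *      };
 */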

/**
 * DMA runtime status structure
 */
struct dma_status {
        /** Is the current DMA transfer busy or idle */
        bool busy;
        /** Direction for the transfer */
        enum dma_channel_direction dir;
        /** Pending length to be transferred in bytes, HW specific */
        uint32_t pending_length;
        /** Available buffer space, HW specific */
        uint32_t free;
        /** Write position in circular DMA buffer, HW specific */
        uint32_t write_position;
        /** Read position in circular DMA buffer, HW specific */
        uint32_t read_position;
        /** Total copied, HW specific */
        uint64_t total_copied;
};

/**
 * DMA context structure
 * Note: the dma_context shall be the first member of the
 * DMA client driver's data, retrieved via dev->data
 */
struct dma_context {
        /** magic code to identify the context */
        int32_t magic;
        /** number of dma channels */
        int dma_channels;
        /** atomic holding bit flags for each channel to mark as used/unused */
        atomic_t *atomic;
};

/** Magic code to identify context content */
#define DMA_MAGIC 0x47494749

/**
 * @cond INTERNAL_HIDDEN
 *
 * These are for internal use only, so skip these in
 * public documentation.
 */
typedef int (*dma_api_config)(const struct device *dev, uint32_t channel,
                              struct dma_config *config);

#ifdef CONFIG_DMA_64BIT
typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
                              uint64_t src, uint64_t dst, size_t size);
#else
typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
                              uint32_t src, uint32_t dst, size_t size);
#endif

typedef int (*dma_api_start)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_stop)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_suspend)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_resume)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_get_status)(const struct device *dev, uint32_t channel,
                                  struct dma_status *status);

typedef int (*dma_api_get_attribute)(const struct device *dev, uint32_t type, uint32_t *value);

/**
 * @typedef dma_chan_filter
 * @brief Channel filter function call
 *
 * Filter function used to find a matching internal DMA channel,
 * provided by the caller.
 *
 * @param dev Pointer to the DMA device instance
 * @param channel the channel id to use
 * @param filter_param filter function parameter, can be NULL
 *
 * @retval true if the filter matched, false otherwise.
 */
typedef bool (*dma_api_chan_filter)(const struct device *dev,
                                    int channel, void *filter_param);

__subsystem struct dma_driver_api {
        dma_api_config config;
        dma_api_reload reload;
        dma_api_start start;
        dma_api_stop stop;
        dma_api_suspend suspend;
        dma_api_resume resume;
        dma_api_get_status get_status;
        dma_api_get_attribute get_attribute;
        dma_api_chan_filter chan_filter;
};
/**
 * @endcond
 */

/**
 * @brief Configure individual channel for DMA transfer.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to configure
 * @param config Data structure containing the intended configuration for the
 *               selected channel
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
static inline int dma_config(const struct device *dev, uint32_t channel,
                             struct dma_config *config)
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        return api->config(dev, channel, config);
}

/**
 * @brief Reload buffer(s) for a DMA channel
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to reload
 * @param src source address for the DMA transfer
 * @param dst destination address for the DMA transfer
 * @param size size of DMA transfer
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
#ifdef CONFIG_DMA_64BIT
static inline int dma_reload(const struct device *dev, uint32_t channel,
                             uint64_t src, uint64_t dst, size_t size)
#else
static inline int dma_reload(const struct device *dev, uint32_t channel,
                             uint32_t src, uint32_t dst, size_t size)
#endif
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        if (api->reload) {
                return api->reload(dev, channel, src, dst, size);
        }

        return -ENOSYS;
}

/**
 * @brief Enables DMA channel and starts the transfer, the channel must be
 *        configured beforehand.
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid.
 *
 * Start is allowed on channels that have already been started and must report
 * success.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer will
 *                be processed
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_start(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_start(const struct device *dev, uint32_t channel)
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        return api->start(dev, channel);
}
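
/*
 * Example: a typical single-transfer flow, sketched under the assumption that
 * "dma0" is a valid devicetree node label and that dma_cfg was prepared as in
 * the struct dma_config sketch above. Error handling is abbreviated.
 *
 *      const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *      uint32_t channel = 0;   // or a channel obtained via dma_request_channel()
 *      int ret;
 *
 *      ret = dma_config(dma_dev, channel, &dma_cfg);
 *      if (ret == 0) {
 *              ret = dma_start(dma_dev, channel);
 *      }
 *      // ... wait for the completion callback, then optionally ...
 *      (void)dma_stop(dma_dev, channel);
 */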

/**
 * @brief Stops the DMA transfer and disables the channel.
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid.
 *
 * Stop is allowed on channels that have already been stopped and must report
 * success.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer was
 *                being processed
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_stop(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_stop(const struct device *dev, uint32_t channel)
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        return api->stop(dev, channel);
}

/**
 * @brief Suspend a DMA channel transfer
 *
 * Implementations must check the validity of the channel state and ID passed
 * in and return -EINVAL if either are invalid.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to suspend
 *
 * @retval 0 If successful.
 * @retval -ENOSYS If not implemented.
 * @retval -EINVAL If invalid channel id or state.
 * @retval -errno Other negative errno code failure.
 */
__syscall int dma_suspend(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_suspend(const struct device *dev, uint32_t channel)
{
        const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;

        if (api->suspend == NULL) {
                return -ENOSYS;
        }
        return api->suspend(dev, channel);
}

/**
 * @brief Resume a DMA channel transfer
 *
 * Implementations must check the validity of the channel state and ID passed
 * in and return -EINVAL if either are invalid.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to resume
 *
 * @retval 0 If successful.
 * @retval -ENOSYS If not implemented.
 * @retval -EINVAL If invalid channel id or state.
 * @retval -errno Other negative errno code failure.
 */
__syscall int dma_resume(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_resume(const struct device *dev, uint32_t channel)
{
        const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;

        if (api->resume == NULL) {
                return -ENOSYS;
        }
        return api->resume(dev, channel);
}

/**
 * @brief Request a DMA channel.
 *
 * Request DMA channel resources.
 * Returns -EINVAL if there is no valid channel available.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param filter_param filter function parameter
 *
 * @retval DMA channel number if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_request_channel(const struct device *dev,
                                  void *filter_param);

static inline int z_impl_dma_request_channel(const struct device *dev,
                                             void *filter_param)
{
        int i = 0;
        int channel = -EINVAL;
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;
        /* dma_context shall be the first one in dev data */
        struct dma_context *dma_ctx = (struct dma_context *)dev->data;

        if (dma_ctx->magic != DMA_MAGIC) {
                return channel;
        }

        for (i = 0; i < dma_ctx->dma_channels; i++) {
                if (!atomic_test_and_set_bit(dma_ctx->atomic, i)) {
                        if (api->chan_filter &&
                            !api->chan_filter(dev, i, filter_param)) {
                                atomic_clear_bit(dma_ctx->atomic, i);
                                continue;
                        }
                        channel = i;
                        break;
                }
        }

        return channel;
}
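
/*
 * Example: requesting and releasing a channel, sketched with no filter
 * parameter and with dma_dev obtained as in the sketch above. A negative
 * return value means no suitable channel was available.
 *
 *      int channel = dma_request_channel(dma_dev, NULL);
 *
 *      if (channel >= 0) {
 *              // ... configure and use the channel ...
 *              dma_release_channel(dma_dev, channel);
 *      }
 */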

/**
 * @brief Release a DMA channel.
 *
 * Release DMA channel resources.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel channel number
 *
 */
__syscall void dma_release_channel(const struct device *dev,
                                   uint32_t channel);

static inline void z_impl_dma_release_channel(const struct device *dev,
                                              uint32_t channel)
{
        struct dma_context *dma_ctx = (struct dma_context *)dev->data;

        if (dma_ctx->magic != DMA_MAGIC) {
                return;
        }

        if ((int)channel < dma_ctx->dma_channels) {
                atomic_clear_bit(dma_ctx->atomic, channel);
        }
}

/**
 * @brief DMA channel filter.
 *
 * Filter a channel by attribute.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel channel number
 * @param filter_param filter attribute
 *
 * @retval Non-zero if the filter matched, zero if it did not.
 * @retval Negative errno code if not supported.
 */
__syscall int dma_chan_filter(const struct device *dev,
                              int channel, void *filter_param);

static inline int z_impl_dma_chan_filter(const struct device *dev,
                                         int channel, void *filter_param)
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        if (api->chan_filter) {
                return api->chan_filter(dev, channel, filter_param);
        }

        return -ENOSYS;
}

/**
 * @brief Get the current runtime status of a DMA transfer
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid or -ENOSYS if not supported.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer was
 *                being processed
 * @param stat a non-NULL dma_status object for storing DMA status
 *
 * @retval non-negative if successful.
 * @retval Negative errno code if failure.
 */
static inline int dma_get_status(const struct device *dev, uint32_t channel,
                                 struct dma_status *stat)
{
        const struct dma_driver_api *api =
                (const struct dma_driver_api *)dev->api;

        if (api->get_status) {
                return api->get_status(dev, channel, stat);
        }

        return -ENOSYS;
}

/**
 * @brief Get an attribute of a DMA controller
 *
 * This function allows getting a device-specific static or runtime attribute,
 * such as the required address and size alignment of a buffer.
 * Implementations must check the validity of the type passed in and
 * return -EINVAL if it is invalid or -ENOSYS if not supported.
 *
 * @funcprops \isr_ok
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param type Numeric identification of the attribute
 * @param value A non-NULL pointer to the variable where the read value is to be placed
 *
 * @retval non-negative if successful.
 * @retval Negative errno code if failure.
 */
static inline int dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
        const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;

        if (api->get_attribute) {
                return api->get_attribute(dev, type, value);
        }

        return -ENOSYS;
}
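
/*
 * Example: querying the required buffer address alignment before allocating
 * a DMA buffer. Controllers that do not implement get_attribute return
 * -ENOSYS; the fallback value used here is an illustrative assumption, not
 * something mandated by the API.
 *
 *      uint32_t align;
 *
 *      if (dma_get_attribute(dma_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
 *                            &align) != 0) {
 *              align = sizeof(uint32_t);       // assumed fallback alignment
 *      }
 */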

/**
 * @brief Look-up generic width index to be used in registers
 *
 * @warning This look-up works for most controllers, but *may* not work for
 *          yours. Ensure your controller expects the most common register
 *          bit values before using this convenience function. If your
 *          controller does not support these values, you will have to write
 *          your own look-up inside the controller driver.
 *
 * @param size width of bus (in bytes)
 *
 * @retval common DMA index to be placed into registers.
 */
static inline uint32_t dma_width_index(uint32_t size)
{
        /* Check boundaries (max supported width is 32 Bytes) */
        if (size < 1 || size > 32) {
                return 0; /* Zero is the default (8 Bytes) */
        }

        /* Ensure size is a power of 2 */
        if (!is_power_of_two(size)) {
                return 0; /* Zero is the default (8 Bytes) */
        }

        /* Convert to bit pattern for writing to a register */
        return find_msb_set(size);
}

/**
 * @brief Look-up generic burst index to be used in registers
 *
 * @warning This look-up works for most controllers, but *may* not work for
 *          yours. Ensure your controller expects the most common register
 *          bit values before using this convenience function. If your
 *          controller does not support these values, you will have to write
 *          your own look-up inside the controller driver.
 *
 * @param burst number of bytes to be sent in a single burst
 *
 * @retval common DMA index to be placed into registers.
 */
static inline uint32_t dma_burst_index(uint32_t burst)
{
        /* Check boundaries (max supported burst length is 256) */
        if (burst < 1 || burst > 256) {
                return 0; /* Zero is the default (1 burst length) */
        }

        /* Ensure burst is a power of 2 */
        if (!is_power_of_two(burst)) {
                return 0; /* Zero is the default (1 burst length) */
        }

        /* Convert to bit pattern for writing to a register */
        return find_msb_set(burst);
}

/**
 * @brief Get the device tree property describing the buffer address alignment
 *
 * Useful when statically defining or allocating buffers for DMA usage where
 * memory alignment often matters.
 *
 * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
 * @return alignment Memory byte alignment required for DMA buffers
 */
#define DMA_BUF_ADDR_ALIGNMENT(node) DT_PROP(node, dma_buf_addr_alignment)
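
/*
 * Example: statically defining a buffer that satisfies the controller's
 * address alignment requirement, assuming a devicetree node labelled dma_0
 * that provides the dma-buf-addr-alignment property.
 *
 *      static uint8_t rx_buf[512] __aligned(DMA_BUF_ADDR_ALIGNMENT(DT_NODELABEL(dma_0)));
 */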

/**
 * @brief Get the device tree property describing the buffer size alignment
 *
 * Useful when statically defining or allocating buffers for DMA usage where
 * memory alignment often matters.
 *
 * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
 * @return alignment Memory byte alignment required for DMA buffers
 */
#define DMA_BUF_SIZE_ALIGNMENT(node) DT_PROP(node, dma_buf_size_alignment)

/**
 * @brief Get the device tree property describing the minimal chunk of data possible to be copied
 *
 * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
 * @return minimal Minimal chunk of data possible to be copied
 */
#define DMA_COPY_ALIGNMENT(node) DT_PROP(node, dma_copy_alignment)

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#include <zephyr/syscalls/dma.h>

#endif /* ZEPHYR_INCLUDE_DRIVERS_DMA_H_ */