1 /**
2  * @file
3  *
4  * @brief Public APIs for the DMA drivers.
5  */
6 
7 /*
8  * Copyright (c) 2016 Intel Corporation
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  */
12 
13 #ifndef ZEPHYR_INCLUDE_DRIVERS_DMA_H_
14 #define ZEPHYR_INCLUDE_DRIVERS_DMA_H_
15 
16 #include <zephyr/kernel.h>
17 #include <zephyr/device.h>
18 
19 #ifdef __cplusplus
20 extern "C" {
21 #endif
22 
23 
24 /**
25  * @brief DMA Interface
26  * @defgroup dma_interface DMA Interface
27  * @ingroup io_interfaces
28  * @{
29  */
30 
31 /**
32  * @brief DMA channel direction
33  */
34 enum dma_channel_direction {
35 	/** Memory to memory */
36 	MEMORY_TO_MEMORY = 0x0,
37 	/** Memory to peripheral */
38 	MEMORY_TO_PERIPHERAL,
39 	/** Peripheral to memory */
40 	PERIPHERAL_TO_MEMORY,
41 	/** Peripheral to peripheral */
42 	PERIPHERAL_TO_PERIPHERAL,
43 	/** Host to memory */
44 	HOST_TO_MEMORY,
45 	/** Memory to host */
46 	MEMORY_TO_HOST,
47 
48 	/**
49 	 * Number of all common channel directions.
50 	 */
51 	DMA_CHANNEL_DIRECTION_COMMON_COUNT,
52 
53 	/**
	 * This and higher values are DMA controller or SoC specific.
	 * Refer to the corresponding DMA driver's header file.
56 	 */
57 	DMA_CHANNEL_DIRECTION_PRIV_START = DMA_CHANNEL_DIRECTION_COMMON_COUNT,
58 
59 	/**
60 	 * Maximum allowed value (3 bit field!)
61 	 */
62 	DMA_CHANNEL_DIRECTION_MAX = 0x7
63 };
64 
65 /**
66  * @brief DMA address adjustment
67  *
68  * Valid values for @a source_addr_adj and @a dest_addr_adj
69  */
70 enum dma_addr_adj {
71 	/** Increment the address */
72 	DMA_ADDR_ADJ_INCREMENT,
73 	/** Decrement the address */
74 	DMA_ADDR_ADJ_DECREMENT,
	/** Do not change the address */
76 	DMA_ADDR_ADJ_NO_CHANGE,
77 };
78 
79 /**
80  * @brief DMA channel attributes
81  */
82 enum dma_channel_filter {
	/** Normal DMA channel */
	DMA_CHANNEL_NORMAL,
	/** Channel that can be triggered by periodic sources */
	DMA_CHANNEL_PERIODIC,
85 };
86 
87 /**
88  * @brief DMA attributes
89  */
90 enum dma_attribute_type {
	/** Required address alignment of DMA buffers */
	DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
	/** Required size alignment of DMA buffers */
	DMA_ATTR_BUFFER_SIZE_ALIGNMENT,
	/** Minimal chunk of data that can be copied */
	DMA_ATTR_COPY_ALIGNMENT,
	/** Maximum number of blocks in a transfer list */
	DMA_ATTR_MAX_BLOCK_COUNT,
95 };
96 
97 /**
98  * @struct dma_block_config
99  * @brief DMA block configuration structure.
100  *
 * Aside from the source address, destination address, and block size, many of
 * these options are hardware and driver dependent.
103  */
104 struct dma_block_config {
105 #ifdef CONFIG_DMA_64BIT
106 	/** block starting address at source */
107 	uint64_t source_address;
108 	/** block starting address at destination */
109 	uint64_t dest_address;
110 #else
111 	/** block starting address at source */
112 	uint32_t source_address;
113 	/** block starting address at destination */
114 	uint32_t dest_address;
115 #endif
116 	/** Address adjustment at gather boundary */
117 	uint32_t source_gather_interval;
118 	/** Address adjustment at scatter boundary */
119 	uint32_t dest_scatter_interval;
120 	/** Continuous transfer count between scatter boundaries */
121 	uint16_t dest_scatter_count;
122 	/** Continuous transfer count between gather boundaries */
123 	uint16_t source_gather_count;
124 	/** Number of bytes to be transferred for this block */
125 	uint32_t block_size;
126 	/** Pointer to next block in a transfer list */
127 	struct dma_block_config *next_block;
128 	/** Enable source gathering when set to 1 */
129 	uint16_t  source_gather_en :  1;
130 	/** Enable destination scattering when set to 1 */
131 	uint16_t  dest_scatter_en :   1;
132 	/**
133 	 * Source address adjustment option
134 	 *
135 	 * - 0b00 increment
136 	 * - 0b01 decrement
137 	 * - 0b10 no change
138 	 */
139 	uint16_t  source_addr_adj :   2;
140 	/**
141 	 * Destination address adjustment
142 	 *
143 	 * - 0b00 increment
144 	 * - 0b01 decrement
145 	 * - 0b10 no change
146 	 */
147 	uint16_t  dest_addr_adj :     2;
148 	/** Reload source address at the end of block transfer */
149 	uint16_t  source_reload_en :  1;
150 	/** Reload destination address at the end of block transfer */
151 	uint16_t  dest_reload_en :    1;
152 	/** FIFO fill before starting transfer, HW specific meaning */
153 	uint16_t  fifo_mode_control : 4;
154 	/**
155 	 * Transfer flow control mode
156 	 *
157 	 * - 0b0 source request service upon data availability
158 	 * - 0b1 source request postponed until destination request happens
159 	 */
160 	uint16_t  flow_control_mode : 1;
161 
162 	uint16_t  _reserved :          3;
163 };
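
/*
 * Example: a minimal sketch of chaining two blocks into one transfer list
 * via next_block. The buffers, sizes, and split point are assumptions for
 * illustration only; real layouts depend on the controller.
 *
 *   static uint8_t src[64], dst_a[32], dst_b[32];
 *
 *   struct dma_block_config blk1 = {
 *           .source_address = (uintptr_t)&src[32],
 *           .dest_address = (uintptr_t)dst_b,
 *           .block_size = 32,
 *   };
 *   struct dma_block_config blk0 = {
 *           .source_address = (uintptr_t)&src[0],
 *           .dest_address = (uintptr_t)dst_a,
 *           .block_size = 32,
 *           .next_block = &blk1,      // blk1 is processed after blk0
 *   };
 *   // Pass &blk0 as dma_config.head_block with block_count = 2
 */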
164 
165 /** The DMA callback event has occurred at the completion of a transfer list */
166 #define DMA_STATUS_COMPLETE	0
167 /** The DMA callback has occurred at the completion of a single transfer block in a transfer list */
168 #define DMA_STATUS_BLOCK	1
169 
170 /**
171  * @typedef dma_callback_t
172  * @brief Callback function for DMA transfer completion
173  *
174  *  If enabled, callback function will be invoked at transfer or block completion,
175  *  or when an error happens.
176  *  In circular mode, @p status indicates that the DMA device has reached either
177  *  the end of the buffer (DMA_STATUS_COMPLETE) or a water mark (DMA_STATUS_BLOCK).
178  *
179  * @param dev           Pointer to the DMA device calling the callback.
180  * @param user_data     A pointer to some user data or NULL
181  * @param channel       The channel number
182  * @param status        Status of the transfer
183  *                      - DMA_STATUS_COMPLETE buffer fully consumed
184  *                      - DMA_STATUS_BLOCK buffer consumption reached a configured block
185  *                        or water mark
186  *                      - A negative errno otherwise
187  */
188 typedef void (*dma_callback_t)(const struct device *dev, void *user_data,
189 			       uint32_t channel, int status);
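
/*
 * Example: a minimal callback sketch. The semaphore passed through
 * user_data is an assumption for illustration; any application-specific
 * pointer (or NULL) may be attached when the channel is configured.
 *
 *   static void my_dma_callback(const struct device *dev, void *user_data,
 *                               uint32_t channel, int status)
 *   {
 *           struct k_sem *done = user_data;
 *
 *           ARG_UNUSED(dev);
 *
 *           if (status < 0) {
 *                   printk("DMA channel %u error %d\n", channel, status);
 *           } else if (status == DMA_STATUS_COMPLETE) {
 *                   k_sem_give(done);     // whole transfer list consumed
 *           }
 *           // DMA_STATUS_BLOCK would signal a single block / water mark
 *   }
 */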
190 
191 /**
192  * @struct dma_config
193  * @brief DMA configuration structure.
194  */
195 struct dma_config {
196 	/** Which peripheral and direction, HW specific */
197 	uint32_t  dma_slot :             8;
198 	/**
199 	 * Direction the transfers are occurring
200 	 *
201 	 * - 0b000 memory to memory,
202 	 * - 0b001 memory to peripheral,
203 	 * - 0b010 peripheral to memory,
204 	 * - 0b011 peripheral to peripheral,
205 	 * - 0b100 host to memory
206 	 * - 0b101 memory to host
207 	 * - others hardware specific
208 	 */
209 	uint32_t  channel_direction :    3;
210 	/**
211 	 * Completion callback enable
212 	 *
213 	 * - 0b0 callback invoked at transfer list completion only
214 	 * - 0b1 callback invoked at completion of each block
215 	 */
216 	uint32_t  complete_callback_en : 1;
217 	/**
218 	 * Error callback enable
219 	 *
220 	 * - 0b0 error callback enabled
221 	 * - 0b1 error callback disabled
222 	 */
223 	uint32_t  error_callback_en :    1;
224 	/**
225 	 * Source handshake, HW specific
226 	 *
227 	 * - 0b0 HW
228 	 * - 0b1 SW
229 	 */
230 	uint32_t  source_handshake :     1;
231 	/**
232 	 * Destination handshake, HW specific
233 	 *
234 	 * - 0b0 HW
235 	 * - 0b1 SW
236 	 */
237 	uint32_t  dest_handshake :       1;
238 	/**
239 	 * Channel priority for arbitration, HW specific
240 	 */
241 	uint32_t  channel_priority :     4;
242 	/** Source chaining enable, HW specific */
243 	uint32_t  source_chaining_en :   1;
244 	/** Destination chaining enable, HW specific */
245 	uint32_t  dest_chaining_en :     1;
246 	/** Linked channel, HW specific */
247 	uint32_t  linked_channel   :     7;
248 	/** Cyclic transfer list, HW specific */
	uint32_t  cyclic :               1;
250 
251 	uint32_t  _reserved :             3;
252 	/** Width of source data (in bytes) */
253 	uint32_t  source_data_size :    16;
254 	/** Width of destination data (in bytes) */
255 	uint32_t  dest_data_size :      16;
256 	/** Source burst length in bytes */
257 	uint32_t  source_burst_length : 16;
258 	/** Destination burst length in bytes */
259 	uint32_t  dest_burst_length :   16;
260 	/** Number of blocks in transfer list */
261 	uint32_t block_count;
262 	/** Pointer to the first block in the transfer list */
263 	struct dma_block_config *head_block;
264 	/** Optional attached user data for callbacks */
265 	void *user_data;
266 	/** Optional callback for completion and error events */
267 	dma_callback_t dma_callback;
268 };
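
/*
 * Example: a minimal sketch of a single-block, memory-to-memory
 * configuration. The device node label "dma0", the callback, and the
 * semaphore are assumptions carried over from the examples above; most
 * other fields are left at their zero defaults.
 *
 *   static const struct device *const dma_dev =
 *           DEVICE_DT_GET(DT_NODELABEL(dma0));
 *
 *   struct dma_block_config block = {
 *           .source_address = (uintptr_t)src_buf,
 *           .dest_address = (uintptr_t)dst_buf,
 *           .block_size = sizeof(dst_buf),
 *   };
 *
 *   struct dma_config cfg = {
 *           .channel_direction = MEMORY_TO_MEMORY,
 *           .source_data_size = 4,             // word-sized accesses
 *           .dest_data_size = 4,
 *           .block_count = 1,
 *           .head_block = &block,
 *           .dma_callback = my_dma_callback,   // optional
 *           .user_data = &done_sem,            // optional
 *   };
 */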
269 
270 /**
271  * DMA runtime status structure
272  */
273 struct dma_status {
	/** Whether the current DMA transfer is busy or idle */
275 	bool busy;
	/** Direction of the transfer */
277 	enum dma_channel_direction dir;
278 	/** Pending length to be transferred in bytes, HW specific */
279 	uint32_t pending_length;
	/** Available buffer space, HW specific */
281 	uint32_t free;
282 	/** Write position in circular DMA buffer, HW specific */
283 	uint32_t write_position;
284 	/** Read position in circular DMA buffer, HW specific */
285 	uint32_t read_position;
286 	/** Total copied, HW specific */
287 	uint64_t total_copied;
288 };
289 
290 /**
291  * DMA context structure
 * Note: the dma_context shall be the first member of the DMA driver's
 *       per-instance data, retrieved via dev->data
294  */
295 struct dma_context {
296 	/** magic code to identify the context */
297 	int32_t magic;
298 	/** number of dma channels */
299 	int dma_channels;
300 	/** atomic holding bit flags for each channel to mark as used/unused */
301 	atomic_t *atomic;
302 };
303 
304 /** Magic code to identify context content */
305 #define DMA_MAGIC 0x47494749
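
/*
 * Example (DMA driver side): a sketch of embedding dma_context as the first
 * member of the driver's data so the generic channel request/release helpers
 * below can locate it through dev->data. Field names other than "ctx" are
 * assumptions.
 *
 *   struct my_dma_data {
 *           struct dma_context ctx;   // must be first; ctx.magic = DMA_MAGIC
 *           // driver-private state follows
 *   };
 */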
306 
307 /**
308  * @cond INTERNAL_HIDDEN
309  *
310  * These are for internal use only, so skip these in
311  * public documentation.
312  */
313 typedef int (*dma_api_config)(const struct device *dev, uint32_t channel,
314 			      struct dma_config *config);
315 
316 #ifdef CONFIG_DMA_64BIT
317 typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
318 			      uint64_t src, uint64_t dst, size_t size);
319 #else
320 typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
321 			      uint32_t src, uint32_t dst, size_t size);
322 #endif
323 
324 typedef int (*dma_api_start)(const struct device *dev, uint32_t channel);
325 
326 typedef int (*dma_api_stop)(const struct device *dev, uint32_t channel);
327 
328 typedef int (*dma_api_suspend)(const struct device *dev, uint32_t channel);
329 
330 typedef int (*dma_api_resume)(const struct device *dev, uint32_t channel);
331 
332 typedef int (*dma_api_get_status)(const struct device *dev, uint32_t channel,
333 				  struct dma_status *status);
334 
335 typedef int (*dma_api_get_attribute)(const struct device *dev, uint32_t type, uint32_t *value);
336 
337 /**
338  * @typedef dma_chan_filter
 * @brief Channel filter function call
 *
 * Filter function, provided by the caller, used to find a matching
 * internal DMA channel.
343  *
344  * @param dev Pointer to the DMA device instance
345  * @param channel the channel id to use
346  * @param filter_param filter function parameter, can be NULL
347  *
 * @retval true if the filter matched, false otherwise.
349  */
350 typedef bool (*dma_api_chan_filter)(const struct device *dev,
351 				int channel, void *filter_param);
352 
353 __subsystem struct dma_driver_api {
354 	dma_api_config config;
355 	dma_api_reload reload;
356 	dma_api_start start;
357 	dma_api_stop stop;
358 	dma_api_suspend suspend;
359 	dma_api_resume resume;
360 	dma_api_get_status get_status;
361 	dma_api_get_attribute get_attribute;
362 	dma_api_chan_filter chan_filter;
363 };
364 /**
365  * @endcond
366  */
367 
368 /**
369  * @brief Configure individual channel for DMA transfer.
370  *
371  * @param dev     Pointer to the device structure for the driver instance.
372  * @param channel Numeric identification of the channel to configure
373  * @param config  Data structure containing the intended configuration for the
374  *                selected channel
375  *
376  * @retval 0 if successful.
377  * @retval Negative errno code if failure.
378  */
static inline int dma_config(const struct device *dev, uint32_t channel,
380 			     struct dma_config *config)
381 {
382 	const struct dma_driver_api *api =
383 		(const struct dma_driver_api *)dev->api;
384 
385 	return api->config(dev, channel, config);
386 }
387 
388 /**
389  * @brief Reload buffer(s) for a DMA channel
390  *
391  * @param dev     Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to reload buffers on
394  * @param src     source address for the DMA transfer
395  * @param dst     destination address for the DMA transfer
396  * @param size    size of DMA transfer
397  *
398  * @retval 0 if successful.
399  * @retval Negative errno code if failure.
400  */
401 #ifdef CONFIG_DMA_64BIT
static inline int dma_reload(const struct device *dev, uint32_t channel,
403 			     uint64_t src, uint64_t dst, size_t size)
404 #else
405 static inline int dma_reload(const struct device *dev, uint32_t channel,
406 		uint32_t src, uint32_t dst, size_t size)
407 #endif
408 {
409 	const struct dma_driver_api *api =
410 		(const struct dma_driver_api *)dev->api;
411 
412 	if (api->reload) {
413 		return api->reload(dev, channel, src, dst, size);
414 	}
415 
416 	return -ENOSYS;
417 }
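
/*
 * Example sketch: point an already configured channel at a fresh destination
 * buffer of the same size and restart it. The channel number and buffers are
 * assumptions for illustration.
 *
 *   int ret = dma_reload(dma_dev, channel,
 *                        (uintptr_t)src_buf, (uintptr_t)next_dst_buf,
 *                        sizeof(next_dst_buf));
 *   if (ret == 0) {
 *           ret = dma_start(dma_dev, channel);
 *   }
 */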
418 
419 /**
 * @brief Enables the DMA channel and starts the transfer; the channel must be
 *        configured beforehand.
422  *
423  * Implementations must check the validity of the channel ID passed in and
424  * return -EINVAL if it is invalid.
425  *
426  * Start is allowed on channels that have already been started and must report
427  * success.
428  *
429  * @funcprops \isr_ok
430  *
431  * @param dev     Pointer to the device structure for the driver instance.
432  * @param channel Numeric identification of the channel where the transfer will
433  *                be processed
434  *
435  * @retval 0 if successful.
436  * @retval Negative errno code if failure.
437  */
438 __syscall int dma_start(const struct device *dev, uint32_t channel);
439 
static inline int z_impl_dma_start(const struct device *dev, uint32_t channel)
441 {
442 	const struct dma_driver_api *api =
443 		(const struct dma_driver_api *)dev->api;
444 
445 	return api->start(dev, channel);
446 }
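
/*
 * Example sketch: the usual configure-then-start sequence on a channel that
 * was obtained beforehand (for instance via dma_request_channel(), below).
 *
 *   if (dma_config(dma_dev, channel, &cfg) == 0 &&
 *       dma_start(dma_dev, channel) == 0) {
 *           // transfer is running; completion is reported via the callback
 *   }
 */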
447 
448 /**
449  * @brief Stops the DMA transfer and disables the channel.
450  *
451  * Implementations must check the validity of the channel ID passed in and
452  * return -EINVAL if it is invalid.
453  *
454  * Stop is allowed on channels that have already been stopped and must report
455  * success.
456  *
457  * @funcprops \isr_ok
458  *
459  * @param dev     Pointer to the device structure for the driver instance.
460  * @param channel Numeric identification of the channel where the transfer was
461  *                being processed
462  *
463  * @retval 0 if successful.
464  * @retval Negative errno code if failure.
465  */
466 __syscall int dma_stop(const struct device *dev, uint32_t channel);
467 
static inline int z_impl_dma_stop(const struct device *dev, uint32_t channel)
469 {
470 	const struct dma_driver_api *api =
471 		(const struct dma_driver_api *)dev->api;
472 
473 	return api->stop(dev, channel);
474 }
475 
476 
477 /**
478  * @brief Suspend a DMA channel transfer
479  *
480  * Implementations must check the validity of the channel state and ID passed
481  * in and return -EINVAL if either are invalid.
482  *
483  * @funcprops \isr_ok
484  *
485  * @param dev Pointer to the device structure for the driver instance.
486  * @param channel Numeric identification of the channel to suspend
487  *
488  * @retval 0 If successful.
489  * @retval -ENOSYS If not implemented.
490  * @retval -EINVAL If invalid channel id or state.
491  * @retval -errno Other negative errno code failure.
492  */
493 __syscall int dma_suspend(const struct device *dev, uint32_t channel);
494 
static inline int z_impl_dma_suspend(const struct device *dev, uint32_t channel)
496 {
497 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
498 
499 	if (api->suspend == NULL) {
500 		return -ENOSYS;
501 	}
502 	return api->suspend(dev, channel);
503 }
504 
505 /**
506  * @brief Resume a DMA channel transfer
507  *
508  * Implementations must check the validity of the channel state and ID passed
509  * in and return -EINVAL if either are invalid.
510  *
511  * @funcprops \isr_ok
512  *
513  * @param dev Pointer to the device structure for the driver instance.
514  * @param channel Numeric identification of the channel to resume
515  *
516  * @retval 0 If successful.
517  * @retval -ENOSYS If not implemented
518  * @retval -EINVAL If invalid channel id or state.
519  * @retval -errno Other negative errno code failure.
520  */
521 __syscall int dma_resume(const struct device *dev, uint32_t channel);
522 
static inline int z_impl_dma_resume(const struct device *dev, uint32_t channel)
524 {
525 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
526 
527 	if (api->resume == NULL) {
528 		return -ENOSYS;
529 	}
530 	return api->resume(dev, channel);
531 }
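
/*
 * Example sketch: pause a running transfer and resume it later. Both calls
 * may legitimately return -ENOSYS on controllers without suspend/resume
 * support, in which case dma_stop()/dma_start() is the only alternative.
 *
 *   int ret = dma_suspend(dma_dev, channel);
 *   if (ret == 0) {
 *           // ... e.g. reconfigure the peripheral or handle a power event ...
 *           ret = dma_resume(dma_dev, channel);
 *   }
 */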
532 
533 /**
 * @brief Request a DMA channel.
 *
 * Requests DMA channel resources.
 * Returns -EINVAL if no valid channel is available.
538  *
539  * @funcprops \isr_ok
540  *
541  * @param dev Pointer to the device structure for the driver instance.
542  * @param filter_param filter function parameter
543  *
 * @retval DMA channel number if successful.
545  * @retval Negative errno code if failure.
546  */
547 __syscall int dma_request_channel(const struct device *dev,
548 				  void *filter_param);
549 
static inline int z_impl_dma_request_channel(const struct device *dev,
551 					     void *filter_param)
552 {
553 	int i = 0;
554 	int channel = -EINVAL;
555 	const struct dma_driver_api *api =
556 		(const struct dma_driver_api *)dev->api;
557 	/* dma_context shall be the first one in dev data */
558 	struct dma_context *dma_ctx = (struct dma_context *)dev->data;
559 
560 	if (dma_ctx->magic != DMA_MAGIC) {
561 		return channel;
562 	}
563 
564 	for (i = 0; i < dma_ctx->dma_channels; i++) {
565 		if (!atomic_test_and_set_bit(dma_ctx->atomic, i)) {
566 			if (api->chan_filter &&
567 			    !api->chan_filter(dev, i, filter_param)) {
568 				atomic_clear_bit(dma_ctx->atomic, i);
569 				continue;
570 			}
571 			channel = i;
572 			break;
573 		}
574 	}
575 
576 	return channel;
577 }
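
/*
 * Example sketch: dynamically claim a channel, use it, then hand it back.
 * Interpreting filter_param as an enum dma_channel_filter value is a common
 * convention but ultimately driver specific, so treat it as an assumption.
 *
 *   int filter = DMA_CHANNEL_NORMAL;
 *   int channel = dma_request_channel(dma_dev, &filter);
 *
 *   if (channel < 0) {
 *           return channel;     // -EINVAL: no free matching channel
 *   }
 *   // ... dma_config() / dma_start() / dma_stop() on this channel ...
 *   dma_release_channel(dma_dev, channel);
 */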
578 
579 /**
 * @brief Release a DMA channel.
 *
 * Releases DMA channel resources.
583  *
584  * @funcprops \isr_ok
585  *
586  * @param dev  Pointer to the device structure for the driver instance.
587  * @param channel  channel number
588  *
589  */
590 __syscall void dma_release_channel(const struct device *dev,
591 				   uint32_t channel);
592 
static inline void z_impl_dma_release_channel(const struct device *dev,
594 					      uint32_t channel)
595 {
596 	struct dma_context *dma_ctx = (struct dma_context *)dev->data;
597 
598 	if (dma_ctx->magic != DMA_MAGIC) {
599 		return;
600 	}
601 
602 	if ((int)channel < dma_ctx->dma_channels) {
603 		atomic_clear_bit(dma_ctx->atomic, channel);
604 	}
605 
606 }
607 
608 /**
609  * @brief DMA channel filter.
610  *
 * Filter a channel by attribute.
612  *
613  * @param dev  Pointer to the device structure for the driver instance.
614  * @param channel  channel number
615  * @param filter_param filter attribute
616  *
 * @retval Negative errno code if not supported
618  *
619  */
620 __syscall int dma_chan_filter(const struct device *dev,
621 				   int channel, void *filter_param);
622 
static inline int z_impl_dma_chan_filter(const struct device *dev,
624 					      int channel, void *filter_param)
625 {
626 	const struct dma_driver_api *api =
627 		(const struct dma_driver_api *)dev->api;
628 
629 	if (api->chan_filter) {
630 		return api->chan_filter(dev, channel, filter_param);
631 	}
632 
633 	return -ENOSYS;
634 }
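
/*
 * Example sketch: before hard-coding a channel number, ask the driver
 * whether that channel has the required attribute. Using
 * enum dma_channel_filter as the parameter is an assumption; each driver
 * defines what filter_param means.
 *
 *   int want = DMA_CHANNEL_PERIODIC;
 *
 *   if (dma_chan_filter(dma_dev, 2, &want) > 0) {
 *           // channel 2 can be triggered by periodic sources
 *   }
 */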
635 
636 /**
 * @brief Get the current runtime status of a DMA transfer
638  *
639  * Implementations must check the validity of the channel ID passed in and
640  * return -EINVAL if it is invalid or -ENOSYS if not supported.
641  *
642  * @funcprops \isr_ok
643  *
644  * @param dev     Pointer to the device structure for the driver instance.
645  * @param channel Numeric identification of the channel where the transfer was
646  *                being processed
647  * @param stat   a non-NULL dma_status object for storing DMA status
648  *
649  * @retval non-negative if successful.
650  * @retval Negative errno code if failure.
651  */
static inline int dma_get_status(const struct device *dev, uint32_t channel,
653 				 struct dma_status *stat)
654 {
655 	const struct dma_driver_api *api =
656 		(const struct dma_driver_api *)dev->api;
657 
658 	if (api->get_status) {
659 		return api->get_status(dev, channel, stat);
660 	}
661 
662 	return -ENOSYS;
663 }
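
/*
 * Example sketch: poll how much data is still pending on a channel, e.g. to
 * compute the fill level of a circular receive buffer.
 *
 *   struct dma_status stat;
 *
 *   if (dma_get_status(dma_dev, channel, &stat) == 0) {
 *           printk("busy=%d pending=%u bytes\n", stat.busy,
 *                  stat.pending_length);
 *   }
 */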
664 
665 /**
 * @brief Get an attribute of a DMA controller
 *
 * This function allows getting a device-specific static or runtime attribute,
 * such as the required address and size alignment of a buffer.
670  * Implementations must check the validity of the type passed in and
671  * return -EINVAL if it is invalid or -ENOSYS if not supported.
672  *
673  * @funcprops \isr_ok
674  *
675  * @param dev     Pointer to the device structure for the driver instance.
676  * @param type    Numeric identification of the attribute
677  * @param value   A non-NULL pointer to the variable where the read value is to be placed
678  *
679  * @retval non-negative if successful.
680  * @retval Negative errno code if failure.
681  */
static inline int dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
683 {
684 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
685 
686 	if (api->get_attribute) {
687 		return api->get_attribute(dev, type, value);
688 	}
689 
690 	return -ENOSYS;
691 }
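
/*
 * Example sketch: query the controller's required buffer address alignment
 * before allocating a DMA buffer at run time.
 *
 *   uint32_t align;
 *
 *   if (dma_get_attribute(dma_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
 *                         &align) == 0) {
 *           void *buf = k_aligned_alloc(align, 256);
 *           // ... use buf for DMA, then k_free(buf) ...
 *   }
 */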
692 
693 /**
694  * @brief Look-up generic width index to be used in registers
695  *
696  * @warning This look-up works for most controllers, but *may* not work for
697  *          yours.  Ensure your controller expects the most common register
698  *          bit values before using this convenience function.  If your
699  *          controller does not support these values, you will have to write
700  *          your own look-up inside the controller driver.
701  *
702  * @param size: width of bus (in bytes)
703  *
704  * @retval common DMA index to be placed into registers.
705  */
static inline uint32_t dma_width_index(uint32_t size)
707 {
708 	/* Check boundaries (max supported width is 32 Bytes) */
709 	if (size < 1 || size > 32) {
710 		return 0; /* Zero is the default (8 Bytes) */
711 	}
712 
713 	/* Ensure size is a power of 2 */
714 	if (!is_power_of_two(size)) {
715 		return 0; /* Zero is the default (8 Bytes) */
716 	}
717 
718 	/* Convert to bit pattern for writing to a register */
719 	return find_msb_set(size);
720 }
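
/*
 * For illustration, assuming Zephyr's find_msb_set() returns the 1-based
 * position of the most significant set bit:
 *
 *   dma_width_index(1)  == 1
 *   dma_width_index(4)  == 3
 *   dma_width_index(32) == 6
 *   dma_width_index(3)  == 0   (not a power of two, default returned)
 */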
721 
722 /**
723  * @brief Look-up generic burst index to be used in registers
724  *
725  * @warning This look-up works for most controllers, but *may* not work for
726  *          yours.  Ensure your controller expects the most common register
727  *          bit values before using this convenience function.  If your
728  *          controller does not support these values, you will have to write
729  *          your own look-up inside the controller driver.
730  *
731  * @param burst: number of bytes to be sent in a single burst
732  *
733  * @retval common DMA index to be placed into registers.
734  */
static inline uint32_t dma_burst_index(uint32_t burst)
736 {
737 	/* Check boundaries (max supported burst length is 256) */
738 	if (burst < 1 || burst > 256) {
739 		return 0; /* Zero is the default (1 burst length) */
740 	}
741 
742 	/* Ensure burst is a power of 2 */
	if (!is_power_of_two(burst)) {
744 		return 0; /* Zero is the default (1 burst length) */
745 	}
746 
747 	/* Convert to bit pattern for writing to a register */
748 	return find_msb_set(burst);
749 }
750 
751 /**
752  * @brief Get the device tree property describing the buffer address alignment
753  *
754  * Useful when statically defining or allocating buffers for DMA usage where
755  * memory alignment often matters.
756  *
757  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
758  * @return alignment Memory byte alignment required for DMA buffers
759  */
760 #define DMA_BUF_ADDR_ALIGNMENT(node) DT_PROP(node, dma_buf_addr_alignment)
761 
762 /**
763  * @brief Get the device tree property describing the buffer size alignment
764  *
765  * Useful when statically defining or allocating buffers for DMA usage where
766  * memory alignment often matters.
767  *
768  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
769  * @return alignment Memory byte alignment required for DMA buffers
770  */
771 #define DMA_BUF_SIZE_ALIGNMENT(node) DT_PROP(node, dma_buf_size_alignment)
772 
773 /**
774  * @brief Get the device tree property describing the minimal chunk of data possible to be copied
775  *
776  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
777  * @return minimal Minimal chunk of data possible to be copied
778  */
779 #define DMA_COPY_ALIGNMENT(node) DT_PROP(node, dma_copy_alignment)
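
/*
 * Example sketch: statically define a buffer whose address and size respect
 * the controller's devicetree alignment properties. The node label "dma0"
 * and the nominal length are assumptions; the properties must be present in
 * the devicetree for DT_PROP() to resolve.
 *
 *   #define MY_DMA_NODE DT_NODELABEL(dma0)
 *
 *   static uint8_t rx_buf[ROUND_UP(192, DMA_BUF_SIZE_ALIGNMENT(MY_DMA_NODE))]
 *           __aligned(DMA_BUF_ADDR_ALIGNMENT(MY_DMA_NODE));
 */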
780 
781 /**
782  * @}
783  */
784 
785 #ifdef __cplusplus
786 }
787 #endif
788 
789 #include <syscalls/dma.h>
790 
791 #endif /* ZEPHYR_INCLUDE_DRIVERS_DMA_H_ */
792