1 /**
2  * @file
3  *
4  * @brief Public APIs for the DMA drivers.
5  */
6 
7 /*
8  * Copyright (c) 2016 Intel Corporation
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  */
12 
13 #ifndef ZEPHYR_INCLUDE_DRIVERS_DMA_H_
14 #define ZEPHYR_INCLUDE_DRIVERS_DMA_H_
15 
16 #include <zephyr/kernel.h>
17 #include <zephyr/device.h>
18 
19 #ifdef __cplusplus
20 extern "C" {
21 #endif
22 
23 
24 /**
25  * @brief DMA Interface
26  * @defgroup dma_interface DMA Interface
27  * @since 1.5
28  * @version 1.0.0
29  * @ingroup io_interfaces
30  * @{
31  */
32 
33 /**
34  * @brief DMA channel direction
35  */
36 enum dma_channel_direction {
37 	/** Memory to memory */
38 	MEMORY_TO_MEMORY = 0x0,
39 	/** Memory to peripheral */
40 	MEMORY_TO_PERIPHERAL,
41 	/** Peripheral to memory */
42 	PERIPHERAL_TO_MEMORY,
43 	/** Peripheral to peripheral */
44 	PERIPHERAL_TO_PERIPHERAL,
45 	/** Host to memory */
46 	HOST_TO_MEMORY,
47 	/** Memory to host */
48 	MEMORY_TO_HOST,
49 
50 	/**
51 	 * Number of all common channel directions.
52 	 */
53 	DMA_CHANNEL_DIRECTION_COMMON_COUNT,
54 
55 	/**
56 	 * This and higher values are dma controller or soc specific.
57 	 * Refer to the specified dma driver header file.
58 	 */
59 	DMA_CHANNEL_DIRECTION_PRIV_START = DMA_CHANNEL_DIRECTION_COMMON_COUNT,
60 
61 	/**
62 	 * Maximum allowed value (3 bit field!)
63 	 */
64 	DMA_CHANNEL_DIRECTION_MAX = 0x7
65 };
66 
67 /**
68  * @brief DMA address adjustment
69  *
70  * Valid values for @a source_addr_adj and @a dest_addr_adj
71  */
72 enum dma_addr_adj {
73 	/** Increment the address */
74 	DMA_ADDR_ADJ_INCREMENT,
75 	/** Decrement the address */
76 	DMA_ADDR_ADJ_DECREMENT,
77 	/** No change the address */
78 	DMA_ADDR_ADJ_NO_CHANGE,
79 };
80 
81 /**
82  * @brief DMA channel attributes
83  */
84 enum dma_channel_filter {
85 	DMA_CHANNEL_NORMAL, /* normal DMA channel */
86 	DMA_CHANNEL_PERIODIC, /* can be triggered by periodic sources */
87 };
88 
89 /**
90  * @brief DMA attributes
91  */
92 enum dma_attribute_type {
93 	DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
94 	DMA_ATTR_BUFFER_SIZE_ALIGNMENT,
95 	DMA_ATTR_COPY_ALIGNMENT,
96 	DMA_ATTR_MAX_BLOCK_COUNT,
97 };
98 
99 /**
100  * @struct dma_block_config
101  * @brief DMA block configuration structure.
102  *
103  * Aside from source address, destination address, and block size many of these options are hardware
104  * and driver dependent.
105  */
106 struct dma_block_config {
107 #ifdef CONFIG_DMA_64BIT
108 	/** block starting address at source */
109 	uint64_t source_address;
110 	/** block starting address at destination */
111 	uint64_t dest_address;
112 #else
113 	/** block starting address at source */
114 	uint32_t source_address;
115 	/** block starting address at destination */
116 	uint32_t dest_address;
117 #endif
118 	/** Address adjustment at gather boundary */
119 	uint32_t source_gather_interval;
120 	/** Address adjustment at scatter boundary */
121 	uint32_t dest_scatter_interval;
122 	/** Continuous transfer count between scatter boundaries */
123 	uint16_t dest_scatter_count;
124 	/** Continuous transfer count between gather boundaries */
125 	uint16_t source_gather_count;
126 	/** Number of bytes to be transferred for this block */
127 	uint32_t block_size;
128 	/** Pointer to next block in a transfer list */
129 	struct dma_block_config *next_block;
130 	/** Enable source gathering when set to 1 */
131 	uint16_t  source_gather_en :  1;
132 	/** Enable destination scattering when set to 1 */
133 	uint16_t  dest_scatter_en :   1;
134 	/**
135 	 * Source address adjustment option
136 	 *
137 	 * - 0b00 increment
138 	 * - 0b01 decrement
139 	 * - 0b10 no change
140 	 */
141 	uint16_t  source_addr_adj :   2;
142 	/**
143 	 * Destination address adjustment
144 	 *
145 	 * - 0b00 increment
146 	 * - 0b01 decrement
147 	 * - 0b10 no change
148 	 */
149 	uint16_t  dest_addr_adj :     2;
150 	/** Reload source address at the end of block transfer */
151 	uint16_t  source_reload_en :  1;
152 	/** Reload destination address at the end of block transfer */
153 	uint16_t  dest_reload_en :    1;
154 	/** FIFO fill before starting transfer, HW specific meaning */
155 	uint16_t  fifo_mode_control : 4;
156 	/**
157 	 * Transfer flow control mode
158 	 *
159 	 * - 0b0 source request service upon data availability
160 	 * - 0b1 source request postponed until destination request happens
161 	 */
162 	uint16_t  flow_control_mode : 1;
163 
164 	uint16_t  _reserved :          3;
165 };
166 
/** The DMA callback event has occurred at the completion of a transfer list */
#define DMA_STATUS_COMPLETE	0
/** The DMA callback has occurred at the completion of a single transfer block in a transfer list */
#define DMA_STATUS_BLOCK	1
171 
172 /**
173  * @typedef dma_callback_t
174  * @brief Callback function for DMA transfer completion
175  *
176  *  If enabled, callback function will be invoked at transfer or block completion,
177  *  or when an error happens.
178  *  In circular mode, @p status indicates that the DMA device has reached either
179  *  the end of the buffer (DMA_STATUS_COMPLETE) or a water mark (DMA_STATUS_BLOCK).
180  *
181  * @param dev           Pointer to the DMA device calling the callback.
182  * @param user_data     A pointer to some user data or NULL
183  * @param channel       The channel number
184  * @param status        Status of the transfer
185  *                      - DMA_STATUS_COMPLETE buffer fully consumed
186  *                      - DMA_STATUS_BLOCK buffer consumption reached a configured block
187  *                        or water mark
188  *                      - A negative errno otherwise
189  */
190 typedef void (*dma_callback_t)(const struct device *dev, void *user_data,
191 			       uint32_t channel, int status);
192 
193 /**
194  * @struct dma_config
195  * @brief DMA configuration structure.
196  */
197 struct dma_config {
198 	/** Which peripheral and direction, HW specific */
199 	uint32_t  dma_slot :             8;
200 	/**
201 	 * Direction the transfers are occurring
202 	 *
203 	 * - 0b000 memory to memory,
204 	 * - 0b001 memory to peripheral,
205 	 * - 0b010 peripheral to memory,
206 	 * - 0b011 peripheral to peripheral,
207 	 * - 0b100 host to memory
208 	 * - 0b101 memory to host
209 	 * - others hardware specific
210 	 */
211 	uint32_t  channel_direction :    3;
212 	/**
213 	 * Completion callback enable
214 	 *
215 	 * - 0b0 callback invoked at transfer list completion only
216 	 * - 0b1 callback invoked at completion of each block
217 	 */
218 	uint32_t  complete_callback_en : 1;
219 	/**
220 	 * Error callback disable
221 	 *
222 	 * - 0b0 error callback enabled
223 	 * - 0b1 error callback disabled
224 	 */
225 	uint32_t  error_callback_dis :    1;
226 	/**
227 	 * Source handshake, HW specific
228 	 *
229 	 * - 0b0 HW
230 	 * - 0b1 SW
231 	 */
232 	uint32_t  source_handshake :     1;
233 	/**
234 	 * Destination handshake, HW specific
235 	 *
236 	 * - 0b0 HW
237 	 * - 0b1 SW
238 	 */
239 	uint32_t  dest_handshake :       1;
240 	/**
241 	 * Channel priority for arbitration, HW specific
242 	 */
243 	uint32_t  channel_priority :     4;
244 	/** Source chaining enable, HW specific */
245 	uint32_t  source_chaining_en :   1;
246 	/** Destination chaining enable, HW specific */
247 	uint32_t  dest_chaining_en :     1;
248 	/** Linked channel, HW specific */
249 	uint32_t  linked_channel   :     7;
250 	/** Cyclic transfer list, HW specific */
251 	uint32_t  cyclic :				 1;
252 
253 	uint32_t  _reserved :             3;
254 	/** Width of source data (in bytes) */
255 	uint32_t  source_data_size :    16;
256 	/** Width of destination data (in bytes) */
257 	uint32_t  dest_data_size :      16;
258 	/** Source burst length in bytes */
259 	uint32_t  source_burst_length : 16;
260 	/** Destination burst length in bytes */
261 	uint32_t  dest_burst_length :   16;
262 	/** Number of blocks in transfer list */
263 	uint32_t block_count;
264 	/** Pointer to the first block in the transfer list */
265 	struct dma_block_config *head_block;
266 	/** Optional attached user data for callbacks */
267 	void *user_data;
268 	/** Optional callback for completion and error events */
269 	dma_callback_t dma_callback;
270 };
271 
272 /**
273  * DMA runtime status structure
274  */
275 struct dma_status {
276 	/** Is the current DMA transfer busy or idle */
277 	bool busy;
278 	/** Direction for the transfer */
279 	enum dma_channel_direction dir;
280 	/** Pending length to be transferred in bytes, HW specific */
281 	uint32_t pending_length;
282 	/** Available buffers space, HW specific */
283 	uint32_t free;
284 	/** Write position in circular DMA buffer, HW specific */
285 	uint32_t write_position;
286 	/** Read position in circular DMA buffer, HW specific */
287 	uint32_t read_position;
288 	/** Total copied, HW specific */
289 	uint64_t total_copied;
290 };
291 
292 /**
293  * DMA context structure
294  * Note: the dma_context shall be the first member
295  *       of DMA client driver Data, got by dev->data
296  */
297 struct dma_context {
298 	/** magic code to identify the context */
299 	int32_t magic;
300 	/** number of dma channels */
301 	int dma_channels;
302 	/** atomic holding bit flags for each channel to mark as used/unused */
303 	atomic_t *atomic;
304 };
305 
306 /** Magic code to identify context content */
307 #define DMA_MAGIC 0x47494749
308 
309 /**
310  * @cond INTERNAL_HIDDEN
311  *
312  * These are for internal use only, so skip these in
313  * public documentation.
314  */
315 typedef int (*dma_api_config)(const struct device *dev, uint32_t channel,
316 			      struct dma_config *config);
317 
318 #ifdef CONFIG_DMA_64BIT
319 typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
320 			      uint64_t src, uint64_t dst, size_t size);
321 #else
322 typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
323 			      uint32_t src, uint32_t dst, size_t size);
324 #endif
325 
326 typedef int (*dma_api_start)(const struct device *dev, uint32_t channel);
327 
328 typedef int (*dma_api_stop)(const struct device *dev, uint32_t channel);
329 
330 typedef int (*dma_api_suspend)(const struct device *dev, uint32_t channel);
331 
332 typedef int (*dma_api_resume)(const struct device *dev, uint32_t channel);
333 
334 typedef int (*dma_api_get_status)(const struct device *dev, uint32_t channel,
335 				  struct dma_status *status);
336 
337 typedef int (*dma_api_get_attribute)(const struct device *dev, uint32_t type, uint32_t *value);
338 
339 /**
340  * @typedef dma_chan_filter
341  * @brief channel filter function call
342  *
343  * filter function that is used to find the matched internal dma channel
344  * provide by caller
345  *
346  * @param dev Pointer to the DMA device instance
347  * @param channel the channel id to use
348  * @param filter_param filter function parameter, can be NULL
349  *
350  * @retval True on filter matched otherwise return False.
351  */
352 typedef bool (*dma_api_chan_filter)(const struct device *dev,
353 				int channel, void *filter_param);
354 
355 /**
356  * @typedef dma_chan_release
357  * @brief channel release function call
358  *
359  * used to release channel resources "allocated" during the
360  * request phase. These resources can refer to enabled PDs, IRQs
361  * etc...
362  *
363  * @param dev Pointer to the DMA device instance
364  * @param channel channel id to use
365  */
366 typedef void (*dma_api_chan_release)(const struct device *dev,
367 				     uint32_t channel);
368 
/* Per-driver API vtable; config/reload/start/stop are mandatory, the
 * remaining hooks may be NULL (the wrappers below return -ENOSYS or
 * skip them accordingly).
 */
__subsystem struct dma_driver_api {
	dma_api_config config;
	dma_api_reload reload;
	dma_api_start start;
	dma_api_stop stop;
	dma_api_suspend suspend;
	dma_api_resume resume;
	dma_api_get_status get_status;
	dma_api_get_attribute get_attribute;
	dma_api_chan_filter chan_filter;
	dma_api_chan_release chan_release;
};
/**
 * @endcond
 */
384 
385 /**
386  * @brief Configure individual channel for DMA transfer.
387  *
388  * @param dev     Pointer to the device structure for the driver instance.
389  * @param channel Numeric identification of the channel to configure
390  * @param config  Data structure containing the intended configuration for the
391  *                selected channel
392  *
393  * @retval 0 if successful.
394  * @retval Negative errno code if failure.
395  */
dma_config(const struct device * dev,uint32_t channel,struct dma_config * config)396 static inline int dma_config(const struct device *dev, uint32_t channel,
397 			     struct dma_config *config)
398 {
399 	const struct dma_driver_api *api =
400 		(const struct dma_driver_api *)dev->api;
401 
402 	return api->config(dev, channel, config);
403 }
404 
405 /**
406  * @brief Reload buffer(s) for a DMA channel
407  *
408  * @param dev     Pointer to the device structure for the driver instance.
409  * @param channel Numeric identification of the channel to configure
410  *                selected channel
411  * @param src     source address for the DMA transfer
412  * @param dst     destination address for the DMA transfer
413  * @param size    size of DMA transfer
414  *
415  * @retval 0 if successful.
416  * @retval Negative errno code if failure.
417  */
418 #ifdef CONFIG_DMA_64BIT
dma_reload(const struct device * dev,uint32_t channel,uint64_t src,uint64_t dst,size_t size)419 static inline int dma_reload(const struct device *dev, uint32_t channel,
420 			     uint64_t src, uint64_t dst, size_t size)
421 #else
422 static inline int dma_reload(const struct device *dev, uint32_t channel,
423 		uint32_t src, uint32_t dst, size_t size)
424 #endif
425 {
426 	const struct dma_driver_api *api =
427 		(const struct dma_driver_api *)dev->api;
428 
429 	if (api->reload) {
430 		return api->reload(dev, channel, src, dst, size);
431 	}
432 
433 	return -ENOSYS;
434 }
435 
436 /**
437  * @brief Enables DMA channel and starts the transfer, the channel must be
438  *        configured beforehand.
439  *
440  * Implementations must check the validity of the channel ID passed in and
441  * return -EINVAL if it is invalid.
442  *
443  * Start is allowed on channels that have already been started and must report
444  * success.
445  *
446  * @funcprops \isr_ok
447  *
448  * @param dev     Pointer to the device structure for the driver instance.
449  * @param channel Numeric identification of the channel where the transfer will
450  *                be processed
451  *
452  * @retval 0 if successful.
453  * @retval Negative errno code if failure.
454  */
455 __syscall int dma_start(const struct device *dev, uint32_t channel);
456 
z_impl_dma_start(const struct device * dev,uint32_t channel)457 static inline int z_impl_dma_start(const struct device *dev, uint32_t channel)
458 {
459 	const struct dma_driver_api *api =
460 		(const struct dma_driver_api *)dev->api;
461 
462 	return api->start(dev, channel);
463 }
464 
465 /**
466  * @brief Stops the DMA transfer and disables the channel.
467  *
468  * Implementations must check the validity of the channel ID passed in and
469  * return -EINVAL if it is invalid.
470  *
471  * Stop is allowed on channels that have already been stopped and must report
472  * success.
473  *
474  * @funcprops \isr_ok
475  *
476  * @param dev     Pointer to the device structure for the driver instance.
477  * @param channel Numeric identification of the channel where the transfer was
478  *                being processed
479  *
480  * @retval 0 if successful.
481  * @retval Negative errno code if failure.
482  */
483 __syscall int dma_stop(const struct device *dev, uint32_t channel);
484 
z_impl_dma_stop(const struct device * dev,uint32_t channel)485 static inline int z_impl_dma_stop(const struct device *dev, uint32_t channel)
486 {
487 	const struct dma_driver_api *api =
488 		(const struct dma_driver_api *)dev->api;
489 
490 	return api->stop(dev, channel);
491 }
492 
493 
494 /**
495  * @brief Suspend a DMA channel transfer
496  *
497  * Implementations must check the validity of the channel state and ID passed
498  * in and return -EINVAL if either are invalid.
499  *
500  * @funcprops \isr_ok
501  *
502  * @param dev Pointer to the device structure for the driver instance.
503  * @param channel Numeric identification of the channel to suspend
504  *
505  * @retval 0 If successful.
506  * @retval -ENOSYS If not implemented.
507  * @retval -EINVAL If invalid channel id or state.
508  * @retval -errno Other negative errno code failure.
509  */
510 __syscall int dma_suspend(const struct device *dev, uint32_t channel);
511 
z_impl_dma_suspend(const struct device * dev,uint32_t channel)512 static inline int z_impl_dma_suspend(const struct device *dev, uint32_t channel)
513 {
514 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
515 
516 	if (api->suspend == NULL) {
517 		return -ENOSYS;
518 	}
519 	return api->suspend(dev, channel);
520 }
521 
522 /**
523  * @brief Resume a DMA channel transfer
524  *
525  * Implementations must check the validity of the channel state and ID passed
526  * in and return -EINVAL if either are invalid.
527  *
528  * @funcprops \isr_ok
529  *
530  * @param dev Pointer to the device structure for the driver instance.
531  * @param channel Numeric identification of the channel to resume
532  *
533  * @retval 0 If successful.
534  * @retval -ENOSYS If not implemented
535  * @retval -EINVAL If invalid channel id or state.
536  * @retval -errno Other negative errno code failure.
537  */
538 __syscall int dma_resume(const struct device *dev, uint32_t channel);
539 
z_impl_dma_resume(const struct device * dev,uint32_t channel)540 static inline int z_impl_dma_resume(const struct device *dev, uint32_t channel)
541 {
542 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
543 
544 	if (api->resume == NULL) {
545 		return -ENOSYS;
546 	}
547 	return api->resume(dev, channel);
548 }
549 
550 /**
551  * @brief request DMA channel.
552  *
553  * request DMA channel resources
554  * return -EINVAL if there is no valid channel available.
555  *
556  * @note It is safe to use this function in contexts where blocking
557  * is not allowed, e.g. ISR, provided the implementation of the filter
558  * function does not block.
559  *
560  * @param dev Pointer to the device structure for the driver instance.
561  * @param filter_param filter function parameter
562  *
563  * @retval dma channel if successful.
564  * @retval Negative errno code if failure.
565  */
566 __syscall int dma_request_channel(const struct device *dev,
567 				  void *filter_param);
568 
z_impl_dma_request_channel(const struct device * dev,void * filter_param)569 static inline int z_impl_dma_request_channel(const struct device *dev,
570 					     void *filter_param)
571 {
572 	int i = 0;
573 	int channel = -EINVAL;
574 	const struct dma_driver_api *api =
575 		(const struct dma_driver_api *)dev->api;
576 	/* dma_context shall be the first one in dev data */
577 	struct dma_context *dma_ctx = (struct dma_context *)dev->data;
578 
579 	if (dma_ctx->magic != DMA_MAGIC) {
580 		return channel;
581 	}
582 
583 	for (i = 0; i < dma_ctx->dma_channels; i++) {
584 		if (!atomic_test_and_set_bit(dma_ctx->atomic, i)) {
585 			if (api->chan_filter &&
586 			    !api->chan_filter(dev, i, filter_param)) {
587 				atomic_clear_bit(dma_ctx->atomic, i);
588 				continue;
589 			}
590 			channel = i;
591 			break;
592 		}
593 	}
594 
595 	return channel;
596 }
597 
598 /**
599  * @brief release DMA channel.
600  *
601  * release DMA channel resources
602  *
603  * @note It is safe to use this function in contexts where blocking
604  * is not allowed, e.g. ISR, provided the implementation of the release
605  * function does not block.
606  *
607  * @param dev  Pointer to the device structure for the driver instance.
608  * @param channel  channel number
609  *
610  */
611 __syscall void dma_release_channel(const struct device *dev,
612 				   uint32_t channel);
613 
z_impl_dma_release_channel(const struct device * dev,uint32_t channel)614 static inline void z_impl_dma_release_channel(const struct device *dev,
615 					      uint32_t channel)
616 {
617 	const struct dma_driver_api *api =
618 		(const struct dma_driver_api *)dev->api;
619 	struct dma_context *dma_ctx = (struct dma_context *)dev->data;
620 
621 	if (dma_ctx->magic != DMA_MAGIC) {
622 		return;
623 	}
624 
625 	if ((int)channel < dma_ctx->dma_channels) {
626 		if (api->chan_release) {
627 			api->chan_release(dev, channel);
628 		}
629 
630 		atomic_clear_bit(dma_ctx->atomic, channel);
631 	}
632 
633 }
634 
635 /**
636  * @brief DMA channel filter.
637  *
638  * filter channel by attribute
639  *
640  * @param dev  Pointer to the device structure for the driver instance.
641  * @param channel  channel number
642  * @param filter_param filter attribute
643  *
644  * @retval Negative errno code if not support
645  *
646  */
647 __syscall int dma_chan_filter(const struct device *dev,
648 				   int channel, void *filter_param);
649 
z_impl_dma_chan_filter(const struct device * dev,int channel,void * filter_param)650 static inline int z_impl_dma_chan_filter(const struct device *dev,
651 					      int channel, void *filter_param)
652 {
653 	const struct dma_driver_api *api =
654 		(const struct dma_driver_api *)dev->api;
655 
656 	if (api->chan_filter) {
657 		return api->chan_filter(dev, channel, filter_param);
658 	}
659 
660 	return -ENOSYS;
661 }
662 
663 /**
664  * @brief get current runtime status of DMA transfer
665  *
666  * Implementations must check the validity of the channel ID passed in and
667  * return -EINVAL if it is invalid or -ENOSYS if not supported.
668  *
669  * @funcprops \isr_ok
670  *
671  * @param dev     Pointer to the device structure for the driver instance.
672  * @param channel Numeric identification of the channel where the transfer was
673  *                being processed
674  * @param stat   a non-NULL dma_status object for storing DMA status
675  *
676  * @retval non-negative if successful.
677  * @retval Negative errno code if failure.
678  */
dma_get_status(const struct device * dev,uint32_t channel,struct dma_status * stat)679 static inline int dma_get_status(const struct device *dev, uint32_t channel,
680 				 struct dma_status *stat)
681 {
682 	const struct dma_driver_api *api =
683 		(const struct dma_driver_api *)dev->api;
684 
685 	if (api->get_status) {
686 		return api->get_status(dev, channel, stat);
687 	}
688 
689 	return -ENOSYS;
690 }
691 
692 /**
693  * @brief get attribute of a dma controller
694  *
695  * This function allows to get a device specific static or runtime attribute like required address
696  * and size alignment of a buffer.
697  * Implementations must check the validity of the type passed in and
698  * return -EINVAL if it is invalid or -ENOSYS if not supported.
699  *
700  * @funcprops \isr_ok
701  *
702  * @param dev     Pointer to the device structure for the driver instance.
703  * @param type    Numeric identification of the attribute
704  * @param value   A non-NULL pointer to the variable where the read value is to be placed
705  *
706  * @retval non-negative if successful.
707  * @retval Negative errno code if failure.
708  */
dma_get_attribute(const struct device * dev,uint32_t type,uint32_t * value)709 static inline int dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
710 {
711 	const struct dma_driver_api *api = (const struct dma_driver_api *)dev->api;
712 
713 	if (api->get_attribute) {
714 		return api->get_attribute(dev, type, value);
715 	}
716 
717 	return -ENOSYS;
718 }
719 
720 /**
721  * @brief Look-up generic width index to be used in registers
722  *
723  * @warning This look-up works for most controllers, but *may* not work for
724  *          yours.  Ensure your controller expects the most common register
725  *          bit values before using this convenience function.  If your
726  *          controller does not support these values, you will have to write
727  *          your own look-up inside the controller driver.
728  *
729  * @param size: width of bus (in bytes)
730  *
731  * @retval common DMA index to be placed into registers.
732  */
dma_width_index(uint32_t size)733 static inline uint32_t dma_width_index(uint32_t size)
734 {
735 	/* Check boundaries (max supported width is 32 Bytes) */
736 	if (size < 1 || size > 32) {
737 		return 0; /* Zero is the default (8 Bytes) */
738 	}
739 
740 	/* Ensure size is a power of 2 */
741 	if (!is_power_of_two(size)) {
742 		return 0; /* Zero is the default (8 Bytes) */
743 	}
744 
745 	/* Convert to bit pattern for writing to a register */
746 	return find_msb_set(size);
747 }
748 
749 /**
750  * @brief Look-up generic burst index to be used in registers
751  *
752  * @warning This look-up works for most controllers, but *may* not work for
753  *          yours.  Ensure your controller expects the most common register
754  *          bit values before using this convenience function.  If your
755  *          controller does not support these values, you will have to write
756  *          your own look-up inside the controller driver.
757  *
758  * @param burst: number of bytes to be sent in a single burst
759  *
760  * @retval common DMA index to be placed into registers.
761  */
dma_burst_index(uint32_t burst)762 static inline uint32_t dma_burst_index(uint32_t burst)
763 {
764 	/* Check boundaries (max supported burst length is 256) */
765 	if (burst < 1 || burst > 256) {
766 		return 0; /* Zero is the default (1 burst length) */
767 	}
768 
769 	/* Ensure burst is a power of 2 */
770 	if (!(burst & (burst - 1))) {
771 		return 0; /* Zero is the default (1 burst length) */
772 	}
773 
774 	/* Convert to bit pattern for writing to a register */
775 	return find_msb_set(burst);
776 }
777 
778 /**
779  * @brief Get the device tree property describing the buffer address alignment
780  *
781  * Useful when statically defining or allocating buffers for DMA usage where
782  * memory alignment often matters.
783  *
784  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
785  * @return alignment Memory byte alignment required for DMA buffers
786  */
787 #define DMA_BUF_ADDR_ALIGNMENT(node) DT_PROP(node, dma_buf_addr_alignment)
788 
789 /**
790  * @brief Get the device tree property describing the buffer size alignment
791  *
792  * Useful when statically defining or allocating buffers for DMA usage where
793  * memory alignment often matters.
794  *
795  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
796  * @return alignment Memory byte alignment required for DMA buffers
797  */
798 #define DMA_BUF_SIZE_ALIGNMENT(node) DT_PROP(node, dma_buf_size_alignment)
799 
800 /**
801  * @brief Get the device tree property describing the minimal chunk of data possible to be copied
802  *
803  * @param node Node identifier, e.g. DT_NODELABEL(dma_0)
804  * @return minimal Minimal chunk of data possible to be copied
805  */
806 #define DMA_COPY_ALIGNMENT(node) DT_PROP(node, dma_copy_alignment)
807 
808 /**
809  * @}
810  */
811 
812 #ifdef __cplusplus
813 }
814 #endif
815 
816 #include <zephyr/syscalls/dma.h>
817 
818 #endif /* ZEPHYR_INCLUDE_DRIVERS_DMA_H_ */
819