/**
 * @file
 *
 * @brief Public APIs for the DMA drivers.
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_DMA_H_
#define ZEPHYR_INCLUDE_DRIVERS_DMA_H_

#include <kernel.h>
#include <device.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief DMA Interface
 * @defgroup dma_interface DMA Interface
 * @ingroup io_interfaces
 * @{
 */

enum dma_channel_direction {
	MEMORY_TO_MEMORY = 0x0,
	MEMORY_TO_PERIPHERAL,
	PERIPHERAL_TO_MEMORY,
	PERIPHERAL_TO_PERIPHERAL /* only supported by NXP eDMA */
};

/** Valid values for @a source_addr_adj and @a dest_addr_adj */
enum dma_addr_adj {
	DMA_ADDR_ADJ_INCREMENT,
	DMA_ADDR_ADJ_DECREMENT,
	DMA_ADDR_ADJ_NO_CHANGE,
};

/* channel attributes */
enum dma_channel_filter {
	DMA_CHANNEL_NORMAL, /* normal DMA channel */
	DMA_CHANNEL_PERIODIC, /* can be triggered by periodic sources */
};
/**
 * @struct dma_block_config
 * @brief DMA block configuration structure.
 *
 * @param source_address is block starting address at source
 * @param source_gather_interval is the address adjustment at gather boundary
 * @param dest_address is block starting address at destination
 * @param dest_scatter_interval is the address adjustment at scatter boundary
 * @param dest_scatter_count is the continuous transfer count between scatter
 *                           boundaries
 * @param source_gather_count is the continuous transfer count between gather
 *                            boundaries
 *
 * @param block_size is the number of bytes to be transferred for this block.
 *
 * @param next_block is a pointer to the next block in a chained transfer,
 *                   or NULL if this is the last block.
 *
 * @param config is a bit field with the following parts:
 *
 *     source_gather_en   [ 0 ]       - 0-disable, 1-enable.
 *     dest_scatter_en    [ 1 ]       - 0-disable, 1-enable.
 *     source_addr_adj    [ 2 : 3 ]   - 00-increment, 01-decrement,
 *                                      10-no change.
 *     dest_addr_adj      [ 4 : 5 ]   - 00-increment, 01-decrement,
 *                                      10-no change.
 *     source_reload_en   [ 6 ]       - reload source address at the end of
 *                                      block transfer
 *                                      0-disable, 1-enable.
 *     dest_reload_en     [ 7 ]       - reload destination address at the end
 *                                      of block transfer
 *                                      0-disable, 1-enable.
 *     fifo_mode_control  [ 8 : 11 ]  - how full the FIFO must be before the
 *                                      transfer starts. HW specific.
 *     flow_control_mode  [ 12 ]      - 0-source request served upon data
 *                                      availability.
 *                                      1-source request postponed until
 *                                      destination request happens.
 *     reserved           [ 13 : 15 ]
 */
struct dma_block_config {
#ifdef CONFIG_DMA_64BIT
	uint64_t source_address;
	uint64_t dest_address;
#else
	uint32_t source_address;
	uint32_t dest_address;
#endif
	uint32_t source_gather_interval;
	uint32_t dest_scatter_interval;
	uint16_t dest_scatter_count;
	uint16_t source_gather_count;
	uint32_t block_size;
	struct dma_block_config *next_block;
	uint16_t source_gather_en : 1;
	uint16_t dest_scatter_en : 1;
	uint16_t source_addr_adj : 2;
	uint16_t dest_addr_adj : 2;
	uint16_t source_reload_en : 1;
	uint16_t dest_reload_en : 1;
	uint16_t fifo_mode_control : 4;
	uint16_t flow_control_mode : 1;
	uint16_t reserved : 3;
};
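
/*
 * Illustrative sketch only (not part of this API): a single memory-to-memory
 * block descriptor. The buffer names and sizes below are placeholders, and
 * the uintptr_t casts simply let the same snippet fit either width of the
 * address fields.
 *
 *	static uint8_t src_buf[64];
 *	static uint8_t dst_buf[64];
 *
 *	struct dma_block_config blk = {
 *		.source_address = (uintptr_t)src_buf,
 *		.dest_address = (uintptr_t)dst_buf,
 *		.block_size = sizeof(dst_buf),
 *		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *		.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *	};
 */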

/**
 * @typedef dma_callback_t
 * @brief Callback function for DMA transfer completion
 *
 * If enabled, the callback function is invoked at transfer completion
 * or when an error happens.
 *
 * @param dev Pointer to the DMA device calling the callback.
 * @param user_data A pointer to some user data or NULL
 * @param channel The channel number
 * @param status 0 on success, a negative errno otherwise
 */
typedef void (*dma_callback_t)(const struct device *dev, void *user_data,
			       uint32_t channel, int status);

/**
 * @struct dma_config
 * @brief DMA configuration structure.
 *
 * @param dma_slot             [ 0 : 6 ]   - which peripheral and direction
 *                                           (HW specific)
 * @param channel_direction    [ 7 : 9 ]   - 000-memory to memory,
 *                                           001-memory to peripheral,
 *                                           010-peripheral to memory,
 *                                           011-peripheral to peripheral,
 *                                           ...
 * @param complete_callback_en [ 10 ]      - 0-callback invoked at completion only
 *                                           1-callback invoked at completion of
 *                                             each block
 * @param error_callback_en    [ 11 ]      - 0-error callback enabled
 *                                           1-error callback disabled
 * @param source_handshake     [ 12 ]      - 0-HW, 1-SW
 * @param dest_handshake       [ 13 ]      - 0-HW, 1-SW
 * @param channel_priority     [ 14 : 17 ] - DMA channel priority
 * @param source_chaining_en   [ 18 ]      - enable/disable source block chaining
 *                                           0-disable, 1-enable
 * @param dest_chaining_en     [ 19 ]      - enable/disable destination block
 *                                           chaining.
 *                                           0-disable, 1-enable
 * @param linked_channel       [ 20 : 26 ] - channel at which a service request
 *                                           is initiated once this channel's
 *                                           transfer count is exhausted
 * @param reserved             [ 27 : 31 ]
 * @param source_data_size     [ 0 : 15 ]  - width of source data (in bytes)
 * @param dest_data_size       [ 16 : 31 ] - width of dest data (in bytes)
 * @param source_burst_length  [ 0 : 15 ]  - number of source data units
 * @param dest_burst_length    [ 16 : 31 ] - number of destination data units
 * @param block_count is the number of blocks used for block chaining; the
 *                    supported count depends on the DMA controller.
 * @param head_block is a pointer to the first (or only) dma_block_config in
 *                   the chain.
 * @param user_data private data from the DMA client.
 * @param dma_callback see dma_callback_t for details
 */
struct dma_config {
	uint32_t dma_slot : 7;
	uint32_t channel_direction : 3;
	uint32_t complete_callback_en : 1;
	uint32_t error_callback_en : 1;
	uint32_t source_handshake : 1;
	uint32_t dest_handshake : 1;
	uint32_t channel_priority : 4;
	uint32_t source_chaining_en : 1;
	uint32_t dest_chaining_en : 1;
	uint32_t linked_channel : 7;
	uint32_t reserved : 5;
	uint32_t source_data_size : 16;
	uint32_t dest_data_size : 16;
	uint32_t source_burst_length : 16;
	uint32_t dest_burst_length : 16;
	uint32_t block_count;
	struct dma_block_config *head_block;
	void *user_data;
	dma_callback_t dma_callback;
};
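
/*
 * Illustrative sketch only (not part of this API): a dma_config describing one
 * memory-to-memory block with a completion callback. "blk" refers to the block
 * descriptor sketched above; the callback name is a placeholder.
 *
 *	static void xfer_done(const struct device *dev, void *user_data,
 *			      uint32_t channel, int status)
 *	{
 *		// status is 0 on success, a negative errno on error
 *	}
 *
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 4,
 *		.dest_data_size = 4,
 *		.source_burst_length = 4,
 *		.dest_burst_length = 4,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = xfer_done,
 *	};
 */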

/**
 * DMA runtime status structure
 *
 * busy           - is the current DMA transfer busy or idle
 * dir            - DMA transfer direction
 * pending_length - data length still to be transferred, in bytes or in a
 *                  platform dependent unit.
 */
struct dma_status {
	bool busy;
	enum dma_channel_direction dir;
	uint32_t pending_length;
};

/**
 * DMA context structure
 * Note: the dma_context shall be the first member of the DMA driver's
 * data, retrieved via dev->data
 *
 * magic        - magic code to identify the context
 * dma_channels - number of DMA channels
 * atomic       - pointer to the driver's atomic_t channel-allocation bitmap
 */
struct dma_context {
	int32_t magic;
	int dma_channels;
	atomic_t *atomic;
};

/* magic code to identify context content */
#define DMA_MAGIC 0x47494749
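
/*
 * Illustrative sketch only (not part of this API): a DMA controller driver
 * embedding dma_context as the first member of its per-instance data, so the
 * channel request/release helpers below can reach it through dev->data. All
 * names (my_dma_data, MY_DMA_CHANNELS, channels_atomic) are placeholders.
 *
 *	#define MY_DMA_CHANNELS 8
 *
 *	struct my_dma_data {
 *		struct dma_context ctx;	// must remain the first member
 *		ATOMIC_DEFINE(channels_atomic, MY_DMA_CHANNELS);
 *		// driver specific state follows
 *	};
 *
 *	static struct my_dma_data my_dma_data = {
 *		.ctx = {
 *			.magic = DMA_MAGIC,
 *			.dma_channels = MY_DMA_CHANNELS,
 *			.atomic = my_dma_data.channels_atomic,
 *		},
 *	};
 */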

/**
 * @cond INTERNAL_HIDDEN
 *
 * These are for internal use only, so skip these in
 * public documentation.
 */
typedef int (*dma_api_config)(const struct device *dev, uint32_t channel,
			      struct dma_config *config);

#ifdef CONFIG_DMA_64BIT
typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
			      uint64_t src, uint64_t dst, size_t size);
#else
typedef int (*dma_api_reload)(const struct device *dev, uint32_t channel,
			      uint32_t src, uint32_t dst, size_t size);
#endif

typedef int (*dma_api_start)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_stop)(const struct device *dev, uint32_t channel);

typedef int (*dma_api_get_status)(const struct device *dev, uint32_t channel,
				  struct dma_status *status);

/**
 * @typedef dma_api_chan_filter
 * @brief Channel filter function call
 *
 * Filter function used to find a matching internal DMA channel; the filter
 * parameter is provided by the caller.
 *
 * @param dev Pointer to the DMA device instance
 * @param channel the channel id to use
 * @param filter_param filter function parameter, can be NULL
 *
 * @retval true if the channel matches the filter, false otherwise.
 */
typedef bool (*dma_api_chan_filter)(const struct device *dev,
				    int channel, void *filter_param);

__subsystem struct dma_driver_api {
	dma_api_config config;
	dma_api_reload reload;
	dma_api_start start;
	dma_api_stop stop;
	dma_api_get_status get_status;
	dma_api_chan_filter chan_filter;
};
/**
 * @endcond
 */

/**
 * @brief Configure individual channel for DMA transfer.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to configure
 * @param config Data structure containing the intended configuration for the
 *               selected channel
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
static inline int dma_config(const struct device *dev, uint32_t channel,
			     struct dma_config *config)
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	return api->config(dev, channel, config);
}
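
/*
 * Illustrative sketch only (not part of this API): configuring a channel and
 * then starting it with dma_start(), declared further below. The devicetree
 * node label "dma0", the channel number and "cfg" are placeholders for
 * whatever the board and the client driver actually provide.
 *
 *	const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *	uint32_t channel = 0;
 *
 *	if (dma_config(dma_dev, channel, &cfg) == 0) {
 *		dma_start(dma_dev, channel);
 *	}
 */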

/**
 * @brief Reload buffer(s) for a DMA channel
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel to reload
 * @param src source address for the DMA transfer
 * @param dst destination address for the DMA transfer
 * @param size size of DMA transfer
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
#ifdef CONFIG_DMA_64BIT
static inline int dma_reload(const struct device *dev, uint32_t channel,
			     uint64_t src, uint64_t dst, size_t size)
#else
static inline int dma_reload(const struct device *dev, uint32_t channel,
			     uint32_t src, uint32_t dst, size_t size)
#endif
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	if (api->reload) {
		return api->reload(dev, channel, src, dst, size);
	}

	return -ENOSYS;
}
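
/*
 * Illustrative sketch only (not part of this API): re-arming an already
 * configured channel with a fresh pair of buffers instead of rebuilding the
 * whole dma_config. Buffer and variable names are placeholders; drivers that
 * do not implement reload return -ENOSYS.
 *
 *	int ret = dma_reload(dma_dev, channel, (uintptr_t)src_buf,
 *			     (uintptr_t)dst_buf, sizeof(dst_buf));
 *	if (ret == 0) {
 *		ret = dma_start(dma_dev, channel);
 *	}
 */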

/**
 * @brief Enables DMA channel and starts the transfer; the channel must be
 *        configured beforehand.
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer will
 *                be processed
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_start(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_start(const struct device *dev, uint32_t channel)
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	return api->start(dev, channel);
}

/**
 * @brief Stops the DMA transfer and disables the channel.
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer was
 *                being processed
 *
 * @retval 0 if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_stop(const struct device *dev, uint32_t channel);

static inline int z_impl_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	return api->stop(dev, channel);
}

/**
 * @brief Request a DMA channel.
 *
 * Requests DMA channel resources;
 * returns -EINVAL if there is no valid channel available.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param filter_param filter function parameter
 *
 * @retval DMA channel number if successful.
 * @retval Negative errno code if failure.
 */
__syscall int dma_request_channel(const struct device *dev,
				  void *filter_param);

static inline int z_impl_dma_request_channel(const struct device *dev,
					     void *filter_param)
{
	int i = 0;
	int channel = -EINVAL;
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;
	/* dma_context shall be the first one in dev data */
	struct dma_context *dma_ctx = (struct dma_context *)dev->data;

	if (dma_ctx->magic != DMA_MAGIC) {
		return channel;
	}

	for (i = 0; i < dma_ctx->dma_channels; i++) {
		if (!atomic_test_and_set_bit(dma_ctx->atomic, i)) {
			if (api->chan_filter &&
			    !api->chan_filter(dev, i, filter_param)) {
				atomic_clear_bit(dma_ctx->atomic, i);
				continue;
			}
			channel = i;
			break;
		}
	}

	return channel;
}

/**
 * @brief Release a DMA channel.
 *
 * Releases DMA channel resources.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel channel number
 *
 */
__syscall void dma_release_channel(const struct device *dev,
				   uint32_t channel);

static inline void z_impl_dma_release_channel(const struct device *dev,
					      uint32_t channel)
{
	struct dma_context *dma_ctx = (struct dma_context *)dev->data;

	if (dma_ctx->magic != DMA_MAGIC) {
		return;
	}

	if (channel < dma_ctx->dma_channels) {
		atomic_clear_bit(dma_ctx->atomic, channel);
	}
}
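
/*
 * Illustrative sketch only (not part of this API): requesting any free channel
 * that matches an attribute, then releasing it when the transfer is done.
 * Passing a pointer to an enum dma_channel_filter value is an assumption here;
 * how filter_param is interpreted is entirely driver specific.
 *
 *	int filter = DMA_CHANNEL_NORMAL;
 *	int channel = dma_request_channel(dma_dev, &filter);
 *
 *	if (channel >= 0) {
 *		// ... configure, start and use the channel ...
 *		dma_release_channel(dma_dev, channel);
 *	}
 */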

/**
 * @brief DMA channel filter.
 *
 * Filter a channel by attribute.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel channel number
 * @param filter_param filter attribute
 *
 * @retval The filter result (true/false) if supported.
 * @retval Negative errno code if not supported.
 *
 */
__syscall int dma_chan_filter(const struct device *dev,
			      int channel, void *filter_param);

static inline int z_impl_dma_chan_filter(const struct device *dev,
					 int channel, void *filter_param)
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	if (api->chan_filter) {
		return api->chan_filter(dev, channel, filter_param);
	}

	return -ENOSYS;
}

/**
 * @brief Get the current runtime status of a DMA transfer
 *
 * Implementations must check the validity of the channel ID passed in and
 * return -EINVAL if it is invalid or -ENOSYS if not supported.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param channel Numeric identification of the channel where the transfer was
 *                being processed
 * @param stat a non-NULL dma_status object for storing DMA status
 *
 * @retval non-negative if successful.
 * @retval Negative errno code if failure.
 */
static inline int dma_get_status(const struct device *dev, uint32_t channel,
				 struct dma_status *stat)
{
	const struct dma_driver_api *api =
		(const struct dma_driver_api *)dev->api;

	if (api->get_status) {
		return api->get_status(dev, channel, stat);
	}

	return -ENOSYS;
}
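
/*
 * Illustrative sketch only (not part of this API): polling a channel until the
 * controller reports it idle. dma_dev and channel are placeholders; a real
 * client would normally bound the loop or rely on the completion callback
 * instead of busy-waiting.
 *
 *	struct dma_status stat;
 *
 *	do {
 *		if (dma_get_status(dma_dev, channel, &stat) != 0) {
 *			break;	// invalid channel or not supported
 *		}
 *	} while (stat.busy);
 */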

/**
 * @brief Look-up generic width index to be used in registers
 *
 * WARNING: This look-up works for most controllers, but *may* not work for
 *          yours. Ensure your controller expects the most common register
 *          bit values before using this convenience function. If your
 *          controller does not support these values, you will have to write
 *          your own look-up inside the controller driver.
 *
 * @param size width of bus (in bytes)
 *
 * @retval common DMA index to be placed into registers.
 */
static inline uint32_t dma_width_index(uint32_t size)
{
	/* Check boundaries (max supported width is 32 Bytes) */
	if (size < 1 || size > 32) {
		return 0; /* Zero is the default (8 Bytes) */
	}

	/* Ensure size is a power of 2 */
	if (!is_power_of_two(size)) {
		return 0; /* Zero is the default (8 Bytes) */
	}

	/* Convert to bit pattern for writing to a register */
	return find_msb_set(size);
}

/**
 * @brief Look-up generic burst index to be used in registers
 *
 * WARNING: This look-up works for most controllers, but *may* not work for
 *          yours. Ensure your controller expects the most common register
 *          bit values before using this convenience function. If your
 *          controller does not support these values, you will have to write
 *          your own look-up inside the controller driver.
 *
 * @param burst number of bytes to be sent in a single burst
 *
 * @retval common DMA index to be placed into registers.
 */
static inline uint32_t dma_burst_index(uint32_t burst)
{
	/* Check boundaries (max supported burst length is 256) */
	if (burst < 1 || burst > 256) {
		return 0; /* Zero is the default (1 burst length) */
	}

	/* Ensure burst is a power of 2 */
	if (!is_power_of_two(burst)) {
		return 0; /* Zero is the default (1 burst length) */
	}

	/* Convert to bit pattern for writing to a register */
	return find_msb_set(burst);
}
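
/*
 * Illustrative sketch only (not part of this API): how a controller driver
 * might translate the byte-oriented fields of a dma_config into the common
 * register encodings produced by these helpers. "cfg" is assumed to be the
 * struct dma_config pointer handed to the driver's config call.
 *
 *	uint32_t src_width = dma_width_index(cfg->source_data_size);
 *	uint32_t src_burst = dma_burst_index(cfg->source_burst_length);
 *
 *	// write src_width / src_burst into the channel's control registers
 */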

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#include <syscalls/dma.h>

#endif /* ZEPHYR_INCLUDE_DRIVERS_DMA_H_ */