// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>
#include "soc/cp_dma_struct.h"

#define CP_DMA_LL_EVENT_RX_DONE       (1 << 0)
#define CP_DMA_LL_EVENT_RX_EOF        (1 << 1)
#define CP_DMA_LL_EVENT_TX_DONE       (1 << 2)
#define CP_DMA_LL_EVENT_TX_EOF        (1 << 3)
#define CP_DMA_LL_EVENT_RX_DESC_ERR   (1 << 4)
#define CP_DMA_LL_EVENT_TX_DESC_ERR   (1 << 5)
#define CP_DMA_LL_EVENT_RX_DESC_EMPTY (1 << 6)
#define CP_DMA_LL_EVENT_TX_TOTAL_EOF  (1 << 7)
#define CP_DMA_LL_EVENT_ALL           (0xFF)

/**
 * Copy DMA firstly reads data to be transferred from internal RAM,
 * stores the data into DMA FIFO via an outlink,
 * and then writes the data to the destination internal RAM via an inlink.
 */

/**
 * @brief Reset the CP DMA inlink (RX) engine by pulsing its reset bit.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_reset_in_link(cp_dma_dev_t *dev)
{
    // Toggle reset: assert then immediately deassert
    dev->dma_conf.dma_in_rst = 1;
    dev->dma_conf.dma_in_rst = 0;
}

/**
 * @brief Reset the CP DMA outlink (TX) engine by pulsing its reset bit.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_reset_out_link(cp_dma_dev_t *dev)
{
    // Toggle reset: assert then immediately deassert
    dev->dma_conf.dma_out_rst = 1;
    dev->dma_conf.dma_out_rst = 0;
}

/**
 * @brief Reset the CP DMA data FIFO by pulsing its reset bit.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_reset_fifo(cp_dma_dev_t *dev)
{
    // Toggle reset: assert then immediately deassert
    dev->dma_conf.dma_fifo_rst = 1;
    dev->dma_conf.dma_fifo_rst = 0;
}

/**
 * @brief Reset the CP DMA command FIFO by pulsing its reset bit.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_reset_cmd_fifo(cp_dma_dev_t *dev)
{
    // Toggle reset: assert then immediately deassert
    dev->dma_conf.dma_cmdfifo_rst = 1;
    dev->dma_conf.dma_cmdfifo_rst = 0;
}

/**
 * @brief Enable or disable hardware checking of the descriptor owner bit.
 *
 * Besides the owner-check switch, this also enables automatic write-back of
 * the outlink descriptor owner bit and clears the software-forced owner
 * values for both directions, so ownership is managed by hardware.
 *
 * @param dev    Peripheral register base (CP DMA device handle)
 * @param enable true to let hardware validate the owner bit, false to skip the check
 */
static inline void cp_dma_ll_enable_owner_check(cp_dma_dev_t *dev, bool enable)
{
    dev->dma_conf.dma_check_owner = enable;
    dev->dma_conf.dma_out_auto_wrback = 1; // HW writes owner bit back when a TX descriptor is consumed
    dev->dma_conf.dma_out_owner = 0;
    dev->dma_conf.dma_in_owner = 0;
}

/**
 * @brief Enable or disable the CP DMA peripheral clock gate.
 *
 * @param dev    Peripheral register base (CP DMA device handle)
 * @param enable true to ungate the clock, false to gate it
 */
static inline void cp_dma_ll_enable_clock(cp_dma_dev_t *dev, bool enable)
{
    dev->dma_conf.dma_clk_en = enable;
}

/**
 * @brief Enable or disable a set of CP DMA interrupts.
 *
 * @param dev    Peripheral register base (CP DMA device handle)
 * @param mask   Bit mask of events to change (CP_DMA_LL_EVENT_* flags)
 * @param enable true to enable the masked interrupts, false to disable them
 */
static inline void cp_dma_ll_enable_intr(cp_dma_dev_t *dev, uint32_t mask, bool enable)
{
    // Read-modify-write so bits outside of `mask` are preserved
    if (enable) {
        dev->dma_int_ena.val |= mask;
    } else {
        dev->dma_int_ena.val &= ~mask;
    }
}

/**
 * @brief Get the masked interrupt status word.
 *
 * Marked always_inline so it is safe to call from an IRAM interrupt handler.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 * @return Bit mask of pending events (CP_DMA_LL_EVENT_* flags)
 */
static inline __attribute__((always_inline)) uint32_t cp_dma_ll_get_intr_status(cp_dma_dev_t *dev)
{
    return dev->dma_int_st.val;
}

/**
 * @brief Clear pending interrupt status bits.
 *
 * Marked always_inline so it is safe to call from an IRAM interrupt handler.
 *
 * @param dev  Peripheral register base (CP DMA device handle)
 * @param mask Bit mask of events to clear (CP_DMA_LL_EVENT_* flags)
 */
static inline __attribute__((always_inline)) void cp_dma_ll_clear_intr_status(cp_dma_dev_t *dev, uint32_t mask)
{
    // Write-1-to-clear register: plain store, no read-modify-write needed
    dev->dma_int_clr.val = mask;
}

/**
 * @brief Set the base address of the first TX (outlink) descriptor.
 *
 * @param dev     Peripheral register base (CP DMA device handle)
 * @param address Address of the first descriptor in the outlink chain
 */
static inline void cp_dma_ll_tx_set_descriptor_base_addr(cp_dma_dev_t *dev, uint32_t address)
{
    dev->dma_out_link.dma_outlink_addr = address;
}

/**
 * @brief Set the base address of the first RX (inlink) descriptor.
 *
 * @param dev     Peripheral register base (CP DMA device handle)
 * @param address Address of the first descriptor in the inlink chain
 */
static inline void cp_dma_ll_rx_set_descriptor_base_addr(cp_dma_dev_t *dev, uint32_t address)
{
    dev->dma_in_link.dma_inlink_addr = address;
}

/**
 * @brief Start or stop the TX (outlink) engine.
 *
 * @param dev    Peripheral register base (CP DMA device handle)
 * @param enable true to start fetching outlink descriptors, false to stop
 */
static inline void cp_dma_ll_start_tx(cp_dma_dev_t *dev, bool enable)
{
    if (enable) {
        dev->dma_out_link.dma_outlink_start = 1; // cleared automatically by HW
    } else {
        dev->dma_out_link.dma_outlink_stop = 1; // cleared automatically by HW
    }
}

/**
 * @brief Start or stop the RX (inlink) engine.
 *
 * @param dev    Peripheral register base (CP DMA device handle)
 * @param enable true to start fetching inlink descriptors, false to stop
 */
static inline void cp_dma_ll_start_rx(cp_dma_dev_t *dev, bool enable)
{
    if (enable) {
        dev->dma_in_link.dma_inlink_start = 1; // cleared automatically by HW
    } else {
        dev->dma_in_link.dma_inlink_stop = 1; // cleared automatically by HW
    }
}

/**
 * @brief Restart the TX (outlink) engine, resuming from the current descriptor chain.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_restart_tx(cp_dma_dev_t *dev)
{
    dev->dma_out_link.dma_outlink_restart = 1; // cleared automatically by HW
}

/**
 * @brief Restart the RX (inlink) engine, resuming from the current descriptor chain.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 */
static inline void cp_dma_ll_restart_rx(cp_dma_dev_t *dev)
{
    dev->dma_in_link.dma_inlink_restart = 1; // cleared automatically by HW
}

135 // get the address of last rx descriptor
cp_dma_ll_get_rx_eof_descriptor_address(cp_dma_dev_t * dev)136 static inline uint32_t cp_dma_ll_get_rx_eof_descriptor_address(cp_dma_dev_t *dev)
137 {
138 return dev->dma_in_eof_des_addr.dma_in_suc_eof_des_addr;
139 }
140
141 // get the address of last tx descriptor
cp_dma_ll_get_tx_eof_descriptor_address(cp_dma_dev_t * dev)142 static inline uint32_t cp_dma_ll_get_tx_eof_descriptor_address(cp_dma_dev_t *dev)
143 {
144 return dev->dma_out_eof_des_addr.dma_out_eof_des_addr;
145 }
146
/**
 * @brief Get the raw TX (outlink) status register value.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 * @return Raw value of the dma_out_st register
 */
static inline uint32_t cp_dma_ll_get_tx_status(cp_dma_dev_t *dev)
{
    return dev->dma_out_st.val;
}

/**
 * @brief Get the raw RX (inlink) status register value.
 *
 * @param dev Peripheral register base (CP DMA device handle)
 * @return Raw value of the dma_in_st register
 */
static inline uint32_t cp_dma_ll_get_rx_status(cp_dma_dev_t *dev)
{
    return dev->dma_in_st.val;
}

#ifdef __cplusplus
}
#endif