// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hal/dma_types.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_log.h"
#include "esp_async_memcpy.h"
#include "esp_async_memcpy_impl.h"

static const char *TAG = "async_memcpy";

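// Check `a`; on failure, log `msg` (with optional format arguments), set the
// local variable `ret_code` to `ret` and jump to the label passed as `tag`.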
#define ASMCP_CHECK(a, msg, tag, ret, ...)                                        \
    do                                                                            \
    {                                                                             \
        if (unlikely(!(a)))                                                       \
        {                                                                         \
            ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
            ret_code = ret;                                                       \
            goto tag;                                                             \
        }                                                                         \
    } while (0)

/**
 * @brief Type of async mcp stream
 *        An mcp stream embeds a DMA descriptor as its first member (so a descriptor
 *        pointer can be converted back to its stream via __containerof) and, in
 *        addition, carries a user callback and its argument
 */
typedef struct {
    dma_descriptor_t desc;
    async_memcpy_isr_cb_t cb;
    void *cb_args;
} async_memcpy_stream_t;

/**
 * @brief Type of async mcp driver context
 */
typedef struct async_memcpy_context_t {
    async_memcpy_impl_t mcp_impl;            // implementation layer
    portMUX_TYPE spinlock;                   // spinlock, protects the descriptors from concurrent access
    intr_handle_t intr_hdl;                  // interrupt handle
    uint32_t flags;                          // extra driver flags
    dma_descriptor_t *tx_desc;               // pointer to the next free TX descriptor
    dma_descriptor_t *rx_desc;               // pointer to the next free RX descriptor
    dma_descriptor_t *next_rx_desc_to_check; // pointer to the next RX descriptor to recycle
    uint32_t max_stream_num;                 // maximum number of streams
    async_memcpy_stream_t *out_streams;      // pointer to the first TX stream
    async_memcpy_stream_t *in_streams;       // pointer to the first RX stream
    async_memcpy_stream_t streams_pool[0];   // stream pool (TX + RX), the size is configured during driver installation
} async_memcpy_context_t;

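/*
 * streams_pool layout (backlog = N, fixed at installation):
 *
 *   [TX stream 0] ... [TX stream N-1][RX stream 0] ... [RX stream N-1]
 *   ^ out_streams                    ^ in_streams
 *
 * Each half is linked into a circular descriptor chain by esp_async_memcpy_install() below.
 */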
esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_memcpy_t *asmcp)
{
    esp_err_t ret_code = ESP_OK;
    async_memcpy_context_t *mcp_hdl = NULL;

    ASMCP_CHECK(config, "configuration can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(asmcp, "can't assign mcp handle to null", err, ESP_ERR_INVALID_ARG);

    // context memory size + stream pool size
    size_t total_malloc_size = sizeof(async_memcpy_context_t) + sizeof(async_memcpy_stream_t) * config->backlog * 2;
    // to keep working when the cache is disabled, the driver handle must be located in internal SRAM
    mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
    ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);

    mcp_hdl->flags = config->flags;
    mcp_hdl->out_streams = mcp_hdl->streams_pool;
    mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
    mcp_hdl->max_stream_num = config->backlog;

    // link the TX/RX descriptors into circular chains
    for (size_t i = 0; i < mcp_hdl->max_stream_num; i++) {
        mcp_hdl->out_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->out_streams[i].desc.next = &mcp_hdl->out_streams[i + 1].desc;
        mcp_hdl->in_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->in_streams[i].desc.next = &mcp_hdl->in_streams[i + 1].desc;
    }
    // close the rings: the last descriptor links back to the first
    mcp_hdl->out_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->in_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->in_streams[0].desc;

    mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;

    // initialize implementation layer
    async_memcpy_impl_init(&mcp_hdl->mcp_impl);

    *asmcp = mcp_hdl;

    async_memcpy_impl_start(&mcp_hdl->mcp_impl, (intptr_t)&mcp_hdl->out_streams[0].desc, (intptr_t)&mcp_hdl->in_streams[0].desc);

    return ESP_OK;
err:
    if (mcp_hdl) {
        free(mcp_hdl);
    }
    if (asmcp) {
        *asmcp = NULL;
    }
    return ret_code;
}
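
/*
 * Typical installation (illustrative sketch; assumes the ASYNC_MEMCPY_DEFAULT_CONFIG()
 * macro from esp_async_memcpy.h, and the backlog value is just an example):
 *
 *     async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
 *     config.backlog = 8; // up to 8 descriptors in flight per direction
 *     async_memcpy_t driver = NULL;
 *     ESP_ERROR_CHECK(esp_async_memcpy_install(&config, &driver));
 */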

esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
{
    esp_err_t ret_code = ESP_OK;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);

    async_memcpy_impl_stop(&asmcp->mcp_impl);
    async_memcpy_impl_deinit(&asmcp->mcp_impl);
    free(asmcp);
    return ESP_OK;
err:
    return ret_code;
}

static int async_memcpy_prepare_receive(async_memcpy_t asmcp, void *buffer, size_t size, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->rx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.suc_eof = 0;
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }
    if (size) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.suc_eof = 0;
            desc->dw0.size = size;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to next descriptor
            prepared_length += size;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }

_exit:
    *start_desc = start;
    *end_desc = end;
    return prepared_length;
}

static int async_memcpy_prepare_transmit(async_memcpy_t asmcp, void *buffer, size_t len, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->tx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (len > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.suc_eof = 0; // not the end of the transaction
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            len -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }
    if (len) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.suc_eof = 1; // end of the transaction
            desc->dw0.size = len;
            desc->dw0.length = len;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to next descriptor
            prepared_length += len;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }

    *start_desc = start;
    *end_desc = end;
_exit: // on early exit, start_desc/end_desc are left untouched; the caller checks the returned length
    return prepared_length;
}
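
/*
 * Note on chunking: both prepare functions split a transfer into descriptors of at
 * most DMA_DESCRIPTOR_BUFFER_MAX_SIZE bytes (4095 at the time of writing). For
 * example, a 10000-byte copy occupies three descriptors per direction:
 * 4095 + 4095 + 1810 bytes. A copy of n bytes therefore needs
 * ceil(n / DMA_DESCRIPTOR_BUFFER_MAX_SIZE) free descriptors in each ring, which is
 * why esp_async_memcpy() rejects requests larger than
 * DMA_DESCRIPTOR_BUFFER_MAX_SIZE * max_stream_num.
 */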

static bool async_memcpy_get_next_rx_descriptor(async_memcpy_t asmcp, dma_descriptor_t *eof_desc, dma_descriptor_t **next_desc)
{
    dma_descriptor_t *next = asmcp->next_rx_desc_to_check;
    // sanity check, guards against an interrupt that got triggered by mistake
    if (next->dw0.owner == DMA_DESCRIPTOR_BUFFER_OWNER_CPU) {
        asmcp->next_rx_desc_to_check = asmcp->next_rx_desc_to_check->next;
        *next_desc = next;
        // tell the caller whether to continue: stop once the EOF descriptor has been handed out
        return eof_desc != next;
    }

    *next_desc = NULL;
    return false;
}

esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n, async_memcpy_isr_cb_t cb_isr, void *cb_args)
{
    esp_err_t ret_code = ESP_OK;
    dma_descriptor_t *rx_start_desc = NULL;
    dma_descriptor_t *rx_end_desc = NULL;
    dma_descriptor_t *tx_start_desc = NULL;
    dma_descriptor_t *tx_end_desc = NULL;
    size_t rx_prepared_size = 0;
    size_t tx_prepared_size = 0;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(async_memcpy_impl_is_buffer_address_valid(&asmcp->mcp_impl, src, dst), "buffer address not valid", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);

    // prepare the TX and RX descriptor chains
    portENTER_CRITICAL_SAFE(&asmcp->spinlock);
    rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
    tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
    if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
        // register the user callback on the last RX descriptor
        async_memcpy_stream_t *mcp_stream = __containerof(rx_end_desc, async_memcpy_stream_t, desc);
        mcp_stream->cb = cb_isr;
        mcp_stream->cb_args = cb_args;
        // hand the RX descriptors over to the DMA first
        dma_descriptor_t *desc = rx_start_desc;
        while (desc != rx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->rx_desc = desc->next;
        // then hand over the TX descriptors
        desc = tx_start_desc;
        while (desc != tx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->tx_desc = desc->next;
        async_memcpy_impl_restart(&asmcp->mcp_impl);
    }
    portEXIT_CRITICAL_SAFE(&asmcp->spinlock);

    // TX and RX descriptors are consumed at the same pace, so it's unlikely to
    // run out of RX descriptors while TX descriptors are still available
    ASMCP_CHECK(rx_prepared_size == n, "out of rx descriptor", err, ESP_FAIL);
    ASMCP_CHECK(tx_prepared_size == n, "out of tx descriptor", err, ESP_FAIL);

    return ESP_OK;
err:
    return ret_code;
}
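
/*
 * Usage sketch (illustrative, not part of the driver): the callback runs in ISR
 * context, so it may only use ISR-safe APIs; here it gives a semaphore that a
 * task is blocking on.
 *
 *     static bool on_copy_done(async_memcpy_t hdl, async_memcpy_event_t *event, void *args)
 *     {
 *         BaseType_t high_task_wakeup = pdFALSE;
 *         xSemaphoreGiveFromISR((SemaphoreHandle_t)args, &high_task_wakeup);
 *         return high_task_wakeup == pdTRUE; // request a yield if a higher-priority task woke up
 *     }
 *
 *     SemaphoreHandle_t done = xSemaphoreCreateBinary();
 *     ESP_ERROR_CHECK(esp_async_memcpy(driver, dst_buf, src_buf, len, on_copy_done, done));
 *     xSemaphoreTake(done, portMAX_DELAY); // block until the DMA copy completes
 */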

IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl)
{
    bool to_continue = false;
    async_memcpy_stream_t *in_stream = NULL;
    dma_descriptor_t *next_desc = NULL;
    async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl);

    // get the RX EOF descriptor address
    dma_descriptor_t *eof = (dma_descriptor_t *)impl->rx_eof_addr;
    // traverse all unchecked descriptors
    do {
        portENTER_CRITICAL_ISR(&asmcp->spinlock);
        // Assumption: RX descriptors are consumed at the same pace as TX descriptors (determined by the M2M DMA working mechanism),
        // so once an RX descriptor is recycled, the corresponding TX descriptor is guaranteed to have been returned by the DMA
        to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc);
        portEXIT_CRITICAL_ISR(&asmcp->spinlock);
        if (next_desc) {
            in_stream = __containerof(next_desc, async_memcpy_stream_t, desc);
            // invoke the user-registered callback if available
            if (in_stream->cb) {
                async_memcpy_event_t e = {0};
                if (in_stream->cb(asmcp, &e, in_stream->cb_args)) {
                    impl->isr_need_yield = true;
                }
                in_stream->cb = NULL;
                in_stream->cb_args = NULL;
            }
        }
    } while (to_continue);
}