/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The HAL layer for SDIO slave (common part)

#include <string.h>
#include "soc/slc_struct.h"
#include "soc/hinf_struct.h"
#include "hal/sdio_slave_types.h"
#include "soc/host_struct.h"
#include "hal/sdio_slave_hal.h"
#include "hal/assert.h"
#include "hal/log.h"
#include "esp_attr.h"


#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
    HAL_LOGE(TAG, "%s", str);\
    return ret_val;\
} }while (0)

/* The tag may be unused if log level is set to NONE  */
static const __attribute__((unused)) char TAG[] = "SDIO_HAL";

static esp_err_t init_send_queue(sdio_slave_context_t *hal);

/**************** Ring buffer for SDIO sending use *****************/
typedef enum {
    RINGBUF_GET_ONE = 0,
    RINGBUF_GET_ALL = 1,
} ringbuf_get_all_t;

typedef enum {
    RINGBUF_WRITE_PTR,
    RINGBUF_READ_PTR,
    RINGBUF_FREE_PTR,
} sdio_ringbuf_pointer_t;

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg);
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, sdio_slave_hal_send_desc_t **start, sdio_slave_hal_send_desc_t **end, ringbuf_get_all_t get_all);
static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr);

#define _SEND_DESC_NEXT(x)    STAILQ_NEXT(&((sdio_slave_hal_send_desc_t*)x)->dma_desc, qe)
#define SEND_DESC_NEXT(x)    (sdio_slave_hal_send_desc_t*)_SEND_DESC_NEXT(x)
#define SEND_DESC_NEXT_SET(x, target)    do { \
        _SEND_DESC_NEXT(x)=(sdio_slave_ll_desc_t*)target; \
    }while(0)

static esp_err_t link_desc_to_last(uint8_t* desc, void* arg)
{
    SEND_DESC_NEXT_SET(arg, desc);
    return ESP_OK;
}

//calculate a pointer at a given offset from one of the base pointers of the specified ringbuffer
static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
{
    uint8_t *buf_ptr;
    switch (ptr) {
        case RINGBUF_WRITE_PTR:
            buf_ptr = buf->write_ptr;
            break;
        case RINGBUF_READ_PTR:
            buf_ptr = buf->read_ptr;
            break;
        case RINGBUF_FREE_PTR:
            buf_ptr = buf->free_ptr;
            break;
        default:
            abort();
    }

    uint8_t *offset_ptr = buf_ptr + offset;
    if (offset_ptr >= buf->data + buf->size) {
        offset_ptr -= buf->size;
    }
    return offset_ptr;
}

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg)
{
    uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, RINGBUF_WRITE_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    esp_err_t err = ESP_OK;
    if (copy_callback) {
        err = (*copy_callback)(get_ptr, arg);
    }
    if (err != ESP_OK) return err;

    buf->write_ptr = get_ptr;
    return ESP_OK;
}

// this ringbuf uses a return-before-recv-again strategy:
// since it is designed to be called in the ISR, there is no parallel logic
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, sdio_slave_hal_send_desc_t **start, sdio_slave_hal_send_desc_t **end, ringbuf_get_all_t get_all)
{
    HAL_ASSERT(buf->free_ptr == buf->read_ptr);   //must return before recv again
    if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have an output
    if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data

    uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);

    if (get_all != RINGBUF_GET_ONE) {
        buf->read_ptr = buf->write_ptr;
    } else {
        buf->read_ptr = get_start;
    }

    if (start != NULL) {
        *start = (sdio_slave_hal_send_desc_t *) get_start;
    }
    if (end != NULL) {
        *end = (sdio_slave_hal_send_desc_t *) buf->read_ptr;
    }
    return ESP_OK;
}

static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
{
    HAL_ASSERT(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr);
    size_t size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
    size_t count = size / SDIO_SLAVE_SEND_DESC_SIZE;
    HAL_ASSERT(count * SDIO_SLAVE_SEND_DESC_SIZE == size);
    buf->free_ptr = buf->read_ptr;
    return count;
}

static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf)
{
    if (buf->read_ptr != buf->write_ptr) {
        return sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    } else {
        return NULL;
    }
}

static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
{
    return buf->write_ptr;
}

static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf)
{
    return (buf->read_ptr == buf->write_ptr);
}

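/*
 * Illustrative sketch (not compiled): the calling protocol of this ring
 * buffer as used below. Descriptors obtained by ``sdio_ringbuf_recv()`` must
 * be handed back with ``sdio_ringbuf_return()`` before recv is called again.
 * The function name here is hypothetical.
 */
#if 0
static void ringbuf_usage_sketch(sdio_ringbuf_t *buf)
{
    sdio_slave_hal_send_desc_t *start;
    sdio_slave_hal_send_desc_t *end;
    //take one descriptor out of the ring buffer
    if (sdio_ringbuf_recv(buf, &start, &end, RINGBUF_GET_ONE) == ESP_OK) {
        //... use the descriptors from start to end ...
        //return them before the next recv; the return value is how many were freed
        int returned = sdio_ringbuf_return(buf, (uint8_t *)start);
        (void)returned;
    }
}
#endif
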
/**************** End of Ring buffer *****************/

void sdio_slave_hal_init(sdio_slave_context_t *hal)
{
    hal->host = sdio_slave_ll_get_host(0);
    hal->slc = sdio_slave_ll_get_slc(0);
    hal->hinf = sdio_slave_ll_get_hinf(0);
    hal->send_state = STATE_IDLE;
    hal->recv_link_list = (sdio_slave_hal_recv_stailq_t)STAILQ_HEAD_INITIALIZER(hal->recv_link_list);

    init_send_queue(hal);
}

void sdio_slave_hal_hw_init(sdio_slave_context_t *hal)
{
    sdio_slave_ll_init(hal->slc);
    sdio_slave_ll_enable_hs(hal->hinf, !hal->no_highspeed);
    sdio_slave_ll_set_timing(hal->host, hal->timing);
    sdio_slave_ll_slvint_t intr_ena = 0xff;
    sdio_slave_ll_slvint_set_ena(hal->slc, &intr_ena);
}

static esp_err_t init_send_queue(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    esp_err_t rcv_res __attribute((unused));
    sdio_ringbuf_t *buf = &(hal->send_desc_queue);

    //initialize pointers
    buf->write_ptr = buf->data;
    buf->read_ptr = buf->data;
    buf->free_ptr = buf->data;

    sdio_slave_hal_send_desc_t *first = NULL, *last = NULL;
    //no copy for the first descriptor

    ret = sdio_ringbuf_send(buf, NULL, NULL);
    if (ret != ESP_OK) return ret;

    //loop through the ringbuf to link all the descs one after another into a ring
    for (int i = 0; i < hal->send_queue_size + 1; i++) {
        rcv_res = sdio_ringbuf_recv(buf, &last, NULL, RINGBUF_GET_ONE);
        assert(rcv_res == ESP_OK);

        ret = sdio_ringbuf_send(buf, link_desc_to_last, last);
        if (ret != ESP_OK) return ret;

        sdio_ringbuf_return(buf, (uint8_t *) last);
    }

    first = NULL;
    last = NULL;
    //clear the queue
    rcv_res = sdio_ringbuf_recv(buf, &first, &last, RINGBUF_GET_ALL);
    assert(rcv_res == ESP_OK);
    HAL_ASSERT(first == last); //there should be only one desc remaining
    sdio_ringbuf_return(buf, (uint8_t *) first);
    return ESP_OK;
}

void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready)
{
    sdio_slave_ll_set_ioready(hal->hinf, ready);   //when the IO ready bit is 1, the host is allowed to use the slave
}


/*---------------------------------------------------------------------------
 *                  Send
 *
 *  The hardware has a cache, so once a descriptor is loaded onto the linked list, it cannot be modified
 *  until returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during
 *  a transfer (or while waiting for the host to start a transfer). However, we use a "ringbuffer" (different from
 *  the one in the ``freertos/`` folder) holding descriptors to solve this:

 *  1.  The driver allocates contiguous memory for several buffer descriptors (the maximum buffer number) during
 *      initialization. Then the driver points the STAILQ_NEXT pointer of every descriptor except the last one
 *      to the next descriptor, and points the STAILQ_NEXT pointer of the last descriptor back to the first one:
 *      now the descriptors form a ring (see the sketch after this comment).

 *  2.  The "ringbuffer" has a write pointer indicating where the app can write a new descriptor. The app writes the
 *      new descriptor at the write pointer without touching the STAILQ_NEXT pointer, so that the descriptors always
 *      stay in a ring-like linked list. The app never touches the part of the linked list being used by the hardware.

 *  3.  When the hardware needs some data to send, it automatically picks a part of the linked descriptors. Depending on the mode:
 *          - Buffer mode: picks only the descriptor following the last one sent;
 *          - Stream mode: picks the whole unsent part of the linked list, from the descriptor above to the latest linked one.

 *      The driver clears the STAILQ_NEXT pointer of the last descriptor and hands the head of this part to the DMA
 *      controller, so that to the hardware it looks like a linear linked list rather than a ring.

 *  4.  The counter of the sending FIFO increases when the app loads new buffers (in STREAM_MODE) or when a new transfer
 *      should start (in PACKET_MODE).

 *  5.  When the sending transfer is finished, the driver walks through the descriptors just sent in the ISR and pushes
 *      the ``arg`` member of each descriptor back to the app through the queue, so that the app can handle the finished
 *      buffers. The driver also fixes the STAILQ_NEXT pointer of the last descriptor so that the descriptors form a ring again.
----------------------------------------------------------------------------*/
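
/*
 * Illustrative sketch (not compiled): how the ring in step 1 above would be
 * built if the descriptors sat in a plain array. The actual driver builds it
 * through ``init_send_queue()`` using the ringbuffer helpers; the array and
 * function name here are hypothetical.
 */
#if 0
static void link_descs_into_ring(sdio_slave_hal_send_desc_t *descs, int num)
{
    for (int i = 0; i < num; i++) {
        //each descriptor points to the next; the last one wraps back to the first
        SEND_DESC_NEXT_SET(&descs[i], &descs[(i + 1) % num]);
    }
}
#endif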
static inline void send_set_state(sdio_slave_context_t *hal, send_state_t state)
{
    hal->send_state = state;
}

static inline send_state_t send_get_state(sdio_slave_context_t* hal)
{
    return hal->send_state;
}

DMA_ATTR static const sdio_slave_ll_desc_t start_desc = {
    .owner = 1,
    .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used
    .size = 1,
    .length = 1,
    .eof = 1,
};

//force-trigger the rx_done interrupt. This interrupt is abused to invoke the ISR from the app through the enable bit, and is never cleared.
static void send_isr_invoker_enable(const sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, &start_desc);
    //wait for rx_done
    while(!sdio_slave_ll_send_invoker_ready(hal->slc));
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_hostint_clr(hal->host);
}

static void send_isr_invoker_disable(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_clear(hal->slc);
}

void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, false);
}

//start hw operation with existing data (if any)
esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "already started", ESP_ERR_INVALID_STATE);
    send_set_state(hal, STATE_WAIT_FOR_START);
    send_isr_invoker_enable(hal);
    sdio_slave_ll_send_intr_clr(hal->slc);
    sdio_slave_ll_send_intr_ena(hal->slc, true);
    return ESP_OK;
}

//only stops hw operations; neither data nor the counter is touched
void sdio_slave_hal_send_stop(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_stop(hal->slc);
    send_isr_invoker_disable(hal);
    sdio_slave_ll_send_intr_ena(hal->slc, false);
    send_set_state(hal, STATE_IDLE);
}

static void send_new_packet(sdio_slave_context_t *hal)
{
    // since the eof bit has changed, we have to stop and reset the linked list,
    // and restart the linked-list operation
    sdio_slave_hal_send_desc_t *const start_desc = hal->in_flight_head;
    sdio_slave_hal_send_desc_t *const end_desc = hal->in_flight_end;
    HAL_ASSERT(start_desc != NULL && end_desc != NULL);

    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, (sdio_slave_ll_desc_t*)start_desc);

    // update the pkt_len register to allow the host to read.
    sdio_slave_ll_send_write_len(hal->slc, end_desc->pkt_len);
    HAL_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", end_desc->pkt_len, sdio_slave_ll_send_read_len(hal->host));

    send_set_state(hal, STATE_SENDING);

    HAL_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len);
}

static esp_err_t send_check_new_packet(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *start = NULL;
    sdio_slave_hal_send_desc_t *end = NULL;
    if (hal->sending_mode == SDIO_SLAVE_SEND_PACKET) {
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &start, &end, RINGBUF_GET_ONE);
    } else { //stream mode
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &start, &end, RINGBUF_GET_ALL);
    }
    if (ret == ESP_OK) {
        hal->in_flight_head = start;
        hal->in_flight_end = end;
        end->dma_desc.eof = 1;
        //temporarily break the ring here; it will be re-connected in ``send_isr_eof()``.
        hal->in_flight_next = SEND_DESC_NEXT(end);
        SEND_DESC_NEXT_SET(end, NULL);
    }
    return ESP_OK;
}

bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t* hal)
{
    // Go to idle state (cur_start=NULL) if the transmission is done,
    // also update the sequence and recycle the descs.
    if (sdio_slave_ll_send_done(hal->slc)) {
        //check current state
        HAL_ASSERT(send_get_state(hal) == STATE_SENDING);
        sdio_slave_ll_send_intr_clr(hal->slc);
        return true;
    } else {
        return false;
    }
}

//clears the counter but keeps the data
esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t* hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "reset counter when transmission started", ESP_ERR_INVALID_STATE);

    sdio_slave_ll_send_write_len(hal->slc, 0);
    HAL_EARLY_LOGV(TAG, "last_len: %08X", sdio_slave_ll_send_read_len(hal->host));

    hal->tail_pkt_len = 0;
    sdio_slave_hal_send_desc_t *desc = hal->in_flight_head;
    while(desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }
    // in theory the next desc should be the one right after the last desc of in_flight_head,
    // but the link of the last desc is NULL, so get the desc from the ringbuf directly.
    desc = (sdio_slave_hal_send_desc_t*)sdio_ringbuf_peek_front(&(hal->send_desc_queue));
    while(desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }

    return ESP_OK;
}

static esp_err_t send_get_inflight_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt,
                                        bool init)
{
    esp_err_t ret;
    if (init) {
        HAL_ASSERT(hal->returned_desc == NULL);
        hal->returned_desc = hal->in_flight_head;
        send_set_state(hal, STATE_GETTING_RESULT);
    }

    if (hal->returned_desc != NULL) {
        *out_arg = hal->returned_desc->arg;
        hal->returned_desc = SEND_DESC_NEXT(hal->returned_desc);
        ret = ESP_OK;
    } else {
        if (hal->in_flight_head != NULL) {
            // fix the link of the last desc, which was broken when it was sent
            HAL_ASSERT(hal->in_flight_end != NULL);
            SEND_DESC_NEXT_SET(hal->in_flight_end, hal->in_flight_next);

            *out_returned_cnt = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*)hal->in_flight_head);
        }

        hal->in_flight_head = NULL;
        hal->in_flight_end = NULL;

        ret = ESP_ERR_NOT_FOUND;
    }
    return ret;
}

static esp_err_t send_get_unsent_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *head = NULL;
    sdio_slave_hal_send_desc_t *tail = NULL;
    ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &head, &tail, RINGBUF_GET_ONE);

    if (ret == ESP_OK) {
        //currently each packet takes only one desc.
        HAL_ASSERT(head == tail);
        (*out_arg) = head->arg;
        (*out_return_cnt) = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*) head);
    } else if (ret == ESP_ERR_NOT_FOUND) {
        // if in the wait-to-send state, set the tail sequence number to the value last sent,
        // as if the packet waiting to be sent had never been queued.
        // Go to idle state (cur_end!=NULL and cur_start=NULL)
        send_set_state(hal, STATE_IDLE);
        hal->tail_pkt_len = sdio_slave_ll_send_read_len(hal->host);
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t* out_returned_cnt)
{
    bool init = (send_get_state(hal) == STATE_SENDING);
    if (init) {
        HAL_ASSERT(hal->in_flight_head != NULL);
    } else {
        HAL_ASSERT(send_get_state(hal) == STATE_GETTING_RESULT);
    }
    *out_returned_cnt = 0;

    esp_err_t ret = send_get_inflight_desc(hal, out_arg, out_returned_cnt, init);

    if (ret == ESP_ERR_NOT_FOUND) {
        // Go to wait-for-packet state
        send_set_state(hal, STATE_WAIT_FOR_START);
    }
    return ret;
}


esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret = ESP_OK;
    *out_return_cnt = 0;
    bool init = (send_get_state(hal) == STATE_IDLE);
    if (!init) {
        if (send_get_state(hal) != STATE_GETTING_RESULT && send_get_state(hal) != STATE_GETTING_UNSENT_DESC) {
            return ESP_ERR_INVALID_STATE;
        }
    }

    if (init || send_get_state(hal) == STATE_GETTING_RESULT) {
        ret = send_get_inflight_desc(hal, out_arg, out_return_cnt, init);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_GETTING_UNSENT_DESC);
        }
    }
    if (send_get_state(hal) == STATE_GETTING_UNSENT_DESC) {
        ret = send_get_unsent_desc(hal, out_arg, out_return_cnt);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_IDLE);
        }
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    // Go to wait-for-sending state (cur_start!=NULL && cur_end==NULL) if not sending and a new packet is ready.
    // Note we may also enter this state by stopping sending in the app.
    if (send_get_state(hal) == STATE_WAIT_FOR_START) {
        if (hal->in_flight_head == NULL) {
            send_check_new_packet(hal);
        }
        // Go to sending state (cur_start and cur_end != NULL) if there is a packet to send.
        if (hal->in_flight_head) {
            send_new_packet(hal);
            ret = ESP_OK;
        } else {
            ret = ESP_ERR_NOT_FOUND;
        }
    } else {
        ret = ESP_ERR_INVALID_STATE;
    }
    return ret;
}

static esp_err_t send_write_desc(uint8_t* desc, void* arg)
{
    sdio_slave_hal_send_desc_t* next_desc = SEND_DESC_NEXT(desc);
    memcpy(desc, arg, sizeof(sdio_slave_hal_send_desc_t));
    SEND_DESC_NEXT_SET(desc, next_desc);
    return ESP_OK;
}

static void send_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, true);
}

esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t* hal, uint8_t *addr, size_t len, void *arg)
{
    hal->tail_pkt_len += len;
    sdio_slave_hal_send_desc_t new_desc = {
        .dma_desc = {
            .size   =   len,
            .length =   len,
            .buf    =   addr,
            .owner  =   1,
            // in stream mode, the eof bit is only set (in the ISR) when a new packet is ready to be sent
            .eof    =   (hal->sending_mode == SDIO_SLAVE_SEND_PACKET),
        },
        .arg    =   arg,
        .pkt_len = hal->tail_pkt_len,
    };

    esp_err_t ret = sdio_ringbuf_send(&(hal->send_desc_queue), send_write_desc, &new_desc);
    send_isr_invoke(hal);
    return ret;
}
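
/*
 * Illustrative sketch (not compiled): how a driver built on this HAL might
 * drive the send path from its ISR, based on the API above. The surrounding
 * plumbing and the function name are hypothetical.
 */
#if 0
static void example_send_isr(sdio_slave_context_t *hal)
{
    if (sdio_slave_hal_send_eof_happened(hal)) {
        void *arg;
        uint32_t returned_cnt;
        //reap the ``arg`` of every finished descriptor until none is left
        while (sdio_slave_hal_send_get_next_finished_arg(hal, &arg, &returned_cnt) == ESP_OK) {
            //... hand ``arg`` (e.g. the buffer pointer) back to the app ...
        }
    }
    //load the next packet onto the hardware if one is queued
    sdio_slave_hal_send_new_packet_if_exist(hal);
}
#endif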

/*---------------------------------------------------------------------------
 *                  Receive
 *--------------------------------------------------------------------------*/

static sdio_slave_ll_desc_t* recv_get_first_empty_buf(sdio_slave_context_t* hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    while(desc && desc->owner == 0) {
        desc = STAILQ_NEXT(desc, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_stop(sdio_slave_context_t* hal)
{
    sdio_slave_ll_set_ioready(hal->hinf, false); //set IO ready to 0 to stop the host from using the slave
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_recv_stop(hal->slc);
    sdio_slave_ll_recv_intr_ena(hal->slc, false);
}

//touches the linked list; should be protected by a spinlock
bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t* hal)
{
    if (hal->recv_cur_ret == NULL || hal->recv_cur_ret->owner != 0) return false;

    // This may cause the ``cur_ret`` pointer to become NULL, indicating the list is empty.
    // In this case ``tx_done`` should no longer happen until a new desc is appended.
    // The app is responsible for placing the pointer back to the right place when appending a new desc.

    hal->recv_cur_ret = STAILQ_NEXT(hal->recv_cur_ret, qe);
    return true;
}

bool sdio_slave_hal_recv_done(sdio_slave_context_t *hal)
{
    bool ret = sdio_slave_ll_recv_done(hal->slc);
    if (ret) {
        sdio_slave_ll_recv_done_clear(hal->slc);
    }
    return ret;
}

sdio_slave_ll_desc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    if (desc) {
        STAILQ_REMOVE_HEAD(queue, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_init_desc(sdio_slave_context_t* hal, sdio_slave_ll_desc_t *desc, uint8_t *start)
{
    *desc = (sdio_slave_ll_desc_t) {
        .size = hal->recv_buffer_size,
        .buf = start,
    };
}

void sdio_slave_hal_recv_start(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_reset(hal->slc);
    sdio_slave_ll_desc_t *desc = recv_get_first_empty_buf(hal);
    if (!desc) {
        HAL_LOGD(TAG, "recv: restart without desc");
    } else {
        //the counter is handled when add/flush/reset
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
    }
}

void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_size_reset(hal->slc);
    sdio_slave_ll_desc_t *desc = recv_get_first_empty_buf(hal);
    while (desc != NULL) {
        sdio_slave_ll_recv_size_inc(hal->slc);
        desc = STAILQ_NEXT(desc, qe);
    }
}

void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    assert(desc != NULL && desc->owner == 0);
    STAILQ_REMOVE_HEAD(queue, qe);
    desc->owner = 1;
    STAILQ_INSERT_TAIL(queue, desc, qe);
    sdio_slave_ll_recv_size_inc(hal->slc);
    //we only add it to the tail here, without starting the DMA or increasing the buffer num.
}

void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, sdio_slave_ll_desc_t *desc)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    desc->owner = 1;

    sdio_slave_ll_desc_t *const tail = STAILQ_LAST(queue, sdio_slave_ll_desc_s, qe);

    STAILQ_INSERT_TAIL(queue, desc, qe);
    if (hal->recv_cur_ret == NULL) {
        hal->recv_cur_ret = desc;
    }

    if (tail == NULL) {
        //nothing in the linked list yet, start a new linked-list operation.
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
        HAL_LOGV(TAG, "recv_load_buf: start new");
    } else {
        //restart the former linked-list operation
        sdio_slave_ll_recv_restart(hal->slc);
        HAL_LOGV(TAG, "recv_load_buf: restart");
    }
    sdio_slave_ll_recv_size_inc(hal->slc);
}

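/*
 * Illustrative sketch (not compiled): the receive flow a driver might build
 * on the functions above. The ISR split and the function name are
 * hypothetical.
 */
#if 0
static void example_recv_isr(sdio_slave_context_t *hal)
{
    while (sdio_slave_hal_recv_done(hal)) {
        //each successful call below means one more buffer has been filled
        while (sdio_slave_hal_recv_has_next_item(hal)) {
            //... notify the app that a buffer is ready to be unloaded ...
        }
    }
}
#endif
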
static inline void show_queue_item(sdio_slave_ll_desc_t *item)
{
    HAL_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
    HAL_EARLY_LOGI(TAG, "   buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}

static void __attribute((unused)) dump_queue(sdio_slave_hal_recv_stailq_t *queue)
{
    int cnt = 0;
    sdio_slave_ll_desc_t *item = NULL;
    HAL_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
    STAILQ_FOREACH(item, queue, qe) {
        cnt++;
        show_queue_item(item);
    }
    HAL_EARLY_LOGI(TAG, "total: %d", cnt);
}

/*---------------------------------------------------------------------------
 *                  Host
 *--------------------------------------------------------------------------*/
void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask)
{
    *out_int_mask = sdio_slave_ll_host_get_intena(hal->host);
}

void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_intr_clear(hal->host, mask); //clear the interrupts given in the mask
}

void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_set_intena(hal->host, mask);
}

void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_send_int(hal->slc, mask);
}

uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos)
{
    return sdio_slave_ll_host_get_reg(hal->host, pos);
}
void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg)
{
    sdio_slave_ll_host_set_reg(hal->host, pos, reg);
}

void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask)
{
    sdio_slave_ll_slvint_fetch_clear(hal->slc, out_int_mask);
}