/*
 * SPDX-FileCopyrightText: 2010-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include "esp_log.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "driver/gpio.h"
#include "esp_private/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"


#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif
#define SPIHD_CHECK(cond, warn, ret) do { if (!(cond)) { ESP_LOGE(TAG, warn); return ret; } } while(0)

typedef struct {
    bool dma_enabled;
    int max_transfer_sz;
    uint32_t flags;
    portMUX_TYPE int_spinlock;
    intr_handle_t intr;
#if SOC_GDMA_SUPPORTED
    gdma_channel_handle_t gdma_handle_tx;   //variable for storing the GDMA TX channel handle
    gdma_channel_handle_t gdma_handle_rx;
#endif
    intr_handle_t intr_dma;
    spi_slave_hd_callback_config_t callback;
    spi_slave_hd_hal_context_t hal;
    bool append_mode;

    QueueHandle_t tx_trans_queue;
    QueueHandle_t tx_ret_queue;
    QueueHandle_t rx_trans_queue;
    QueueHandle_t rx_ret_queue;
    QueueHandle_t tx_cnting_sem;
    QueueHandle_t rx_cnting_sem;

    spi_slave_hd_data_t *tx_desc;
    spi_slave_hd_data_t *rx_desc;
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_hd_slot_t;

static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

#if SOC_GDMA_SUPPORTED
static bool spi_gdma_tx_channel_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
#endif // SOC_GDMA_SUPPORTED

static void spi_slave_hd_intr_append(void *arg);
static void spi_slave_hd_intr_segment(void *arg);

esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                            const spi_slave_hd_slot_config_t *config)
{
    bool spi_chan_claimed;
    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;

    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
#if CONFIG_IDF_TARGET_ESP32S2
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only supports spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif

    spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
    SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spi_slave_hd_slot_t *host = heap_caps_calloc(1, sizeof(spi_slave_hd_slot_t), MALLOC_CAP_INTERNAL);
    if (host == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    spihost[host_id] = host;
    host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);

    if (host->dma_enabled) {
        ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host_id, config->spics_io_num, 0,
                            !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
    host->append_mode = append_mode;

    spi_slave_hd_hal_config_t hal_config = {
        .host_id = host_id,
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = host->dma_enabled,
        .tx_dma_chan = actual_tx_dma_chan,
        .rx_dma_chan = actual_rx_dma_chan,
        .append_mode = append_mode,
        .mode = config->mode,
        .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
        .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
    };

    if (host->dma_enabled) {
        //Allocate memory for all the DMA descriptors
        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }

        //Get the actual SPI bus transaction size in bytes.
        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        host->max_transfer_sz = 0;
    }

    //Init the hal according to the hal_config set above
    spi_slave_hd_hal_init(&host->hal, &hal_config);

#ifdef CONFIG_PM_ENABLE
    ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    // Lock APB frequency while SPI slave driver is in use
    esp_pm_lock_acquire(host->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create queues and semaphores
    host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    if (!host->tx_ret_queue || !host->rx_ret_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!host->append_mode) {
        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        if (!host->tx_trans_queue || !host->rx_trans_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    } else {
        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }

    //Allocate the interrupts
    if (!host->append_mode) {
        //Segment mode
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                                (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
                                (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    } else {
        //Append mode
        //On the ESP32-S2, the `cmd7` and `cmd8` interrupts registered as the SPI RX & TX interrupts come from
        //the SPI DMA interrupt source, even though the `cmd7` and `cmd8` interrupts are registered independently here
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
                                (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
#if SOC_GDMA_SUPPORTED
        // Configure GDMA and the ISR callback on GDMA-supported chips
        spicommon_gdma_get_handle(host_id, &host->gdma_handle_tx, GDMA_CHANNEL_DIRECTION_TX);
        gdma_tx_event_callbacks_t tx_cbs = {
            .on_trans_eof = spi_gdma_tx_channel_callback
        };
        gdma_register_tx_event_callbacks(host->gdma_handle_tx, &tx_cbs, host);
#else
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
                                (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
#endif  //#if SOC_GDMA_SUPPORTED
    }
    //Init the callbacks
    memcpy((uint8_t *)&host->callback, (uint8_t *)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
    spi_event_t event = 0;
    if (host->callback.cb_buffer_tx != NULL) event |= SPI_EV_BUF_TX;
    if (host->callback.cb_buffer_rx != NULL) event |= SPI_EV_BUF_RX;
    if (host->callback.cb_cmd9 != NULL) event |= SPI_EV_CMD9;
    if (host->callback.cb_cmdA != NULL) event |= SPI_EV_CMDA;
    spi_slave_hd_hal_enable_event_intr(&host->hal, event);

    return ESP_OK;

cleanup:
    // Memory is freed in the deinit function
    spi_slave_hd_deinit(host_id);
    return ret;
}
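
/*
 * Usage sketch (illustrative only, not part of the driver): bringing the
 * driver up in segment mode. The pin numbers and queue size below are
 * placeholder assumptions for a hypothetical board.
 *
 *     spi_bus_config_t bus_cfg = {
 *         .mosi_io_num = 11,
 *         .miso_io_num = 13,
 *         .sclk_io_num = 12,
 *         .quadwp_io_num = -1,
 *         .quadhd_io_num = -1,
 *         .max_transfer_sz = 4096,
 *     };
 *     spi_slave_hd_slot_config_t slot_cfg = {
 *         .mode = 0,
 *         .spics_io_num = 10,
 *         .flags = 0,                    // segment mode: SPI_SLAVE_HD_APPEND_MODE not set
 *         .queue_size = 4,
 *         .dma_chan = SPI_DMA_CH_AUTO,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slot_cfg));
 */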

esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    if (host == NULL) return ESP_ERR_INVALID_ARG;

    if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
    if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
    if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
    if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);
    free(host->hal.dmadesc_tx);
    free(host->hal.dmadesc_rx);
    esp_intr_free(host->intr);
    esp_intr_free(host->intr_dma);
#ifdef CONFIG_PM_ENABLE
    if (host->pm_lock) {
        esp_pm_lock_release(host->pm_lock);
        esp_pm_lock_delete(host->pm_lock);
    }
#endif

    spicommon_periph_free(host_id);
    if (host->dma_enabled) {
        spicommon_dma_chan_free(host_id);
    }
    free(host);
    spihost[host_id] = NULL;
    return ESP_OK;
}

static void tx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static void rx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t *host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = {.event = ev};
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}
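
/*
 * Callback sketch (illustrative assumption, not part of the driver): user
 * callbacks registered through `spi_slave_hd_callback_config_t` run in ISR
 * context with the `slave_cb_t` signature used above. A callback that wakes
 * a task via a semaphore passed in as the user argument could look like:
 *
 *     static bool my_cmd9_cb(void *arg, spi_slave_hd_event_t *event, BaseType_t *awoken)
 *     {
 *         SemaphoreHandle_t sem = (SemaphoreHandle_t)arg;  // user context from `callback.arg`
 *         xSemaphoreGiveFromISR(sem, awoken);              // sets *awoken if a task is unblocked
 *         return true;  // only meaningful for cb_sent/cb_recv: true = push the descriptor to the ret queue
 *     }
 */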

static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9,   callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA,   callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            // The return queue should never be full: all the data remaining in trans_queue + ret_queue can never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            // The return queue should never be full: all the data remaining in trans_queue + ret_queue can never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }

    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}

static IRAM_ATTR void spi_slave_hd_append_tx_isr(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret __attribute__((unused));

    spi_slave_hd_data_t *trans_desc;
    while (1) {
        bool trans_finish = false;
        trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
        if (!trans_finish) {
            break;
        }

        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = trans_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }

        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
            assert(ret == pdTRUE);

            ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
            assert(ret == pdTRUE);
        }
    }
    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}

static IRAM_ATTR void spi_slave_hd_append_rx_isr(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret __attribute__((unused));

    spi_slave_hd_data_t *trans_desc;
    size_t trans_len;
    while (1) {
        bool trans_finish = false;
        trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
        if (!trans_finish) {
            break;
        }
        trans_desc->trans_len = trans_len;

        bool ret_queue = true;
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = trans_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }

        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
            assert(ret == pdTRUE);

            ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
            assert(ret == pdTRUE);
        }
    }
    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}

#if SOC_GDMA_SUPPORTED
// `spi_gdma_tx_channel_callback` serves as the SPI TX interrupt of append mode on GDMA-supported targets
static IRAM_ATTR bool spi_gdma_tx_channel_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    assert(event_data);
    spi_slave_hd_append_tx_isr(user_data);
    return true;
}
#endif // SOC_GDMA_SUPPORTED

// SPI slave HD append mode ISR entry point
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    bool rx_done = false;
    bool tx_done = false;

    // Append mode
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        // NOTE: on GDMA-supported chips this event should never be seen here, as the TX path is entered only
        // through `spi_gdma_tx_channel_callback`; otherwise this code would have to be target-specific.
        tx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (rx_done) {
        spi_slave_hd_append_rx_isr(arg);
    }
    if (tx_done) {
        spi_slave_hd_append_tx_isr(arg);
    }
}

static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_data_t *trans;
    BaseType_t ret;

    if (chan == SPI_SLAVE_CHAN_TX) {
        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
    } else {
        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
    }
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }

    *out_trans = trans;
    return ESP_OK;
}

//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        tx_invoke(host);
    } else { //chan == SPI_SLAVE_CHAN_RX
        BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        rx_invoke(host);
    }
    return ESP_OK;
}

esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
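
/*
 * Usage sketch for the segment mode APIs above (illustrative only; the buffer
 * name and size are assumptions). A descriptor is queued, the master performs
 * the transfer, and the finished descriptor is collected afterwards:
 *
 *     DMA_ATTR static uint8_t rx_buf[128];    // must be DMA capable
 *
 *     spi_slave_hd_data_t trans = {
 *         .data = rx_buf,
 *         .len = sizeof(rx_buf),
 *     };
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
 *     // ret_trans now points at `trans`; ret_trans->trans_len is the byte count actually received
 */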

void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}

void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}
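
/*
 * Usage sketch for the shared-register APIs above (illustrative only; the
 * offsets and values are arbitrary assumptions). In the half-duplex protocol
 * the master side accesses these shared registers with its buffer read/write
 * commands, while the slave accesses them as below:
 *
 *     uint8_t status = 0xA5;
 *     spi_slave_hd_write_buffer(SPI2_HOST, 0, &status, 1);   // expose one byte at offset 0 for the master to read
 *
 *     uint8_t cmd;
 *     spi_slave_hd_read_buffer(SPI2_HOST, 4, &cmd, 1);       // fetch what the master wrote at offset 4
 */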

//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;

    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently only transactions with data length within 4092 bytes are supported", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
    }

    return err;
}

esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
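
/*
 * Usage sketch for the append mode APIs above (illustrative only; the buffer
 * name and size are assumptions). Unlike segment mode, new descriptors can be
 * appended while the DMA is still running; finished descriptors are collected
 * from the return queue:
 *
 *     DMA_ATTR static uint8_t tx_buf[256];    // must be DMA capable
 *
 *     spi_slave_hd_data_t trans = {
 *         .data = tx_buf,
 *         .len = sizeof(tx_buf),
 *     };
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_append_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
 */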