/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include "esp_types.h"
#include "esp_attr.h"
#include "esp_check.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_err.h"
#include "esp_pm.h"
#include "esp_heap_caps.h"
#include "esp_rom_sys.h"
#include "soc/lldesc.h"
#include "soc/soc_caps.h"
#include "soc/spi_periph.h"
#include "soc/soc_memory_layout.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "sdkconfig.h"
#if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
#include "esp_ipc.h"    //assumed missing include: provides esp_ipc_call_blocking(), used below to register the ISR on a specific core
#endif

#include "driver/gpio.h"
#include "driver/spi_slave.h"
#include "hal/gpio_hal.h"
#include "hal/spi_slave_hal.h"
#include "esp_private/spi_slave_internal.h"
#include "esp_private/spi_common_internal.h"


static const char *SPI_TAG = "spi_slave";

#define SPI_CHECK(a, str, ret_val) ESP_RETURN_ON_FALSE(a, ret_val, SPI_TAG, str)


#ifdef CONFIG_SPI_SLAVE_ISR_IN_IRAM
#define SPI_SLAVE_ISR_ATTR IRAM_ATTR
#else
#define SPI_SLAVE_ISR_ATTR
#endif

#ifdef CONFIG_SPI_SLAVE_IN_IRAM
#define SPI_SLAVE_ATTR IRAM_ATTR
#else
#define SPI_SLAVE_ATTR
#endif

typedef struct {
    int id;
    spi_bus_config_t bus_config;
    spi_slave_interface_config_t cfg;
    intr_handle_t intr;
    spi_slave_hal_context_t hal;
    spi_slave_transaction_t *cur_trans;
    uint32_t flags;
    uint32_t intr_flags;
    int max_transfer_sz;
    QueueHandle_t trans_queue;
    QueueHandle_t ret_queue;
    bool dma_enabled;
    bool cs_iomux;
    uint8_t cs_in_signal;
    uint32_t tx_dma_chan;
    uint32_t rx_dma_chan;
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_t;

static spi_slave_t *spihost[SOC_SPI_PERIPH_NUM];

static void spi_intr(void *arg);

__attribute__((always_inline))
static inline bool is_valid_host(spi_host_device_t host)
{
    //SPI1 can be used as GPSPI only on ESP32
#if CONFIG_IDF_TARGET_ESP32
    return host >= SPI1_HOST && host <= SPI3_HOST;
#elif (SOC_SPI_PERIPH_NUM == 2)
    return host == SPI2_HOST;
#elif (SOC_SPI_PERIPH_NUM == 3)
    return host >= SPI2_HOST && host <= SPI3_HOST;
#endif
}

static inline bool SPI_SLAVE_ISR_ATTR bus_is_iomux(spi_slave_t *host)
{
    return host->flags & SPICOMMON_BUSFLAG_IOMUX_PINS;
}

static void SPI_SLAVE_ISR_ATTR freeze_cs(spi_slave_t *host)
{
    esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ONE_INPUT, host->cs_in_signal, false);
}

// Use this function instead of cs_initial to avoid overwriting the output config
// This is used in tests via internal GPIO matrix connections
static inline void SPI_SLAVE_ISR_ATTR restore_cs(spi_slave_t *host)
{
    if (host->cs_iomux) {
        gpio_ll_iomux_in(GPIO_HAL_GET_HW(GPIO_PORT_0), host->cfg.spics_io_num, host->cs_in_signal);
    } else {
        esp_rom_gpio_connect_in_signal(host->cfg.spics_io_num, host->cs_in_signal, false);
    }
}

#if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
typedef struct {
    spi_slave_t *host;
    esp_err_t *err;
} spi_ipc_param_t;

static void ipc_isr_reg_to_core(void *args)
{
    spi_slave_t *host = ((spi_ipc_param_t *)args)->host;
    *((spi_ipc_param_t *)args)->err = esp_intr_alloc(spicommon_irqsource_for_host(host->id), host->intr_flags | ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)host, &host->intr);
}
#endif

esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, const spi_slave_interface_config_t *slave_config, spi_dma_chan_t dma_chan)
{
    bool spi_chan_claimed;
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;
    esp_err_t err;
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK(dma_chan >= SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif CONFIG_IDF_TARGET_ESP32S2
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == (int)host || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only supports spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH | ESP_INTR_FLAG_EDGE | ESP_INTR_FLAG_INTRDISABLED)) == 0, "intr flag not allowed", ESP_ERR_INVALID_ARG);
#ifndef CONFIG_SPI_SLAVE_ISR_IN_IRAM
    SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM) == 0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_SLAVE_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK(slave_config->spics_io_num < 0 || GPIO_IS_VALID_GPIO(slave_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);

    //Check that post_trans_cb is provided when the `SPI_SLAVE_NO_RETURN_RESULT` flag is set.
    if (slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT) {
        SPI_CHECK(slave_config->post_trans_cb != NULL, "SPI_SLAVE_NO_RETURN_RESULT is set, but no post_trans_cb callback is provided", ESP_ERR_INVALID_ARG);
    }

    spi_chan_claimed = spicommon_periph_claim(host, "spi slave");
    SPI_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spihost[host] = malloc(sizeof(spi_slave_t));
    if (spihost[host] == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    memset(spihost[host], 0, sizeof(spi_slave_t));
    memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));
    memcpy(&spihost[host]->bus_config, bus_config, sizeof(spi_bus_config_t));
    spihost[host]->id = host;

    bool use_dma = (dma_chan != SPI_DMA_DISABLED);
    spihost[host]->dma_enabled = use_dma;
    if (use_dma) {
        ret = spicommon_dma_chan_alloc(host, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &spihost[host]->flags);
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }
    if (slave_config->spics_io_num >= 0) {
        spicommon_cs_initialize(host, slave_config->spics_io_num, 0, !bus_is_iomux(spihost[host]));
        // check and record how the CS line is actually routed (IOMUX or GPIO matrix)
        spihost[host]->cs_iomux = (slave_config->spics_io_num == spi_periph_signal[host].spics0_iomux_pin) && bus_is_iomux(spihost[host]);
        spihost[host]->cs_in_signal = spi_periph_signal[host].spics_in;
    }

    // The slave DMA suffers from unexpected transactions. While DMA is enabled, freeze the CS input (tie it to a constant inactive level) so nothing is received until a transaction is queued.
    if (use_dma) freeze_cs(spihost[host]);

    int dma_desc_ct = 0;
    spihost[host]->tx_dma_chan = actual_tx_dma_chan;
    spihost[host]->rx_dma_chan = actual_rx_dma_chan;
    if (use_dma) {
        //See how many DMA descriptors we need and allocate them
        dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
        if (dma_desc_ct == 0) dma_desc_ct = 1;  //default to one descriptor (~4 kB) when no maximum is given
        spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold at most SOC_SPI_MAXIMUM_BUFFER_SIZE bytes.
        spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
    }
#ifdef CONFIG_PM_ENABLE
    err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave",
                             &spihost[host]->pm_lock);
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }
    // Lock APB frequency while SPI slave driver is in use
    esp_pm_lock_acquire(spihost[host]->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create queues
    spihost[host]->trans_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
    if (!spihost[host]->trans_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!(slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT)) {
        spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
        if (!spihost[host]->ret_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }

#if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
    if (bus_config->isr_cpu_id > INTR_CPU_ID_AUTO) {
        spihost[host]->intr_flags = bus_config->intr_flags;
        SPI_CHECK(bus_config->isr_cpu_id <= INTR_CPU_ID_1, "invalid core id", ESP_ERR_INVALID_ARG);
        spi_ipc_param_t ipc_args = {
            .host = spihost[host],
            .err = &err,
        };
        esp_ipc_call_blocking(INTR_CPU_CONVERT_ID(bus_config->isr_cpu_id), ipc_isr_reg_to_core, (void *)&ipc_args);
    } else
#endif
    {
        err = esp_intr_alloc(spicommon_irqsource_for_host(host), bus_config->intr_flags | ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)spihost[host], &spihost[host]->intr);
    }
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }

    spi_slave_hal_context_t *hal = &spihost[host]->hal;
    //assign the base addresses of the SPI, RX DMA and TX DMA peripheral registers
    spi_slave_hal_config_t hal_config = {
        .host_id = host,
        .dma_in = SPI_LL_GET_HW(host),
        .dma_out = SPI_LL_GET_HW(host)
    };
    spi_slave_hal_init(hal, &hal_config);

    if (dma_desc_ct) {
        hal->dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        hal->dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        if (!hal->dmadesc_tx || !hal->dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
    hal->dmadesc_n = dma_desc_ct;
    hal->rx_lsbfirst = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
    hal->tx_lsbfirst = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
    hal->mode = slave_config->mode;
    hal->use_dma = use_dma;
    hal->tx_dma_chan = actual_tx_dma_chan;
    hal->rx_dma_chan = actual_rx_dma_chan;

    spi_slave_hal_setup_device(hal);

    return ESP_OK;

cleanup:
    //Only touch the host struct if it was actually allocated; the first failure path jumps here with spihost[host] == NULL.
    if (spihost[host]) {
        if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue);
        if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue);
        free(spihost[host]->hal.dmadesc_tx);
        free(spihost[host]->hal.dmadesc_rx);
#ifdef CONFIG_PM_ENABLE
        if (spihost[host]->pm_lock) {
            esp_pm_lock_release(spihost[host]->pm_lock);
            esp_pm_lock_delete(spihost[host]->pm_lock);
        }
#endif
        spi_slave_hal_deinit(&spihost[host]->hal);
        if (spihost[host]->dma_enabled) {
            spicommon_dma_chan_free(host);
        }
    }

    free(spihost[host]);
    spihost[host] = NULL;
    spicommon_periph_free(host);

    return ret;
}
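
/*
 * Illustrative usage sketch (not part of the driver): a typical spi_slave_initialize() call.
 * The host, pin numbers and queue size below are assumptions chosen only for the example.
 *
 *     spi_bus_config_t buscfg = {
 *         .mosi_io_num = 12,
 *         .miso_io_num = 13,
 *         .sclk_io_num = 15,
 *         .quadwp_io_num = -1,
 *         .quadhd_io_num = -1,
 *     };
 *     spi_slave_interface_config_t slvcfg = {
 *         .mode = 0,
 *         .spics_io_num = 14,
 *         .queue_size = 3,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_initialize(SPI2_HOST, &buscfg, &slvcfg, SPI_DMA_CH_AUTO));
 */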

esp_err_t spi_slave_free(spi_host_device_t host)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue);
    if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue);
    if (spihost[host]->dma_enabled) {
        spicommon_dma_chan_free(host);
    }
    spicommon_bus_free_io_cfg(&spihost[host]->bus_config);
    free(spihost[host]->hal.dmadesc_tx);
    free(spihost[host]->hal.dmadesc_rx);
    esp_intr_free(spihost[host]->intr);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_release(spihost[host]->pm_lock);
    esp_pm_lock_delete(spihost[host]->pm_lock);
#endif //CONFIG_PM_ENABLE
    free(spihost[host]);
    spihost[host] = NULL;
    spicommon_periph_free(host);
    return ESP_OK;
}

/**
 * @note
 * This API is used to reset the SPI Slave transaction queue. After calling this function:
 * - The SPI Slave transaction queue will be reset.
 *
 * Therefore, this API shouldn't be called while the corresponding SPI Master is performing a transaction.
 *
 * @note
 * We don't actually need to enter a critical section here.
 * The SPI Slave ISR only triggers after the corresponding SPI Master's transaction is done,
 * and this function is not expected to be called while that Master is mid-transaction,
 * so concurrent access to these registers cannot happen.
 *
 */
esp_err_t SPI_SLAVE_ATTR spi_slave_queue_reset(spi_host_device_t host)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);

    esp_intr_disable(spihost[host]->intr);
    spi_ll_set_int_stat(spihost[host]->hal.hw);

    spihost[host]->cur_trans = NULL;
    xQueueReset(spihost[host]->trans_queue);

    return ESP_OK;
}
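
/*
 * Illustrative usage sketch (not part of the driver): drop transactions that were queued but
 * never picked up by the master. The host id is an assumption chosen only for the example.
 *
 *     // Only call this while the master is known to be idle, as the notes above explain.
 *     ESP_ERROR_CHECK(spi_slave_queue_reset(SPI2_HOST));
 *     // Any previously queued spi_slave_transaction_t descriptors are abandoned and may be
 *     // reused or freed by the caller.
 */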

esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_reset_isr(spi_host_device_t host)
{
    ESP_RETURN_ON_FALSE_ISR(is_valid_host(host), ESP_ERR_INVALID_ARG, SPI_TAG, "invalid host");
    ESP_RETURN_ON_FALSE_ISR(spihost[host], ESP_ERR_INVALID_ARG, SPI_TAG, "host not slave");

    spi_slave_transaction_t *trans = NULL;
    BaseType_t do_yield = pdFALSE;
    while (pdFALSE == xQueueIsQueueEmptyFromISR(spihost[host]->trans_queue)) {
        xQueueReceiveFromISR(spihost[host]->trans_queue, &trans, &do_yield);
    }
    if (do_yield) {
        portYIELD_FROM_ISR();
    }

    spihost[host]->cur_trans = NULL;
    return ESP_OK;
}

esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait)
{
    BaseType_t r;
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer == NULL || esp_ptr_dma_capable(trans_desc->tx_buffer),
              "txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer == NULL ||
              (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) &&
               (trans_desc->length % 4 == 0)),
              "rxdata not in DMA-capable memory or not WORD aligned", ESP_ERR_INVALID_ARG);

    SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
    r = xQueueSend(spihost[host]->trans_queue, (void *)&trans_desc, ticks_to_wait);
    if (!r) return ESP_ERR_TIMEOUT;
    esp_intr_enable(spihost[host]->intr);
    return ESP_OK;
}
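
/*
 * Illustrative sketch (not part of the driver): when DMA is enabled, the checks above require
 * the tx/rx buffers to live in DMA-capable memory, the rx buffer to be word aligned, and the
 * length to be a multiple of 32 bits. One way to satisfy that is to allocate the buffers with
 * heap_caps_malloc() and MALLOC_CAP_DMA. Buffer names and sizes are assumptions for the example.
 *
 *     uint8_t *sendbuf = heap_caps_malloc(128, MALLOC_CAP_DMA);   // DMA-capable, word aligned
 *     uint8_t *recvbuf = heap_caps_malloc(128, MALLOC_CAP_DMA);
 *     spi_slave_transaction_t t = {
 *         .length = 128 * 8,      // length is in bits; 128 bytes is a multiple of 4, as required for DMA rx
 *         .tx_buffer = sendbuf,
 *         .rx_buffer = recvbuf,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_queue_trans(SPI2_HOST, &t, portMAX_DELAY));
 */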

esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_trans_isr(spi_host_device_t host, const spi_slave_transaction_t *trans_desc)
{
    BaseType_t r;
    BaseType_t do_yield = pdFALSE;
    ESP_RETURN_ON_FALSE_ISR(is_valid_host(host), ESP_ERR_INVALID_ARG, SPI_TAG, "invalid host");
    ESP_RETURN_ON_FALSE_ISR(spihost[host], ESP_ERR_INVALID_ARG, SPI_TAG, "host not slave");
    ESP_RETURN_ON_FALSE_ISR(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer == NULL || esp_ptr_dma_capable(trans_desc->tx_buffer),
                            ESP_ERR_INVALID_ARG, SPI_TAG, "txdata not in DMA-capable memory");
    ESP_RETURN_ON_FALSE_ISR(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer == NULL ||
                            (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) &&
                             (trans_desc->length % 4 == 0)),
                            ESP_ERR_INVALID_ARG, SPI_TAG, "rxdata not in DMA-capable memory or not WORD aligned");
    ESP_RETURN_ON_FALSE_ISR(trans_desc->length <= spihost[host]->max_transfer_sz * 8, ESP_ERR_INVALID_ARG, SPI_TAG, "data transfer > host maximum");

    r = xQueueSendFromISR(spihost[host]->trans_queue, (void *)&trans_desc, &do_yield);
    if (!r) {
        return ESP_ERR_NO_MEM;
    }
    if (do_yield) {
        portYIELD_FROM_ISR();
    }
    return ESP_OK;
}

esp_err_t SPI_SLAVE_ATTR spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transaction_t **trans_desc, TickType_t ticks_to_wait)
{
    BaseType_t r;
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    //if SPI_SLAVE_NO_RETURN_RESULT is set, ret_queue will always be empty
    SPI_CHECK(!(spihost[host]->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT), "API not Supported!", ESP_ERR_NOT_SUPPORTED);

    r = xQueueReceive(spihost[host]->ret_queue, (void *)trans_desc, ticks_to_wait);
    if (!r) return ESP_ERR_TIMEOUT;
    return ESP_OK;
}
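
/*
 * Illustrative usage sketch (not part of the driver): the asynchronous pattern queues a
 * transaction and collects it later; spi_slave_get_trans_result() blocks until the master
 * has clocked the transfer. Variable names are assumptions chosen only for the example.
 *
 *     spi_slave_transaction_t t = { .length = 16 * 8, .tx_buffer = sendbuf, .rx_buffer = recvbuf };
 *     ESP_ERROR_CHECK(spi_slave_queue_trans(SPI2_HOST, &t, portMAX_DELAY));
 *     // ... do other work while waiting for the master ...
 *     spi_slave_transaction_t *done = NULL;
 *     ESP_ERROR_CHECK(spi_slave_get_trans_result(SPI2_HOST, &done, portMAX_DELAY));
 *     // done == &t; done->trans_len now holds the number of bits actually transferred.
 */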

esp_err_t SPI_SLAVE_ATTR spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait)
{
    esp_err_t ret;
    spi_slave_transaction_t *ret_trans;
    //ToDo: check if any spi transfers in flight
    ret = spi_slave_queue_trans(host, trans_desc, ticks_to_wait);
    if (ret != ESP_OK) return ret;
    ret = spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait);
    if (ret != ESP_OK) return ret;
    assert(ret_trans == trans_desc);
    return ESP_OK;
}
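
/*
 * Illustrative usage sketch (not part of the driver): the synchronous variant simply queues the
 * transaction and waits for the master to finish it in one call. Buffer names are assumptions
 * chosen only for the example.
 *
 *     spi_slave_transaction_t t = { .length = 32 * 8, .tx_buffer = sendbuf, .rx_buffer = recvbuf };
 *     ESP_ERROR_CHECK(spi_slave_transmit(SPI2_HOST, &t, portMAX_DELAY));
 */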

#if CONFIG_IDF_TARGET_ESP32
static void SPI_SLAVE_ISR_ATTR spi_slave_restart_after_dmareset(void *arg)
{
    spi_slave_t *host = (spi_slave_t *)arg;
    esp_intr_enable(host->intr);
}
#endif //#if CONFIG_IDF_TARGET_ESP32

//This is run in interrupt context and apart from initialization and destruction, this is the only code
//touching the host (=spihost[x]) variable. The rest of the data arrives in queues. That is why there are
//no muxes in this code.
static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
{
    BaseType_t r;
    BaseType_t do_yield = pdFALSE;
    spi_slave_transaction_t *trans = NULL;
    spi_slave_t *host = (spi_slave_t *)arg;
    spi_slave_hal_context_t *hal = &host->hal;

    assert(spi_slave_hal_usr_is_done(hal));

    bool use_dma = host->dma_enabled;
    if (host->cur_trans) {
        // When DMA is enabled, the slave RX DMA suffers from unexpected transactions. Freeze the CS input again until the next transaction is ready.
        if (use_dma) freeze_cs(host);

        spi_slave_hal_store_result(hal);
        host->cur_trans->trans_len = spi_slave_hal_get_rcv_bitlen(hal);

#if CONFIG_IDF_TARGET_ESP32
        //This workaround is only for esp32
        if (spi_slave_hal_dma_need_reset(hal)) {
            //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always the same
            spicommon_dmaworkaround_req_reset(host->tx_dma_chan, spi_slave_restart_after_dmareset, host);
        }
#endif //#if CONFIG_IDF_TARGET_ESP32

        if (host->cfg.post_trans_cb) host->cfg.post_trans_cb(host->cur_trans);

        if (!(host->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT)) {
            xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield);
        }
        host->cur_trans = NULL;
    }

#if CONFIG_IDF_TARGET_ESP32
    //This workaround is only for esp32
    if (use_dma) {
        //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always the same
        spicommon_dmaworkaround_idle(host->tx_dma_chan);
        if (spicommon_dmaworkaround_reset_in_progress()) {
            //We need to wait for the reset to complete. Disable the interrupt (it will be re-enabled by the reset callback) and exit the ISR.
            esp_intr_disable(host->intr);
            if (do_yield) portYIELD_FROM_ISR();
            return;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32

    //Disable the interrupt before checking the queue to avoid a concurrency issue.
    esp_intr_disable(host->intr);
    //Grab the next transaction
    r = xQueueReceiveFromISR(host->trans_queue, &trans, &do_yield);
    if (r) {
        // sanity check
        assert(trans);

        //Re-enable the interrupt since there is a transaction to process
        esp_intr_enable(host->intr);

        //We have a transaction. Send it.
        host->cur_trans = trans;

        hal->bitlen = trans->length;
        hal->rx_buffer = trans->rx_buffer;
        hal->tx_buffer = trans->tx_buffer;

#if CONFIG_IDF_TARGET_ESP32
        if (use_dma) {
            //This workaround is only for esp32
            //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always the same
            spicommon_dmaworkaround_transfer_active(host->tx_dma_chan);
        }
#endif //#if CONFIG_IDF_TARGET_ESP32

        spi_slave_hal_prepare_data(hal);

        //The slave RX DMA gets disturbed by unexpected transactions. Only reconnect the CS line once the slave is ready.
        if (use_dma) {
            restore_cs(host);
        }

        //Kick off the transfer
        spi_slave_hal_user_start(hal);
        if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(trans);
    }
    if (do_yield) portYIELD_FROM_ISR();
}