/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
Architecture:

We can initialize a SPI driver, but we don't talk to the SPI driver itself; we address a device. A device is
essentially a combination of SPI port and CS pin, plus some information about the specifics of communication
to the device (timing, command/address length, etc.). The arbitration between tasks is also done per device.
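
For example, registering a device on a bus that has already been initialized with
``spi_bus_initialize()`` might look like the sketch below (pin number, clock speed and
queue depth are illustrative placeholders, not values taken from this driver):

    spi_device_interface_config_t devcfg = {
        .clock_speed_hz = 10 * 1000 * 1000,   // 10 MHz, example value
        .mode = 0,                            // SPI mode 0
        .spics_io_num = 5,                    // example CS GPIO
        .queue_size = 7,                      // allow up to 7 queued transactions
    };
    spi_device_handle_t dev;
    ESP_ERROR_CHECK(spi_bus_add_device(SPI2_HOST, &devcfg, &dev));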

A device can work in interrupt mode or in polling mode, plus a third, more
complicated mode which combines the two modes above:

1. Work in the ISR with a set of queues; one per device.

   The idea is that to send something to a SPI device, you allocate a
   transaction descriptor. It contains some information about the transfer,
   like the length, address, command etc., plus pointers to the transmit and
   receive buffers. The address of this block gets pushed into the transmit
   queue. The SPI driver does its magic, and sends and retrieves the data
   eventually. The data gets written to the receive buffers, if needed the
   transaction descriptor is modified to indicate returned parameters, and
   the entire thing goes into the return queue, where whatever software
   initiated the transaction can retrieve it.

   The entire thing is run from the SPI interrupt handler. If the SPI is done
   transmitting/receiving but nothing is in the queue, it will not clear the
   SPI interrupt but just disable it by esp_intr_disable. This way, when a
   new transaction is queued, pushing the packet into the send queue and
   re-enabling the interrupt (by esp_intr_enable) will trigger the interrupt
   again, which can then take care of the sending.
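
   For example, queueing an interrupt transaction and waiting for its result might
   look like the sketch below (``dev`` is a handle from ``spi_bus_add_device()``;
   the buffer and its length are illustrative placeholders):

       uint8_t data[4] = {0x01, 0x02, 0x03, 0x04};
       spi_transaction_t t = {
           .length = 8 * sizeof(data),   // transfer length is given in bits
           .tx_buffer = data,            // buffer to transmit
           .rx_buffer = NULL,            // nothing to receive in this example
       };
       // Push the descriptor into the transmit queue; the ISR picks it up.
       ESP_ERROR_CHECK(spi_device_queue_trans(dev, &t, portMAX_DELAY));
       // Block on the return queue until the ISR reports the transaction as done.
       spi_transaction_t *done;
       ESP_ERROR_CHECK(spi_device_get_trans_result(dev, &done, portMAX_DELAY));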

2. Work in polling mode in the task.

   In this mode we get rid of the ISR, the FreeRTOS queues and the task
   switching; the task is no longer blocked during a transaction. This
   increases the CPU load, but decreases the interval between SPI
   transactions. At any time only one device (in one task) can send polling
   transactions; transactions to other devices are blocked until the polling
   transaction of the current device is done.

   In polling mode the queue is not used; all the operations are done in the
   task. The task calls ``spi_device_polling_start`` to set up and start a
   new transaction, then calls ``spi_device_polling_end`` to handle the
   return value of the transaction.

   To handle the arbitration among devices, a device "temporarily" acquires
   the bus by the ``device_acquire_bus_internal`` function, which writes
   dev_request by a CAS operation. Other devices which want to send polling
   transactions but don't own the bus will block and wait until they are
   given the semaphore which indicates the ownership of the bus.

   In case the ISR is still sending transactions to other devices, the ISR
   maintains a ``random_idle`` flag indicating that it's not doing
   transactions. When the bus is locked, the ISR can only send new
   transactions to the acquiring device. The ISR will automatically disable
   itself and give the semaphore to the device once it is free. If the device
   sees the random_idle flag, it can directly start its polling transaction.
   Otherwise it should block and wait for the semaphore from the ISR.

   After the polling transaction, the driver releases the bus. During the
   release of the bus, the driver searches all other devices to see whether
   any device is waiting to acquire the bus; if so, it acquires the bus for
   that device and gives it a semaphore if the device queue is empty, or
   invokes the ISR for it. If no other device needs to acquire the bus, but
   there are still transactions in the queues, the ISR will also be invoked.

   To get better polling efficiency, the user can call ``spi_device_acquire_bus``
   (which in turn calls the ``spi_bus_lock_acquire_core`` function) before a
   series of polling transactions to a device. The bus acquiring and task
   switching before and after each polling transaction is then skipped, as
   shown in the sketch below.
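
   A sketch of that pattern (``dev`` is a handle from ``spi_bus_add_device()``; the
   transaction contents are illustrative placeholders):

       // Acquire the bus once for a burst of polling transactions.
       ESP_ERROR_CHECK(spi_device_acquire_bus(dev, portMAX_DELAY));
       for (int i = 0; i < 10; i++) {
           spi_transaction_t t = {
               .flags = SPI_TRANS_USE_TXDATA,   // small payload kept inside the descriptor
               .length = 8,                     // 8 bits
               .tx_data = { (uint8_t)i },
           };
           // Completion is polled in the calling task; no ISR or queue involved.
           ESP_ERROR_CHECK(spi_device_polling_transmit(dev, &t));
       }
       // Release the bus so other devices (and queued transactions) can proceed.
       spi_device_release_bus(dev);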

3. Mixed mode

   The driver is written under the assumption that polling and interrupt
   transactions are not happening simultaneously. When sending polling
   transactions, it will check whether the ISR is active, which includes the
   case where the ISR is sending the interrupt transactions of the acquiring
   device. If the ISR is still working, the routine sending a polling
   transaction will be blocked and wait until the semaphore from the ISR
   indicates that the ISR is free now.

   A fatal case is when a polling transaction is in flight but the ISR
   receives an interrupt transaction. The behavior of the driver is then
   unpredictable, so this must be strictly forbidden.

We have two bits to control the interrupt:

1. The slave->trans_done bit, which is automatically asserted when a transaction is done.

   This bit is cleared during an interrupt transaction, so that the interrupt
   will be triggered when the transaction is done; for polling transactions
   the SW can check the bit to see if the transaction is done.

   When no transaction is in flight, the bit is kept active, so that the SW
   can easily invoke the ISR by enabling the interrupt.

2. The system interrupt enable/disable, controlled by esp_intr_enable and esp_intr_disable.

   The interrupt is disabled (by the ISR itself) when no interrupt transaction
   is queued. When the bus is not occupied, any task that queues a
   transaction will enable the interrupt to invoke the ISR. When the bus is
   occupied by a device, other devices will defer invoking the ISR until the
   bus is released. The device acquiring the bus can still send interrupt
   transactions by enabling the interrupt.

*/
112 
113 #include <string.h>
114 #include "driver/spi_common_internal.h"
115 #include "driver/spi_master.h"
116 
117 #include "esp_log.h"
118 #include "freertos/task.h"
119 #include "freertos/queue.h"
120 #include "soc/soc_memory_layout.h"
121 #include "driver/gpio.h"
122 #include "hal/spi_hal.h"
123 #include "esp_heap_caps.h"
124 
125 
126 typedef struct spi_device_t spi_device_t;
127 
128 /// struct to hold private transaction data (like tx and rx buffer for DMA).
129 typedef struct {
130     spi_transaction_t   *trans;
    const uint32_t *buffer_to_send;   //equals tx_data if SPI_TRANS_USE_TXDATA is applied; otherwise, if the original buffer wasn't in DMA-capable memory, this points to a temporary DMA-capable copy;
                                //otherwise it points to the original buffer, or NULL if no buffer is assigned.
    uint32_t *buffer_to_rcv;    // similar to buffer_to_send
134 } spi_trans_priv_t;
135 
136 typedef struct {
137     int id;
138     spi_device_t* device[DEV_NUM_MAX];
139     intr_handle_t intr;
140     spi_hal_context_t hal;
141     spi_trans_priv_t cur_trans_buf;
142     int cur_cs;     //current device doing transaction
143     const spi_bus_attr_t* bus_attr;
144 
145     /**
     * the bus is permanently controlled by a device until `spi_device_release_bus` is called. Otherwise
     * the acquisition of the SPI bus is released when `spi_device_polling_end` is called.
148      */
149     spi_device_t* device_acquiring_lock;
150 
151 //debug information
    bool polling;   //in the middle of a polling transaction; prevents queueing new transactions into the ISR
153 } spi_host_t;
154 
155 struct spi_device_t {
156     int id;
157     QueueHandle_t trans_queue;
158     QueueHandle_t ret_queue;
159     spi_device_interface_config_t cfg;
160     spi_hal_dev_config_t hal_dev;
161     spi_host_t *host;
162     spi_bus_lock_dev_handle_t dev_lock;
163 };
164 
165 static spi_host_t* bus_driver_ctx[SOC_SPI_PERIPH_NUM] = {};
166 
167 static const char *SPI_TAG = "spi_master";
168 #define SPI_CHECK(a, str, ret_val, ...) \
169     if (unlikely(!(a))) { \
170         ESP_LOGE(SPI_TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
171         return (ret_val); \
172     }
173 
174 
175 static void spi_intr(void *arg);
176 static void spi_bus_intr_enable(void *host);
177 static void spi_bus_intr_disable(void *host);
178 
179 static esp_err_t spi_master_deinit_driver(void* arg);
180 
static inline bool is_valid_host(spi_host_device_t host)
182 {
183 //SPI1 can be used as GPSPI only on ESP32
184 #if CONFIG_IDF_TARGET_ESP32
185     return host >= SPI1_HOST && host <= SPI3_HOST;
186 #elif (SOC_SPI_PERIPH_NUM == 2)
187     return host == SPI2_HOST;
188 #elif (SOC_SPI_PERIPH_NUM == 3)
189     return host >= SPI2_HOST && host <= SPI3_HOST;
190 #endif
191 }
192 
// Should be called before any devices are actually registered or used.
// Currently automatically called after `spi_bus_initialize()` and when the first device is registered.
static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
196 {
197     esp_err_t err = ESP_OK;
198 
199     const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
200     SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
201     SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
202     // spihost contains atomic variables, which should not be put in PSRAM
203     spi_host_t* host = heap_caps_malloc(sizeof(spi_host_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
204     if (host == NULL) {
205         err = ESP_ERR_NO_MEM;
206         goto cleanup;
207     }
208 
209     *host = (spi_host_t) {
210         .id = host_id,
211         .cur_cs = DEV_NUM_MAX,
212         .polling = false,
213         .device_acquiring_lock = NULL,
214         .bus_attr = bus_attr,
215     };
216 
217     if (host_id != SPI1_HOST) {
218         // interrupts are not allowed on SPI1 bus
219         err = esp_intr_alloc(spicommon_irqsource_for_host(host_id),
220                             bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED,
221                             spi_intr, host, &host->intr);
222         if (err != ESP_OK) {
223             goto cleanup;
224         }
225     }
226 
227     //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
228     spi_hal_config_t hal_config = {
229         //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
230         .dma_in = SPI_LL_GET_HW(host_id),
231         .dma_out = SPI_LL_GET_HW(host_id),
232         .dma_enabled = bus_attr->dma_enabled,
233         .dmadesc_tx = bus_attr->dmadesc_tx,
234         .dmadesc_rx = bus_attr->dmadesc_rx,
235         .tx_dma_chan = bus_attr->tx_dma_chan,
236         .rx_dma_chan = bus_attr->rx_dma_chan,
237         .dmadesc_n = bus_attr->dma_desc_num,
238     };
239     spi_hal_init(&host->hal, host_id, &hal_config);
240 
241     if (host_id != SPI1_HOST) {
242         //SPI1 attributes are already initialized at start up.
243         spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
244         spi_bus_lock_set_bg_control(lock, spi_bus_intr_enable, spi_bus_intr_disable, host);
245         spi_bus_register_destroy_func(host_id, spi_master_deinit_driver, host);
246     }
247 
248     bus_driver_ctx[host_id] = host;
249     return ESP_OK;
250 
251 cleanup:
252     if (host) {
253         spi_hal_deinit(&host->hal);
254         if (host->intr) {
255             esp_intr_free(host->intr);
256         }
257     }
258     free(host);
259     return err;
260 }
261 
static esp_err_t spi_master_deinit_driver(void* arg)
263 {
264     spi_host_t *host = (spi_host_t*)arg;
265     SPI_CHECK(host != NULL, "host_id not in use", ESP_ERR_INVALID_STATE);
266 
267     int host_id = host->id;
268     SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
269 
270     int x;
271     for (x=0; x<DEV_NUM_MAX; x++) {
        SPI_CHECK(host->device[x] == NULL, "not all CSes freed", ESP_ERR_INVALID_STATE);
273     }
274 
275     spi_hal_deinit(&host->hal);
276 
277     if (host->intr) {
278         esp_intr_free(host->intr);
279     }
280     free(host);
281     bus_driver_ctx[host_id] = NULL;
282     return ESP_OK;
283 }
284 
void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dummy_o, int* cycles_remain_o)
286 {
287     int timing_dummy;
288     int timing_miso_delay;
289 
290     spi_hal_cal_timing(eff_clk, gpio_is_used, input_delay_ns, &timing_dummy, &timing_miso_delay);
291     if (dummy_o) *dummy_o = timing_dummy;
292     if (cycles_remain_o) *cycles_remain_o = timing_miso_delay;
293 }
294 
int spi_get_freq_limit(bool gpio_is_used, int input_delay_ns)
296 {
297     return spi_hal_get_freq_limit(gpio_is_used, input_delay_ns);
298 }
299 
300 /*
301  Add a device. This allocates a CS line for the device, allocates memory for the device structure and hooks
302  up the CS pin to whatever is specified.
303 */
esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle)
305 {
306     spi_device_t *dev = NULL;
307     esp_err_t err = ESP_OK;
308 
309     SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
310     if (bus_driver_ctx[host_id] == NULL) {
        //lazily initialize the driver; it gets deinitialized when the bus is freed
312         err = spi_master_init_driver(host_id);
313         if (err != ESP_OK) {
314             return err;
315         }
316     }
317 
318     spi_host_t *host = bus_driver_ctx[host_id];
319     const spi_bus_attr_t* bus_attr = host->bus_attr;
320     SPI_CHECK(dev_config->spics_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(dev_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);
321     SPI_CHECK(dev_config->clock_speed_hz > 0, "invalid sclk speed", ESP_ERR_INVALID_ARG);
322 #ifdef CONFIG_IDF_TARGET_ESP32
323     //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full
324     //duplex mode does absolutely nothing on the ESP32.
325     SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) ||
326         (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG);
327 #endif
328     uint32_t lock_flag = ((dev_config->spics_io_num != -1)? SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED: 0);
329 
330     spi_bus_lock_dev_config_t lock_config = {
331         .flags = lock_flag,
332     };
333     spi_bus_lock_dev_handle_t dev_handle;
334     err = spi_bus_lock_register_dev(bus_attr->lock, &lock_config, &dev_handle);
335     if (err != ESP_OK) {
336         goto nomem;
337     }
338 
339     int freecs = spi_bus_lock_get_dev_id(dev_handle);
340     SPI_CHECK(freecs != -1, "no free cs pins for the host", ESP_ERR_NOT_FOUND);
341 
342     //input parameters to calculate timing configuration
343     int half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
344     int no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
345     int duty_cycle = (dev_config->duty_cycle_pos==0) ? 128 : dev_config->duty_cycle_pos;
346     int use_gpio = !(bus_attr->flags & SPICOMMON_BUSFLAG_IOMUX_PINS);
347     spi_hal_timing_param_t timing_param = {
348         .half_duplex = half_duplex,
349         .no_compensate = no_compensate,
350         .clock_speed_hz = dev_config->clock_speed_hz,
351         .duty_cycle = duty_cycle,
352         .input_delay_ns = dev_config->input_delay_ns,
353         .use_gpio = use_gpio
354     };
355 
356     //output values of timing configuration
357     spi_hal_timing_conf_t temp_timing_conf;
358     int freq;
359     esp_err_t ret = spi_hal_cal_clock_conf(&timing_param, &freq, &temp_timing_conf);
360     SPI_CHECK(ret==ESP_OK, "assigned clock speed not supported", ret);
361 
362     //Allocate memory for device
363     dev = malloc(sizeof(spi_device_t));
364     if (dev == NULL) goto nomem;
365     memset(dev, 0, sizeof(spi_device_t));
366 
367     dev->id = freecs;
368     dev->dev_lock = dev_handle;
369 
370     //Allocate queues, set defaults
371     dev->trans_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
372     dev->ret_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
373     if (!dev->trans_queue || !dev->ret_queue) {
374         goto nomem;
375     }
376 
377     //We want to save a copy of the dev config in the dev struct.
378     memcpy(&dev->cfg, dev_config, sizeof(spi_device_interface_config_t));
379     dev->cfg.duty_cycle_pos = duty_cycle;
380     // TODO: if we have to change the apb clock among transactions, re-calculate this each time the apb clock lock is locked.
381 
382     //Set CS pin, CS options
383     if (dev_config->spics_io_num >= 0) {
384         spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio);
385     }
386 
387     //save a pointer to device in spi_host_t
388     host->device[freecs] = dev;
389     //save a pointer to host in spi_device_t
390     dev->host= host;
391 
392     //initialise the device specific configuration
393     spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
394     hal_dev->mode = dev_config->mode;
395     hal_dev->cs_setup = dev_config->cs_ena_pretrans;
396     hal_dev->cs_hold = dev_config->cs_ena_posttrans;
    //setting hold_time to 0 will not actually append a delay to CS;
    //set it to 1 since we do need at least one clock of hold time in most cases
399     if (hal_dev->cs_hold == 0) {
400         hal_dev->cs_hold = 1;
401     }
402     hal_dev->cs_pin_id = dev->id;
403     hal_dev->timing_conf = temp_timing_conf;
404     hal_dev->sio = (dev_config->flags) & SPI_DEVICE_3WIRE ? 1 : 0;
405     hal_dev->half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
406     hal_dev->tx_lsbfirst = dev_config->flags & SPI_DEVICE_TXBIT_LSBFIRST ? 1 : 0;
407     hal_dev->rx_lsbfirst = dev_config->flags & SPI_DEVICE_RXBIT_LSBFIRST ? 1 : 0;
408     hal_dev->no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
409 #if SOC_SPI_SUPPORT_AS_CS
410     hal_dev->as_cs = dev_config->flags& SPI_DEVICE_CLK_AS_CS ? 1 : 0;
411 #endif
412     hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0;
413 
414     *handle = dev;
415     ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000);
416 
417     return ESP_OK;
418 
419 nomem:
420     if (dev) {
421         if (dev->trans_queue) vQueueDelete(dev->trans_queue);
422         if (dev->ret_queue) vQueueDelete(dev->ret_queue);
423         spi_bus_lock_unregister_dev(dev->dev_lock);
424     }
425     free(dev);
426     return ESP_ERR_NO_MEM;
427 }
428 
esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
430 {
431     SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG);
    //These checks aren't exhaustive; another thread could sneak in a transaction in between. These are only here to
    //catch design errors and aren't meant to be triggered during normal operation.
434     SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
435     SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
436     SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
437 
    //Free the CS pin
439     int spics_io_num = handle->cfg.spics_io_num;
440     if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num);
441 
442     //Kill queues
443     vQueueDelete(handle->trans_queue);
444     vQueueDelete(handle->ret_queue);
445     spi_bus_lock_unregister_dev(handle->dev_lock);
446 
447     assert(handle->host->device[handle->id] == handle);
448     handle->host->device[handle->id] = NULL;
449     free(handle);
450     return ESP_OK;
451 }
452 
int spi_cal_clock(int fapb, int hz, int duty_cycle, uint32_t *reg_o)
454 {
455     return spi_ll_master_cal_clock(fapb, hz, duty_cycle, reg_o);
456 }
457 
int spi_get_actual_clock(int fapb, int hz, int duty_cycle)
459 {
460     return spi_hal_master_cal_clock(fapb, hz, duty_cycle);
461 }
462 
// Set up the device-specific configuration registers. Called every time a new
// transaction is to be sent, but only applies new configurations when the device
// changes.
static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev)
467 {
468     spi_bus_lock_dev_handle_t dev_lock = dev->dev_lock;
469     spi_hal_context_t *hal = &dev->host->hal;
470     spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
471 
472     if (spi_bus_lock_touch(dev_lock)) {
473         /* Configuration has not been applied yet. */
474         spi_hal_setup_device(hal, hal_dev);
475     }
476 }
477 
static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host)
479 {
480     spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock);
481     if (!dev_lock) return NULL;
482 
483     return host->device[spi_bus_lock_get_dev_id(dev_lock)];
484 }
485 
// Debug only
// NOTE if the acquiring is not fully completed, `spi_bus_lock_get_acquiring_dev`
// may return a spurious `NULL`, causing this function to return a spurious `false`.
static inline SPI_MASTER_ISR_ATTR bool spi_bus_device_is_polling(spi_device_t *dev)
490 {
491     return get_acquiring_dev(dev->host) == dev && dev->host->polling;
492 }
493 
494 /*-----------------------------------------------------------------------------
495     Working Functions
496 -----------------------------------------------------------------------------*/
497 
498 // The interrupt may get invoked by the bus lock.
static void SPI_MASTER_ISR_ATTR spi_bus_intr_enable(void *host)
500 {
501     esp_intr_enable(((spi_host_t*)host)->intr);
502 }
503 
504 // The interrupt is always disabled by the ISR itself, not exposed
static void SPI_MASTER_ISR_ATTR spi_bus_intr_disable(void *host)
506 {
507     esp_intr_disable(((spi_host_t*)host)->intr);
508 }
509 
// This function is called to send a new transaction, either in the ISR or in the task.
// Sets up the transaction-specific registers and the linked list used by the DMA (or the FIFO if DMA is not used)
static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
513 {
514     spi_transaction_t *trans = trans_buf->trans;
515     spi_host_t *host = dev->host;
516     spi_hal_context_t *hal = &(host->hal);
517     spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
518 
519     host->cur_cs = dev->id;
520 
521     //Reconfigure according to device settings, the function only has effect when the dev_id is changed.
522     spi_setup_device(dev);
523 
524     //set the transaction specific configuration each time before a transaction setup
525     spi_hal_trans_config_t hal_trans = {};
526     hal_trans.tx_bitlen = trans->length;
527     hal_trans.rx_bitlen = trans->rxlength;
528     hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
529     hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
530     hal_trans.cmd = trans->cmd;
531     hal_trans.addr = trans->addr;
532     hal_trans.cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 1 : 0;
533 
534     //Set up OIO/QIO/DIO if needed
535     hal_trans.line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 :
536         (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1;
537 #if SOC_SPI_SUPPORT_OCT
538     if (trans->flags & SPI_TRANS_MODE_OCT) {
539         hal_trans.line_mode.data_lines = 8;
540     }
541 #endif
542     hal_trans.line_mode.addr_lines = (trans->flags & SPI_TRANS_MULTILINE_ADDR) ? hal_trans.line_mode.data_lines : 1;
543     hal_trans.line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans.line_mode.data_lines : 1;
544 
545     if (trans->flags & SPI_TRANS_VARIABLE_CMD) {
546         hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits;
547     } else {
548         hal_trans.cmd_bits = dev->cfg.command_bits;
549     }
550     if (trans->flags & SPI_TRANS_VARIABLE_ADDR) {
551         hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits;
552     } else {
553         hal_trans.addr_bits = dev->cfg.address_bits;
554     }
555     if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) {
556         hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits;
557     } else {
558         hal_trans.dummy_bits = dev->cfg.dummy_bits;
559     }
560 
561     spi_hal_setup_trans(hal, hal_dev, &hal_trans);
562     spi_hal_prepare_data(hal, hal_dev, &hal_trans);
563 
564     //Call pre-transmission callback, if any
565     if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
566     //Kick off transfer
567     spi_hal_user_start(hal);
568 }
569 
// This function is called when a transaction is done, either in the ISR or in the task.
// Fetches the data from the FIFO and calls the ``post_cb``.
static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
573 {
574     spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
575 
576     spi_hal_fetch_result(&host->hal);
577     //Call post-transaction callback, if any
578     spi_device_t* dev = host->device[host->cur_cs];
579     if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans);
580 
581     host->cur_cs = DEV_NUM_MAX;
582 }
583 
584 // This is run in interrupt context.
static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
586 {
587     BaseType_t do_yield = pdFALSE;
588     spi_host_t *host = (spi_host_t *)arg;
589     const spi_bus_attr_t* bus_attr = host->bus_attr;
590 
591     assert(spi_hal_usr_is_done(&host->hal));
592 
    /*
     * Helps to skip the handling of an in-flight transaction, and the disabling of the interrupt.
     * esp_intr_enable will be called (b) after a new BG request is queued (a) in the task;
     * while esp_intr_disable should be called (c) if we check and find that the sending queue is empty (d).
     * If (c) is called after (d), then there is a risk that things happen in this sequence:
     * (d) -> (a) -> (b) -> (c), and in this case the interrupt is disabled while there's a pending BG request in the queue.
     * To avoid this, the interrupt is disabled here, and re-enabled later if required.
     */
601     if (!spi_bus_lock_bg_entry(bus_attr->lock)) {
602         /*------------ deal with the in-flight transaction -----------------*/
603         assert(host->cur_cs != DEV_NUM_MAX);
604         //Okay, transaction is done.
605         const int cs = host->cur_cs;
606         //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
607         if (bus_attr->dma_enabled) {
608             //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
609             spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan);
610         }
611 
612         //cur_cs is changed to DEV_NUM_MAX here
613         spi_post_trans(host);
614         // spi_bus_lock_bg_pause(bus_attr->lock);
615         //Return transaction descriptor.
616         xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield);
617 #ifdef CONFIG_PM_ENABLE
618         //Release APB frequency lock
619         esp_pm_lock_release(bus_attr->pm_lock);
620 #endif
621     }
622 
623     /*------------ new transaction starts here ------------------*/
624     assert(host->cur_cs == DEV_NUM_MAX);
625 
626     spi_bus_lock_handle_t lock = host->bus_attr->lock;
627     BaseType_t trans_found = pdFALSE;
628 
629 
630     // There should be remaining requests
631     BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock));
632 
633     do {
634         spi_bus_lock_dev_handle_t acq_dev_lock = spi_bus_lock_get_acquiring_dev(lock);
635         spi_bus_lock_dev_handle_t desired_dev = acq_dev_lock;
636         bool resume_task = false;
637         spi_device_t* device_to_send = NULL;
638 
639         if (!acq_dev_lock) {
640             // This function may assign a new acquiring device, otherwise it will suggest a desired device with BG active
641             // We use either of them without further searching in the devices.
642             // If the return value is true, it means either there's no acquiring device, or the acquiring device's BG is active,
643             // We stay in the ISR to deal with those transactions of desired device, otherwise nothing will be done, check whether we need to resume some other tasks, or just quit the ISR
644             resume_task = spi_bus_lock_bg_check_dev_acq(lock, &desired_dev);
645         }
646 
647         if (!resume_task) {
648             bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev);
649             if (dev_has_req) {
650                 device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)];
651                 trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield);
652                 if (!trans_found) {
653                     spi_bus_lock_bg_clear_req(desired_dev);
654                 }
655             }
656         }
657 
658         if (trans_found) {
659             spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf;
660             if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
661                 //mark channel as active, so that the DMA will not be reset by the slave
662                 //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
663                 spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan);
664             }
665             spi_new_trans(device_to_send, cur_trans_buf);
666         }
667         // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG),
668         // or resume acquiring device task (if quit due to bus acquiring).
669     } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield));
670 
671     if (do_yield) portYIELD_FROM_ISR();
672 }
673 
static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc)
675 {
676     SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
677     spi_host_t *host = handle->host;
678     const spi_bus_attr_t* bus_attr = host->bus_attr;
679     bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer);
680     bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer);
681     spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc;
682     bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0);
683     bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy;
684     bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0);
685 
686     //check transmission length
687     SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
688     SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
689     SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
690     SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
691     SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG);
692     //check working mode
693 #if SOC_SPI_SUPPORT_OCT
694     SPI_CHECK(!(host->id == SPI3_HOST && trans_desc->flags & SPI_TRANS_MODE_OCT), "SPI3 does not support octal mode", ESP_ERR_INVALID_ARG);
695     SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both Octal mode and 3-wire-mode", ESP_ERR_INVALID_ARG);
696     SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && !is_half_duplex), "Incompatible when setting to both Octal mode and half duplex mode", ESP_ERR_INVALID_ARG);
697 #endif
698     SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both multi-line mode and 3-wire-mode", ESP_ERR_INVALID_ARG);
699     SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "Incompatible when setting to both multi-line mode and half duplex mode", ESP_ERR_INVALID_ARG);
700 #ifdef CONFIG_IDF_TARGET_ESP32
701     SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG );
702 #elif CONFIG_IDF_TARGET_ESP32S3
703     SPI_CHECK(!is_half_duplex || !tx_enabled || !rx_enabled, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
704 #endif
705     //MOSI phase is skipped only when both tx_buffer and SPI_TRANS_USE_TXDATA are not set.
706     SPI_CHECK(trans_desc->length != 0 || !tx_enabled, "trans tx_buffer should be NULL and SPI_TRANS_USE_TXDATA should be cleared to skip MOSI phase.", ESP_ERR_INVALID_ARG);
707     //MISO phase is skipped only when both rx_buffer and SPI_TRANS_USE_RXDATA are not set.
708     //If set rxlength=0 in full_duplex mode, it will be automatically set to length
709     SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG);
710     //In Full duplex mode, default rxlength to be the same as length, if not filled in.
711     // set rxlength to length is ok, even when rx buffer=NULL
712     if (trans_desc->rxlength==0 && !is_half_duplex) {
713         trans_desc->rxlength=trans_desc->length;
714     }
715     //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode.
716     SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG);
717 
718     return ESP_OK;
719 }
720 
static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
722 {
723     spi_transaction_t *trans_desc = trans_buf->trans;
724     if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
725         trans_buf->buffer_to_send != trans_desc->tx_buffer) {
726         free((void *)trans_buf->buffer_to_send); //force free, ignore const
727     }
728     // copy data from temporary DMA-capable buffer back to IRAM buffer and free the temporary one.
729     if ((void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] &&
730         trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
731         if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
732             memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
733         } else {
734             memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
735         }
736         free(trans_buf->buffer_to_rcv);
737     }
738 }
739 
static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_transaction_t *trans_desc, spi_trans_priv_t* new_desc, bool isdma)
741 {
742     *new_desc = (spi_trans_priv_t) { .trans = trans_desc, };
743 
744     // rx memory assign
745     uint32_t* rcv_ptr;
746     if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
747         rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
748     } else {
        //if neither RXDATA nor rx_buffer is used, buffer_to_rcv is assigned NULL
750         rcv_ptr = trans_desc->rx_buffer;
751     }
752     if (rcv_ptr && isdma && (!esp_ptr_dma_capable(rcv_ptr) || ((int)rcv_ptr % 4 != 0))) {
        //if the rx buffer in the descriptor is not DMA-capable, malloc a new one. The rx buffer length needs to be a multiple of 32 bits to avoid heap corruption.
754         ESP_LOGD(SPI_TAG, "Allocate RX buffer for DMA" );
755         rcv_ptr = heap_caps_malloc((trans_desc->rxlength + 31) / 8, MALLOC_CAP_DMA);
756         if (rcv_ptr == NULL) goto clean_up;
757     }
758     new_desc->buffer_to_rcv = rcv_ptr;
759 
760     // tx memory assign
761     const uint32_t *send_ptr;
762     if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
763         send_ptr = (uint32_t *)&trans_desc->tx_data[0];
764     } else {
        //if neither TXDATA nor tx_buffer is used, the tx data pointer is assigned NULL
766         send_ptr = trans_desc->tx_buffer ;
767     }
768     if (send_ptr && isdma && !esp_ptr_dma_capable( send_ptr )) {
        //if the tx buffer in the descriptor is not DMA-capable, malloc a new one
770         ESP_LOGD(SPI_TAG, "Allocate TX buffer for DMA" );
771         uint32_t *temp = heap_caps_malloc((trans_desc->length + 7) / 8, MALLOC_CAP_DMA);
772         if (temp == NULL) goto clean_up;
773 
774         memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 );
775         send_ptr = temp;
776     }
777     new_desc->buffer_to_send = send_ptr;
778 
779     return ESP_OK;
780 
781 clean_up:
782     uninstall_priv_desc(new_desc);
783     return ESP_ERR_NO_MEM;
784 }
785 
esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
787 {
788     esp_err_t ret = check_trans_valid(handle, trans_desc);
789     if (ret != ESP_OK) return ret;
790 
791     spi_host_t *host = handle->host;
792 
793     SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
794 
795     /* Even when using interrupt transfer, the CS can only be kept activated if the bus has been
796      * acquired with `spi_device_acquire_bus()` first. */
797     if (host->device_acquiring_lock != handle && (trans_desc->flags & SPI_TRANS_CS_KEEP_ACTIVE)) {
798         return ESP_ERR_INVALID_ARG;
799     }
800 
801     spi_trans_priv_t trans_buf;
802     ret = setup_priv_desc(trans_desc, &trans_buf, (host->bus_attr->dma_enabled));
803     if (ret != ESP_OK) return ret;
804 
805 #ifdef CONFIG_PM_ENABLE
806     esp_pm_lock_acquire(host->bus_attr->pm_lock);
807 #endif
808     //Send to queue and invoke the ISR.
809 
810     BaseType_t r = xQueueSend(handle->trans_queue, (void *)&trans_buf, ticks_to_wait);
811     if (!r) {
812         ret = ESP_ERR_TIMEOUT;
813 #ifdef CONFIG_PM_ENABLE
814         //Release APB frequency lock
815         esp_pm_lock_release(host->bus_attr->pm_lock);
816 #endif
817         goto clean_up;
818     }
819 
    // The ISR will be invoked at the correct time by the lock with `spi_bus_intr_enable`.
821     ret = spi_bus_lock_bg_request(handle->dev_lock);
822     if (ret != ESP_OK) {
823         goto clean_up;
824     }
825     return ESP_OK;
826 
827 clean_up:
828     uninstall_priv_desc(&trans_buf);
829     return ret;
830 }
831 
esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
833 {
834     BaseType_t r;
835     spi_trans_priv_t trans_buf;
836     SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
837 
838     //use the interrupt, block until return
839     r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
840     if (!r) {
        // The memory occupied by the rx and tx DMA buffers is freed only when the result is received from the queue (transaction finished).
        // On timeout, the caller should wait and retry.
        // Every in-flight transaction request occupies internal memory as a DMA buffer if needed.
844         return ESP_ERR_TIMEOUT;
845     }
846     //release temporary buffers
847     uninstall_priv_desc(&trans_buf);
848     (*trans_desc) = trans_buf.trans;
849 
850     return ESP_OK;
851 }
852 
853 //Porcelain to do one blocking transmission.
esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc)
855 {
856     esp_err_t ret;
857     spi_transaction_t *ret_trans;
858     //ToDo: check if any spi transfers in flight
859     ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY);
860     if (ret != ESP_OK) return ret;
861 
862     ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY);
863     if (ret != ESP_OK) return ret;
864 
865     assert(ret_trans == trans_desc);
866     return ESP_OK;
867 }
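
/*
 * Usage sketch (not part of the driver): a one-byte command sent with the blocking
 * porcelain above. `dev` and the command byte 0x9F are illustrative placeholders.
 *
 *     spi_transaction_t t = {
 *         .flags = SPI_TRANS_USE_TXDATA,   // payload lives in t.tx_data, no separate buffer needed
 *         .length = 8,                     // 8 bits
 *         .tx_data = { 0x9F },             // example command byte
 *     };
 *     ESP_ERROR_CHECK(spi_device_transmit(dev, &t));   // blocks until the transaction completes
 */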
868 
esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait)
870 {
871     spi_host_t *const host = device->host;
872     SPI_CHECK(wait==portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG);
873     SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE );
874 
875     esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait);
876     if (ret != ESP_OK) {
877         return ret;
878     }
879     host->device_acquiring_lock = device;
880 
881     ESP_LOGD(SPI_TAG, "device%d locked the bus", device->id);
882 
883 #ifdef CONFIG_PM_ENABLE
    // though we don't suggest blocking the task before ``release_bus``, doing so is still allowed.
    // this keeps the SPI clock at 80MHz even if all tasks are blocked
886     esp_pm_lock_acquire(host->bus_attr->pm_lock);
887 #endif
888     //configure the device ahead so that we don't need to do it again in the following transactions
889     spi_setup_device(host->device[device->id]);
    //the DMA is also occupied by the device; all other devices using DMA should wait until the bus is released.
891     if (host->bus_attr->dma_enabled) {
892         //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
893         spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan);
894     }
895     return ESP_OK;
896 }
897 
// This function restores the configurations required in the non-polling mode
void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
900 {
901     spi_host_t *host = dev->host;
902 
903     if (spi_bus_device_is_polling(dev)){
904         ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress.");
905         assert(0);
906     }
907 
908     if (host->bus_attr->dma_enabled) {
909         //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
910         spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan);
911     }
912     //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
913 
914     //allow clock to be lower than 80MHz when all tasks blocked
915 #ifdef CONFIG_PM_ENABLE
916     //Release APB frequency lock
917     esp_pm_lock_release(host->bus_attr->pm_lock);
918 #endif
919     ESP_LOGD(SPI_TAG, "device%d release bus", dev->id);
920 
921     host->device_acquiring_lock = NULL;
922     esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
923     assert(ret == ESP_OK);
924     (void) ret;
925 }
926 
esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
928 {
929     esp_err_t ret;
930     SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG);
931     ret = check_trans_valid(handle, trans_desc);
932     if (ret!=ESP_OK) return ret;
933     SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
934 
935     /* If device_acquiring_lock is set to handle, it means that the user has already
936      * acquired the bus thanks to the function `spi_device_acquire_bus()`.
937      * In that case, we don't need to take the lock again. */
938     spi_host_t *host = handle->host;
939     if (host->device_acquiring_lock != handle) {
        /* The user cannot ask for the CS to be kept active as the bus is not locked/acquired. */
941         if ((trans_desc->flags & SPI_TRANS_CS_KEEP_ACTIVE) != 0) {
942             ret = ESP_ERR_INVALID_ARG;
943         } else {
944             ret = spi_bus_lock_acquire_start(handle->dev_lock, ticks_to_wait);
945         }
946     } else {
947         ret = spi_bus_lock_wait_bg_done(handle->dev_lock, ticks_to_wait);
948     }
949     if (ret != ESP_OK) return ret;
950 
951     ret = setup_priv_desc(trans_desc, &host->cur_trans_buf, (host->bus_attr->dma_enabled));
952     if (ret!=ESP_OK) return ret;
953 
954     //Polling, no interrupt is used.
955     host->polling = true;
956 
957     ESP_LOGV(SPI_TAG, "polling trans");
958     spi_new_trans(handle, &host->cur_trans_buf);
959 
960     return ESP_OK;
961 }
962 
esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle, TickType_t ticks_to_wait)
964 {
965     SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
966     spi_host_t *host = handle->host;
967 
968     assert(host->cur_cs == handle->id);
969     assert(handle == get_acquiring_dev(host));
970 
971     TickType_t start = xTaskGetTickCount();
972     while (!spi_hal_usr_is_done(&host->hal)) {
973         TickType_t end = xTaskGetTickCount();
974         if (end - start > ticks_to_wait) {
975             return ESP_ERR_TIMEOUT;
976         }
977     }
978 
979     ESP_LOGV(SPI_TAG, "polling trans done");
980     //deal with the in-flight transaction
981     spi_post_trans(host);
982     //release temporary buffers
983     uninstall_priv_desc(&host->cur_trans_buf);
984 
985     host->polling = false;
    /* Once again here, if device_acquiring_lock is set to `handle`, it means that the user has already
     * acquired the bus thanks to the function `spi_device_acquire_bus()`.
     * In that case, the lock must not be released now; it will be released when `spi_device_release_bus()` is called. */
989     if (host->device_acquiring_lock != handle) {
990         assert(host->device_acquiring_lock == NULL);
991         spi_bus_lock_acquire_end(handle->dev_lock);
992     }
993 
994     return ESP_OK;
995 }
996 
esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
998 {
999     esp_err_t ret;
1000     ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY);
1001     if (ret != ESP_OK) return ret;
1002 
1003     return spi_device_polling_end(handle, portMAX_DELAY);
1004 }
1005