1 /*
2  * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /*
8 Architecture:
9 
10 We can initialize a SPI driver, but we don't talk to the SPI driver itself, we address a device. A device essentially
11 is a combination of SPI port and CS pin, plus some information about the specifics of communication to the device
12 (timing, command/address length etc). The arbitration between tasks is also in conception of devices.
13 
14 A device can work in interrupt mode and polling mode, and a third but
15 complicated mode which combines the two modes above:
16 
17 1. Work in the ISR with a set of queues; one per device.
18 
19    The idea is that to send something to a SPI device, you allocate a
20    transaction descriptor. It contains some information about the transfer
21    like the length, address, command etc, plus pointers to transmit and
22    receive buffer. The address of this block gets pushed into the transmit
23    queue. The SPI driver does its magic, and sends and retrieves the data
24    eventually. The data gets written to the receive buffers, if needed the
25    transaction descriptor is modified to indicate returned parameters and
26    the entire thing goes into the return queue, where whatever software
27    initiated the transaction can retrieve it.
28 
29    The entire thing is run from the SPI interrupt handler. If SPI is done
30    transmitting/receiving but nothing is in the queue, it will not clear the
31    SPI interrupt but just disable it by esp_intr_disable. This way, when a
32    new thing is sent, pushing the packet into the send queue and re-enabling
33    the interrupt (by esp_intr_enable) will trigger the interrupt again, which
34    can then take care of the sending.
35 
36 2. Work in the polling mode in the task.
37 
38    In this mode we get rid of the ISR, FreeRTOS queue and task switching, the
   task is no longer blocked during a transaction. This increases the CPU
   load, but decreases the interval between SPI transactions. Each time only one
41    device (in one task) can send polling transactions, transactions to
42    other devices are blocked until the polling transaction of current device
43    is done.
44 
45    In the polling mode, the queue is not used, all the operations are done
46    in the task. The task calls ``spi_device_polling_start`` to setup and start
47    a new transaction, then call ``spi_device_polling_end`` to handle the
48    return value of the transaction.
49 
50    To handle the arbitration among devices, the device "temporarily" acquire
51    a bus by the ``device_acquire_bus_internal`` function, which writes
   dev_request by CAS operation. Other devices which want to send polling
53    transactions but don't own the bus will block and wait until given the
54    semaphore which indicates the ownership of bus.
55 
56    In case of the ISR is still sending transactions to other devices, the ISR
57    should maintain an ``random_idle`` flag indicating that it's not doing
58    transactions. When the bus is locked, the ISR can only send new
59    transactions to the acquiring device. The ISR will automatically disable
60    itself and send semaphore to the device if the ISR is free. If the device
61    sees the random_idle flag, it can directly start its polling transaction.
62    Otherwise it should block and wait for the semaphore from the ISR.
63 
   After the polling transaction, the driver will release the bus. During the
   release of the bus, the driver searches all other devices to see whether
   any device is waiting to acquire the bus; if so, it acquires the bus for that
   device and sends it a semaphore if the device's queue is empty, or invokes
   the ISR for it. If no other device needs to acquire the bus, but there are
   still transactions in the queues, the ISR will also be invoked.
70 
71    To get better polling efficiency, user can call ``spi_device_acquire_bus``
72    function, which also calls the ``spi_bus_lock_acquire_core`` function,
73    before a series of polling transactions to a device. The bus acquiring and
74    task switching before and after the polling transaction will be escaped.
75 
76 3. Mixed mode
77 
78    The driver is written under the assumption that polling and interrupt
79    transactions are not happening simultaneously. When sending polling
80    transactions, it will check whether the ISR is active, which includes the
81    case the ISR is sending the interrupt transactions of the acquiring
82    device. If the ISR is still working, the routine sending a polling
83    transaction will get blocked and wait until the semaphore from the ISR
84    which indicates the ISR is free now.
85 
86    A fatal case is, a polling transaction is in flight, but the ISR received
87    an interrupt transaction. The behavior of the driver is unpredictable,
88    which should be strictly forbidden.
89 
90 We have two bits to control the interrupt:
91 
92 1. The slave->trans_done bit, which is automatically asserted when a transaction is done.
93 
94    This bit is cleared during an interrupt transaction, so that the interrupt
95    will be triggered when the transaction is done, or the SW can check the
96    bit to see if the transaction is done for polling transactions.
97 
   When no transaction is in-flight, the bit is kept active, so that the SW
   can easily invoke the ISR by enabling the interrupt.
100 
101 2. The system interrupt enable/disable, controlled by esp_intr_enable and esp_intr_disable.
102 
103    The interrupt is disabled (by the ISR itself) when no interrupt transaction
104    is queued. When the bus is not occupied, any task, which queues a
105    transaction into the queue, will enable the interrupt to invoke the ISR.
   When the bus is occupied by a device, other devices will put off invoking
   the ISR until the moment the bus is released. The device acquiring the bus
   can still send interrupt transactions by enabling the interrupt.
110 
111 */
112 
113 #include <string.h>
114 #include <sys/param.h>
115 #include "esp_private/spi_common_internal.h"
116 #include "driver/spi_master.h"
117 #include "esp_clk_tree.h"
118 #include "clk_ctrl_os.h"
119 #include "esp_log.h"
120 #include "esp_check.h"
121 #include "esp_ipc.h"
122 #include "freertos/task.h"
123 #include "freertos/queue.h"
124 #include "soc/soc_memory_layout.h"
125 #include "driver/gpio.h"
126 #include "hal/spi_hal.h"
127 #include "hal/spi_ll.h"
128 #include "esp_heap_caps.h"
129 
130 typedef struct spi_device_t spi_device_t;
131 
/// Struct to hold private transaction data (like tx and rx buffer for DMA).
typedef struct {
    spi_transaction_t   *trans;       // original transaction descriptor queued by the caller
    const uint32_t *buffer_to_send;   //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is;
                                //otherwise sets to the original buffer or NULL if no buffer is assigned.
                                // NOTE(review): "SPI_TRANS_USE_RXDATA" above presumably means SPI_TRANS_USE_TXDATA for the send buffer — confirm.
    uint32_t *buffer_to_rcv;    // similar to buffer_to_send
} spi_trans_priv_t;
139 
// Context of one SPI bus attached as master (one per SPI peripheral in use).
typedef struct {
    int id;                             // host (peripheral) number
    spi_device_t* device[DEV_NUM_MAX];  // attached devices, indexed by CS id; NULL when the slot is free
    intr_handle_t intr;                 // interrupt handle for this bus (not allocated on SPI1)
    spi_hal_context_t hal;              // HAL context used for register access
    spi_trans_priv_t cur_trans_buf;     // private data of the in-flight transaction
    int cur_cs;     //current device doing transaction; DEV_NUM_MAX when no transaction is in flight
    const spi_bus_attr_t* bus_attr;     // shared bus attributes (lock, DMA descriptors, flags)

    /**
     * the bus is permanently controlled by a device until `spi_bus_release_bus`` is called. Otherwise
     * the acquiring of SPI bus will be freed when `spi_device_polling_end` is called.
     */
    spi_device_t* device_acquiring_lock;

//debug information
    bool polling;   //in process of a polling, avoid of queue new transactions into ISR
} spi_host_t;
158 
// Context of one SPI device (one CS line on a bus).
struct spi_device_t {
    int id;                                 // CS id on the host (0 .. DEV_NUM_MAX-1)
    int real_clk_freq_hz;                   // actual clock frequency achieved by the divider
    QueueHandle_t trans_queue;              // transactions queued towards the ISR
    QueueHandle_t ret_queue;                // finished transactions returned by the ISR; NULL when SPI_DEVICE_NO_RETURN_RESULT is set
    spi_device_interface_config_t cfg;      // copy of the configuration passed to spi_bus_add_device
    spi_hal_dev_config_t hal_dev;           // per-device HAL configuration (mode, timing, CS behavior)
    spi_host_t *host;                       // bus this device is attached to
    spi_bus_lock_dev_handle_t dev_lock;     // handle used for bus-lock arbitration
};
169 
// Per-host driver context; NULL until spi_master_init_driver() runs for that host.
static spi_host_t* bus_driver_ctx[SOC_SPI_PERIPH_NUM] = {};

static const char *SPI_TAG = "spi_master";
// Argument-check helper: logs `str` and returns `ret_val` from the calling function when `a` is false.
#define SPI_CHECK(a, str, ret_val)  ESP_RETURN_ON_FALSE_ISR(a, ret_val, SPI_TAG, str)


static void spi_intr(void *arg);
static void spi_bus_intr_enable(void *host);
static void spi_bus_intr_disable(void *host);

static esp_err_t spi_master_deinit_driver(void* arg);
181 
/**
 * Check whether the given host number may be used as a GPSPI master
 * on the current target.
 *
 * @param host  Host (peripheral) id to check.
 * @return true if the host is usable by this driver.
 */
static inline bool is_valid_host(spi_host_device_t host)
{
//SPI1 can be used as GPSPI only on ESP32
#if CONFIG_IDF_TARGET_ESP32
    return host >= SPI1_HOST && host <= SPI3_HOST;
#elif (SOC_SPI_PERIPH_NUM == 2)
    return host == SPI2_HOST;
#elif (SOC_SPI_PERIPH_NUM == 3)
    return host >= SPI2_HOST && host <= SPI3_HOST;
#else
    // Defensive default: without this, a target matching none of the cases
    // above would fall off the end of a non-void function (undefined behavior).
    return false;
#endif
}
193 
194 #if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
195 typedef struct {
196     spi_host_t *spi_host;
197     esp_err_t *err;
198 } spi_ipc_param_t;
199 
ipc_isr_reg_to_core(void * args)200 static void ipc_isr_reg_to_core(void *args)
201 {
202     spi_host_t *host = ((spi_ipc_param_t *)args)->spi_host;
203     const spi_bus_attr_t* bus_attr = host->bus_attr;
204     *((spi_ipc_param_t *)args)->err = esp_intr_alloc(spicommon_irqsource_for_host(host->id), bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED, spi_intr, host, &host->intr);
205 }
206 #endif
207 
// Should be called before any devices are actually registered or used.
// Currently automatically called after `spi_bus_initialize()` and when first device is registered.
static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
{
    esp_err_t err = ESP_OK;

    // The bus itself must already have been initialized (its attributes exist).
    const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
    SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
    SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
    // spihost contains atomic variables, which should not be put in PSRAM
    spi_host_t* host = heap_caps_malloc(sizeof(spi_host_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (host == NULL) {
        err = ESP_ERR_NO_MEM;
        goto cleanup;
    }

    // cur_cs == DEV_NUM_MAX marks "no transaction in flight". Fields not
    // listed here (hal, intr, device[], ...) are zero-initialized.
    *host = (spi_host_t) {
        .id = host_id,
        .cur_cs = DEV_NUM_MAX,
        .polling = false,
        .device_acquiring_lock = NULL,
        .bus_attr = bus_attr,
    };

    // interrupts are not allowed on SPI1 bus
    if (host_id != SPI1_HOST) {
#if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
        // When the user pinned the ISR to a specific core, allocate the
        // interrupt from that core via a blocking IPC call so it is serviced there.
        if(bus_attr->bus_cfg.isr_cpu_id > INTR_CPU_ID_AUTO) {
            SPI_CHECK(bus_attr->bus_cfg.isr_cpu_id <= INTR_CPU_ID_1, "invalid core id", ESP_ERR_INVALID_ARG);
            spi_ipc_param_t ipc_arg = {
                .spi_host = host,
                .err = &err,
            };
            esp_ipc_call_blocking(INTR_CPU_CONVERT_ID(bus_attr->bus_cfg.isr_cpu_id), ipc_isr_reg_to_core, (void *) &ipc_arg);
        } else
#endif
        {
            // Allocate the interrupt initially disabled; it is enabled on demand
            // when transactions are queued (see the architecture notes at file top).
            err = esp_intr_alloc(spicommon_irqsource_for_host(host_id), bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED, spi_intr, host, &host->intr);
        }
        if (err != ESP_OK) {
            goto cleanup;
        }
    }

    //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
    spi_hal_config_t hal_config = {
        //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = bus_attr->dma_enabled,
        .dmadesc_tx = bus_attr->dmadesc_tx,
        .dmadesc_rx = bus_attr->dmadesc_rx,
        .tx_dma_chan = bus_attr->tx_dma_chan,
        .rx_dma_chan = bus_attr->rx_dma_chan,
        .dmadesc_n = bus_attr->dma_desc_num,
    };
    spi_hal_init(&host->hal, host_id, &hal_config);

    if (host_id != SPI1_HOST) {
        //SPI1 attributes are already initialized at start up.
        // Hook the bus lock's background-operation control to our interrupt,
        // and register ourselves for teardown when the bus is freed.
        spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
        spi_bus_lock_set_bg_control(lock, spi_bus_intr_enable, spi_bus_intr_disable, host);
        spi_bus_register_destroy_func(host_id, spi_master_deinit_driver, host);
    }

    bus_driver_ctx[host_id] = host;
    return ESP_OK;

cleanup:
    // NOTE(review): if we arrive here before spi_hal_init ran, host->hal is the
    // zero value from the compound literal above — presumably spi_hal_deinit
    // tolerates that; confirm.
    if (host) {
        spi_hal_deinit(&host->hal);
        if (host->intr) {
            esp_intr_free(host->intr);
        }
    }
    free(host);
    return err;
}
286 
spi_master_deinit_driver(void * arg)287 static esp_err_t spi_master_deinit_driver(void* arg)
288 {
289     spi_host_t *host = (spi_host_t*)arg;
290     SPI_CHECK(host != NULL, "host_id not in use", ESP_ERR_INVALID_STATE);
291 
292     int host_id = host->id;
293     SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
294 
295     int x;
296     for (x=0; x<DEV_NUM_MAX; x++) {
297         SPI_CHECK(host->device[x] == NULL, "not all CSses freed", ESP_ERR_INVALID_STATE);
298     }
299 
300     spi_hal_deinit(&host->hal);
301 
302     if (host->intr) {
303         esp_intr_free(host->intr);
304     }
305     free(host);
306     bus_driver_ctx[host_id] = NULL;
307     return ESP_OK;
308 }
309 
spi_get_timing(bool gpio_is_used,int input_delay_ns,int eff_clk,int * dummy_o,int * cycles_remain_o)310 void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dummy_o, int* cycles_remain_o)
311 {
312 #ifdef CONFIG_IDF_TARGET_ESP32
313     int timing_dummy;
314     int timing_miso_delay;
315 
316     spi_hal_cal_timing(APB_CLK_FREQ, eff_clk, gpio_is_used, input_delay_ns, &timing_dummy, &timing_miso_delay);
317     if (dummy_o) *dummy_o = timing_dummy;
318     if (cycles_remain_o) *cycles_remain_o = timing_miso_delay;
319 #else
320     //TODO: IDF-6578
321     ESP_LOGW(SPI_TAG, "This func temporary not supported for current target!");
322 #endif
323 }
324 
// Thin wrapper delegating the frequency-limit calculation to the HAL.
// Only implemented for ESP32; other targets log a warning and return 0 (IDF-6578).
int spi_get_freq_limit(bool gpio_is_used, int input_delay_ns)
{
#ifdef CONFIG_IDF_TARGET_ESP32
    return spi_hal_get_freq_limit(gpio_is_used, input_delay_ns);
#else
    //TODO: IDF-6578
    ESP_LOGW(SPI_TAG, "This func temporary not supported for current target!");
    return 0;
#endif
}
335 
/*
 Add a device. This allocates a CS line for the device, allocates memory for the device structure and hooks
 up the CS pin to whatever is specified.
*/
esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle)
{
    spi_device_t *dev = NULL;
    esp_err_t err = ESP_OK;

    SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
    if (bus_driver_ctx[host_id] == NULL) {
        //lazy initialization the driver, get deinitialized by the bus is freed
        err = spi_master_init_driver(host_id);
        if (err != ESP_OK) {
            return err;
        }
    }

    spi_host_t *host = bus_driver_ctx[host_id];
    const spi_bus_attr_t* bus_attr = host->bus_attr;
    SPI_CHECK(dev_config->spics_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(dev_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);
#if SOC_SPI_SUPPORT_CLK_RC_FAST
    // The RC_FAST oscillator must be powered up before it can serve as clock source.
    if (dev_config->clock_source == SPI_CLK_SRC_RC_FAST) {
        SPI_CHECK(periph_rtc_dig_clk8m_enable(), "the selected clock not available", ESP_ERR_INVALID_STATE);
    }
#endif
    // Resolve the clock source (default when dev_config->clock_source is 0)
    // and its frequency, to validate the requested SCLK speed against it.
    spi_clock_source_t clk_src = SPI_CLK_SRC_DEFAULT;
    uint32_t clock_source_hz = 0;
    if (dev_config->clock_source) {
        clk_src = dev_config->clock_source;
    }
    esp_clk_tree_src_get_freq_hz(clk_src, ESP_CLK_TREE_SRC_FREQ_PRECISION_APPROX, &clock_source_hz);
    SPI_CHECK((dev_config->clock_speed_hz > 0) && (dev_config->clock_speed_hz <= clock_source_hz), "invalid sclk speed", ESP_ERR_INVALID_ARG);
#ifdef CONFIG_IDF_TARGET_ESP32
    //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full
    //duplex mode does absolutely nothing on the ESP32.
    SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) ||
        (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG);
#endif

    //Check post_cb status when `SPI_DEVICE_NO_RETURN_RESULT` flag is set.
    if (dev_config->flags & SPI_DEVICE_NO_RETURN_RESULT) {
        SPI_CHECK(dev_config->post_cb != NULL, "use feature flag 'SPI_DEVICE_NO_RETURN_RESULT' but no post callback function sets", ESP_ERR_INVALID_ARG);
    }

    // Register with the bus lock; this also reserves the CS slot for the device.
    uint32_t lock_flag = ((dev_config->spics_io_num != -1) ? SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED : 0);

    spi_bus_lock_dev_config_t lock_config = {
        .flags = lock_flag,
    };
    spi_bus_lock_dev_handle_t dev_handle;
    err = spi_bus_lock_register_dev(bus_attr->lock, &lock_config, &dev_handle);
    if (err != ESP_OK) {
        goto nomem;
    }

    int freecs = spi_bus_lock_get_dev_id(dev_handle);
    // NOTE(review): this early return — and the `ret` check below — returns
    // without unregistering `dev_handle` from the bus lock; looks like a leak,
    // confirm against spi_bus_lock semantics.
    SPI_CHECK(freecs != -1, "no free cs pins for the host", ESP_ERR_NOT_FOUND);

    //input parameters to calculate timing configuration
    int half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
    int no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
    // duty_cycle_pos == 0 means "use default", which is 128 (50% duty).
    int duty_cycle = (dev_config->duty_cycle_pos == 0) ? 128 : dev_config->duty_cycle_pos;
    int use_gpio = !(bus_attr->flags & SPICOMMON_BUSFLAG_IOMUX_PINS);
    spi_hal_timing_param_t timing_param = {
        .half_duplex = half_duplex,
        .no_compensate = no_compensate,
        .clk_src_hz = clock_source_hz,
        .expected_freq = dev_config->clock_speed_hz,
        .duty_cycle = duty_cycle,
        .input_delay_ns = dev_config->input_delay_ns,
        .use_gpio = use_gpio
    };

    //output values of timing configuration
    spi_hal_timing_conf_t temp_timing_conf;
    int freq;
    esp_err_t ret = spi_hal_cal_clock_conf(&timing_param, &freq, &temp_timing_conf);
    temp_timing_conf.clock_source = clk_src;
    SPI_CHECK(ret == ESP_OK, "assigned clock speed not supported", ret);

    //Allocate memory for device
    dev = malloc(sizeof(spi_device_t));
    if (dev == NULL) goto nomem;
    memset(dev, 0, sizeof(spi_device_t));

    dev->id = freecs;
    dev->dev_lock = dev_handle;

    //Allocate queues, set defaults
    dev->trans_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
    if (!dev->trans_queue) {
        goto nomem;
    }
    //ret_queue nolonger needed if use flag SPI_DEVICE_NO_RETURN_RESULT
    if (!(dev_config->flags & SPI_DEVICE_NO_RETURN_RESULT)) {
        dev->ret_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
        if (!dev->ret_queue) {
            goto nomem;
        }
    }

    //We want to save a copy of the dev config in the dev struct.
    memcpy(&dev->cfg, dev_config, sizeof(spi_device_interface_config_t));
    dev->cfg.duty_cycle_pos = duty_cycle;
    dev->real_clk_freq_hz = freq;
    // TODO: if we have to change the apb clock among transactions, re-calculate this each time the apb clock lock is locked.

    //Set CS pin, CS options
    if (dev_config->spics_io_num >= 0) {
        spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio);
    }

    //save a pointer to device in spi_host_t
    host->device[freecs] = dev;
    //save a pointer to host in spi_device_t
    dev->host= host;

    //initialise the device specific configuration
    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
    hal_dev->mode = dev_config->mode;
    hal_dev->cs_setup = dev_config->cs_ena_pretrans;
    hal_dev->cs_hold = dev_config->cs_ena_posttrans;
    //set hold_time to 0 will not actually append delay to CS
    //set it to 1 since we do need at least one clock of hold time in most cases
    if (hal_dev->cs_hold == 0) {
        hal_dev->cs_hold = 1;
    }
    hal_dev->cs_pin_id = dev->id;
    hal_dev->timing_conf = temp_timing_conf;
    hal_dev->sio = (dev_config->flags) & SPI_DEVICE_3WIRE ? 1 : 0;
    hal_dev->half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
    hal_dev->tx_lsbfirst = dev_config->flags & SPI_DEVICE_TXBIT_LSBFIRST ? 1 : 0;
    hal_dev->rx_lsbfirst = dev_config->flags & SPI_DEVICE_RXBIT_LSBFIRST ? 1 : 0;
    hal_dev->no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
#if SOC_SPI_AS_CS_SUPPORTED
    hal_dev->as_cs = dev_config->flags & SPI_DEVICE_CLK_AS_CS ? 1 : 0;
#endif
    hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0;

    *handle = dev;
    ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000);

    return ESP_OK;

nomem:
    // Free whatever was allocated so far. NOTE(review): ESP_ERR_NO_MEM is
    // returned even when the jump came from a failing
    // spi_bus_lock_register_dev whose `err` may differ — confirm intended.
    if (dev) {
        if (dev->trans_queue) vQueueDelete(dev->trans_queue);
        if (dev->ret_queue) vQueueDelete(dev->ret_queue);
        spi_bus_lock_unregister_dev(dev->dev_lock);
    }
    free(dev);
    return ESP_ERR_NO_MEM;
}
490 
spi_bus_remove_device(spi_device_handle_t handle)491 esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
492 {
493     SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG);
494     //These checks aren't exhaustive; another thread could sneak in a transaction inbetween. These are only here to
495     //catch design errors and aren't meant to be triggered during normal operation.
496     SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
497     SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
498     if (handle->ret_queue) {
499         SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
500     }
501 
502 #if SOC_SPI_SUPPORT_CLK_RC_FAST
503     if (handle->cfg.clock_source == SPI_CLK_SRC_RC_FAST) {
504         periph_rtc_dig_clk8m_disable();
505     }
506 #endif
507 
508     //return
509     int spics_io_num = handle->cfg.spics_io_num;
510     if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num);
511 
512     //Kill queues
513     if (handle->trans_queue) vQueueDelete(handle->trans_queue);
514     if (handle->ret_queue) vQueueDelete(handle->ret_queue);
515     spi_bus_lock_unregister_dev(handle->dev_lock);
516 
517     assert(handle->host->device[handle->id] == handle);
518     handle->host->device[handle->id] = NULL;
519     free(handle);
520     return ESP_OK;
521 }
522 
spi_device_get_actual_freq(spi_device_handle_t handle,int * freq_khz)523 esp_err_t spi_device_get_actual_freq(spi_device_handle_t handle, int* freq_khz)
524 {
525     if ((spi_device_t*)handle == NULL || freq_khz == NULL) {
526         return ESP_ERR_INVALID_ARG;
527     }
528 
529     *freq_khz = handle->real_clk_freq_hz / 1000;
530     return ESP_OK;
531 }
532 
// Compute the closest achievable SPI clock for the requested frequency.
// Thin wrapper kept for API compatibility; the work happens in the HAL.
int spi_get_actual_clock(int fapb, int hz, int duty_cycle)
{
    const int actual_hz = spi_hal_master_cal_clock(fapb, hz, duty_cycle);
    return actual_hz;
}
537 
538 // Setup the device-specified configuration registers. Called every time a new
539 // transaction is to be sent, but only apply new configurations when the device
540 // changes.
spi_setup_device(spi_device_t * dev)541 static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev)
542 {
543     spi_bus_lock_dev_handle_t dev_lock = dev->dev_lock;
544     spi_hal_context_t *hal = &dev->host->hal;
545     spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
546 
547     if (spi_bus_lock_touch(dev_lock)) {
548         /* Configuration has not been applied yet. */
549         spi_hal_setup_device(hal, hal_dev);
550     }
551 }
552 
get_acquiring_dev(spi_host_t * host)553 static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host)
554 {
555     spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock);
556     if (!dev_lock) return NULL;
557 
558     return host->device[spi_bus_lock_get_dev_id(dev_lock)];
559 }
560 
561 // Debug only
562 // NOTE if the acquiring is not fully completed, `spi_bus_lock_get_acquiring_dev`
563 // may return a false `NULL` cause the function returning false `false`.
spi_bus_device_is_polling(spi_device_t * dev)564 static inline SPI_MASTER_ISR_ATTR bool spi_bus_device_is_polling(spi_device_t *dev)
565 {
566     return get_acquiring_dev(dev->host) == dev && dev->host->polling;
567 }
568 
569 /*-----------------------------------------------------------------------------
570     Working Functions
571 -----------------------------------------------------------------------------*/
572 
573 // The interrupt may get invoked by the bus lock.
spi_bus_intr_enable(void * host)574 static void SPI_MASTER_ISR_ATTR spi_bus_intr_enable(void *host)
575 {
576     esp_intr_enable(((spi_host_t*)host)->intr);
577 }
578 
579 // The interrupt is always disabled by the ISR itself, not exposed
spi_bus_intr_disable(void * host)580 static void SPI_MASTER_ISR_ATTR spi_bus_intr_disable(void *host)
581 {
582     esp_intr_disable(((spi_host_t*)host)->intr);
583 }
584 
// The function is called to send a new transaction, in ISR or in the task.
// Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used)
static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
{
    spi_transaction_t *trans = trans_buf->trans;
    spi_host_t *host = dev->host;
    spi_hal_context_t *hal = &(host->hal);
    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);

    // Mark this device as the one transacting (cleared again in spi_post_trans).
    host->cur_cs = dev->id;

    //Reconfigure according to device settings, the function only has effect when the dev_id is changed.
    spi_setup_device(dev);

    //set the transaction specific configuration each time before a transaction setup
    spi_hal_trans_config_t hal_trans = {};
    hal_trans.tx_bitlen = trans->length;
    hal_trans.rx_bitlen = trans->rxlength;
    // Use the buffers saved in the host context (may be DMA-capable temporaries,
    // see spi_trans_priv_t).
    hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
    hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
    hal_trans.cmd = trans->cmd;
    hal_trans.addr = trans->addr;
    hal_trans.cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 1 : 0;

    //Set up OIO/QIO/DIO if needed
    hal_trans.line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 :
        (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1;
#if SOC_SPI_SUPPORT_OCT
    if (trans->flags & SPI_TRANS_MODE_OCT) {
        hal_trans.line_mode.data_lines = 8;
    }
#endif
    // Address/command phases follow the data-line count only when the matching
    // MULTILINE flag is set; otherwise they are single-line.
    hal_trans.line_mode.addr_lines = (trans->flags & SPI_TRANS_MULTILINE_ADDR) ? hal_trans.line_mode.data_lines : 1;
    hal_trans.line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans.line_mode.data_lines : 1;

    // Per-transaction bit lengths override the device defaults when the
    // VARIABLE_* flags are set (the transaction is then a spi_transaction_ext_t).
    if (trans->flags & SPI_TRANS_VARIABLE_CMD) {
        hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits;
    } else {
        hal_trans.cmd_bits = dev->cfg.command_bits;
    }
    if (trans->flags & SPI_TRANS_VARIABLE_ADDR) {
        hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits;
    } else {
        hal_trans.addr_bits = dev->cfg.address_bits;
    }
    if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) {
        hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits;
    } else {
        hal_trans.dummy_bits = dev->cfg.dummy_bits;
    }

    spi_hal_setup_trans(hal, hal_dev, &hal_trans);
    spi_hal_prepare_data(hal, hal_dev, &hal_trans);

    //Call pre-transmission callback, if any
    if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
    //Kick off transfer
    spi_hal_user_start(hal);
}
644 
645 // The function is called when a transaction is done, in ISR or in the task.
646 // Fetch the data from FIFO and call the ``post_cb``.
spi_post_trans(spi_host_t * host)647 static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
648 {
649     spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
650 
651     spi_hal_fetch_result(&host->hal);
652     //Call post-transaction callback, if any
653     spi_device_t* dev = host->device[host->cur_cs];
654     if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans);
655 
656     host->cur_cs = DEV_NUM_MAX;
657 }
658 
659 // This is run in interrupt context.
spi_intr(void * arg)660 static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
661 {
662     BaseType_t do_yield = pdFALSE;
663     spi_host_t *host = (spi_host_t *)arg;
664     const spi_bus_attr_t* bus_attr = host->bus_attr;
665 
666     assert(spi_hal_usr_is_done(&host->hal));
667 
668     /*
669      * Help to skip the handling of in-flight transaction, and disable of the interrupt.
670      * The esp_intr_enable will be called (b) after new BG request is queued (a) in the task;
671      * while esp_intr_disable should be called (c) if we check and found the sending queue is empty (d).
672      * If (c) is called after (d), then there is a risk that things happens in this sequence:
673      * (d) -> (a) -> (b) -> (c), and in this case the interrupt is disabled while there's pending BG request in the queue.
674      * To avoid this, interrupt is disabled here, and re-enabled later if required.
675      */
676     if (!spi_bus_lock_bg_entry(bus_attr->lock)) {
677         /*------------ deal with the in-flight transaction -----------------*/
678         assert(host->cur_cs != DEV_NUM_MAX);
679         //Okay, transaction is done.
680         const int cs = host->cur_cs;
681         //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
682 
683 #if CONFIG_IDF_TARGET_ESP32
684         if (bus_attr->dma_enabled) {
685             //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
686             spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan);
687         }
688 #endif  //#if CONFIG_IDF_TARGET_ESP32
689 
690         //cur_cs is changed to DEV_NUM_MAX here
691         spi_post_trans(host);
692 
693         if (!(host->device[cs]->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT)) {
694             //Return transaction descriptor.
695             xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield);
696         }
697 
698         // spi_bus_lock_bg_pause(bus_attr->lock);
699 #ifdef CONFIG_PM_ENABLE
700         //Release APB frequency lock
701         esp_pm_lock_release(bus_attr->pm_lock);
702 #endif
703     }
704 
705     /*------------ new transaction starts here ------------------*/
706     assert(host->cur_cs == DEV_NUM_MAX);
707 
708     spi_bus_lock_handle_t lock = host->bus_attr->lock;
709     BaseType_t trans_found = pdFALSE;
710 
711 
712     // There should be remaining requests
713     BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock));
714 
715     do {
716         spi_bus_lock_dev_handle_t acq_dev_lock = spi_bus_lock_get_acquiring_dev(lock);
717         spi_bus_lock_dev_handle_t desired_dev = acq_dev_lock;
718         bool resume_task = false;
719         spi_device_t* device_to_send = NULL;
720 
721         if (!acq_dev_lock) {
722             // This function may assign a new acquiring device, otherwise it will suggest a desired device with BG active
723             // We use either of them without further searching in the devices.
724             // If the return value is true, it means either there's no acquiring device, or the acquiring device's BG is active,
725             // We stay in the ISR to deal with those transactions of desired device, otherwise nothing will be done, check whether we need to resume some other tasks, or just quit the ISR
726             resume_task = spi_bus_lock_bg_check_dev_acq(lock, &desired_dev);
727         }
728 
729         if (!resume_task) {
730             // sanity check
731             assert(desired_dev);
732 
733             bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev);
734             if (dev_has_req) {
735                 device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)];
736                 trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield);
737                 if (!trans_found) {
738                     spi_bus_lock_bg_clear_req(desired_dev);
739                 }
740             }
741         }
742 
743         if (trans_found) {
744             spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf;
745 #if CONFIG_IDF_TARGET_ESP32
746             if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
747                 //mark channel as active, so that the DMA will not be reset by the slave
748                 //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
749                 spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan);
750             }
751 #endif  //#if CONFIG_IDF_TARGET_ESP32
752             spi_new_trans(device_to_send, cur_trans_buf);
753         }
754         // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG),
755         // or resume acquiring device task (if quit due to bus acquiring).
756     } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield));
757 
758     if (do_yield) portYIELD_FROM_ISR();
759 }
760 
/**
 * Validate a transaction descriptor against the capabilities of the device and
 * of the host bus it is attached to, before the transaction is queued/started.
 *
 * Side effect: in full-duplex mode a zero `rxlength` is defaulted to `length`
 * (RX clocks as many bits as TX), so `trans_desc` may be modified.
 *
 * @param handle      Target device; must not be NULL.
 * @param trans_desc  Transaction to validate (rxlength may be filled in here).
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG when any check fails
 *         (SPI_CHECK logs the message and returns immediately).
 */
static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc)
{
    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
    spi_host_t *host = handle->host;
    const spi_bus_attr_t* bus_attr = host->bus_attr;
    // A phase is "enabled" if either the inline data words or a user buffer is supplied.
    bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer);
    bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer);
    // SPI_TRANS_VARIABLE_* flags promise that the caller actually passed the larger spi_transaction_ext_t.
    spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc;
    bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0);
    // Extra dummy cycles inserted by timing compensation at high clock speeds.
    bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy;
    bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0);

    //check transmission length
    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
    SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
    SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
    SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG);
    //check working mode
#if SOC_SPI_SUPPORT_OCT
    SPI_CHECK(!(host->id == SPI3_HOST && trans_desc->flags & SPI_TRANS_MODE_OCT), "SPI3 does not support octal mode", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both Octal mode and 3-wire-mode", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && !is_half_duplex), "Incompatible when setting to both Octal mode and half duplex mode", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both multi-line mode and 3-wire-mode", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "Incompatible when setting to both multi-line mode and half duplex mode", ESP_ERR_INVALID_ARG);
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG );
#endif
#if !SOC_SPI_HD_BOTH_INOUT_SUPPORTED
    //On these chips, HW doesn't support using both TX and RX phases when in halfduplex mode
    SPI_CHECK(!is_half_duplex || !tx_enabled || !rx_enabled, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!is_half_duplex || !trans_desc->length || !trans_desc->rxlength, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
#endif
    //MOSI phase is skipped only when both tx_buffer and SPI_TRANS_USE_TXDATA are not set.
    SPI_CHECK(trans_desc->length != 0 || !tx_enabled, "trans tx_buffer should be NULL and SPI_TRANS_USE_TXDATA should be cleared to skip MOSI phase.", ESP_ERR_INVALID_ARG);
    //MISO phase is skipped only when both rx_buffer and SPI_TRANS_USE_RXDATA are not set.
    //If set rxlength=0 in full_duplex mode, it will be automatically set to length
    SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG);
    //In Full duplex mode, default rxlength to be the same as length, if not filled in.
    // set rxlength to length is ok, even when rx buffer=NULL
    if (trans_desc->rxlength==0 && !is_half_duplex) {
        trans_desc->rxlength=trans_desc->length;
    }
    //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode.
    SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG);

    // Hardware FIFO/DMA length limits differ between DMA and CPU-driven transfers.
    if (bus_attr->dma_enabled) {
        SPI_CHECK(trans_desc->length <= SPI_LL_DMA_MAX_BIT_LEN, "txdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
        SPI_CHECK(trans_desc->rxlength <= SPI_LL_DMA_MAX_BIT_LEN, "rxdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
    } else {
        SPI_CHECK(trans_desc->length <= SPI_LL_CPU_MAX_BIT_LEN, "txdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
        SPI_CHECK(trans_desc->rxlength <= SPI_LL_CPU_MAX_BIT_LEN, "rxdata transfer > hardware max supported len", ESP_ERR_INVALID_ARG);
    }

    return ESP_OK;
}
818 
uninstall_priv_desc(spi_trans_priv_t * trans_buf)819 static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
820 {
821     spi_transaction_t *trans_desc = trans_buf->trans;
822     if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
823         trans_buf->buffer_to_send != trans_desc->tx_buffer) {
824         free((void *)trans_buf->buffer_to_send); //force free, ignore const
825     }
826     // copy data from temporary DMA-capable buffer back to IRAM buffer and free the temporary one.
827     if (trans_buf->buffer_to_rcv &&
828         (void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] &&
829         trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
830         if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
831             memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
832         } else {
833             memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
834         }
835         free(trans_buf->buffer_to_rcv);
836     }
837 }
838 
/**
 * Initialize the private transaction descriptor `new_desc` from the user's
 * transaction.
 *
 * When DMA is in use and the user's buffer is not DMA-capable (or the RX
 * buffer is not 4-byte aligned), a temporary DMA-capable bounce buffer is
 * allocated here. Ownership of any temporaries stays with `new_desc`;
 * uninstall_priv_desc() copies RX data back and frees them.
 *
 * @param trans_desc  User transaction; buffers and flags are read, not written.
 * @param new_desc    Output descriptor, fully (re)initialized by this call.
 * @param isdma       True when the bus uses DMA, enabling the bounce-buffer path.
 * @return ESP_OK, or ESP_ERR_NO_MEM if a bounce buffer could not be allocated
 *         (any buffer allocated earlier in this call is freed before return).
 */
static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_transaction_t *trans_desc, spi_trans_priv_t* new_desc, bool isdma)
{
    *new_desc = (spi_trans_priv_t) { .trans = trans_desc, };

    // rx memory assign
    uint32_t* rcv_ptr;
    if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
        rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
    } else {
        //if not use RXDATA neither rx_buffer, buffer_to_rcv assigned to NULL
        rcv_ptr = trans_desc->rx_buffer;
    }
    // RX needs the replacement when the buffer is either not DMA-capable or misaligned,
    // because the DMA writes whole words into it.
    if (rcv_ptr && isdma && (!esp_ptr_dma_capable(rcv_ptr) || ((int)rcv_ptr % 4 != 0))) {
        //if rxbuf in the desc not DMA-capable, malloc a new one. The rx buffer need to be length of multiples of 32 bits to avoid heap corruption.
        ESP_LOGD(SPI_TAG, "Allocate RX buffer for DMA" );
        rcv_ptr = heap_caps_malloc(((trans_desc->rxlength + 31) / 32) * 4, MALLOC_CAP_DMA);
        if (rcv_ptr == NULL) goto clean_up;
    }
    new_desc->buffer_to_rcv = rcv_ptr;

    // tx memory assign
    const uint32_t *send_ptr;
    if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
        send_ptr = (uint32_t *)&trans_desc->tx_data[0];
    } else {
        //if not use TXDATA neither tx_buffer, tx data assigned to NULL
        send_ptr = trans_desc->tx_buffer ;
    }
    // TX only needs DMA capability (the DMA merely reads from it), so no alignment check.
    if (send_ptr && isdma && !esp_ptr_dma_capable( send_ptr )) {
        //if txbuf in the desc not DMA-capable, malloc a new one
        ESP_LOGD(SPI_TAG, "Allocate TX buffer for DMA" );
        uint32_t *temp = heap_caps_malloc((trans_desc->length + 7) / 8, MALLOC_CAP_DMA);
        if (temp == NULL) goto clean_up;

        memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 );
        send_ptr = temp;
    }
    new_desc->buffer_to_send = send_ptr;

    return ESP_OK;

clean_up:
    // Frees whatever was allocated so far; buffers still NULL/user-owned are skipped.
    uninstall_priv_desc(new_desc);
    return ESP_ERR_NO_MEM;
}
884 
/**
 * Queue a transaction for interrupt-mode transfer on the device and request
 * the background ISR to run; the finished descriptor is later retrieved with
 * spi_device_get_trans_result() (unless SPI_DEVICE_NO_RETURN_RESULT is set).
 *
 * @param handle         Device to send the transaction to.
 * @param trans_desc     Transaction descriptor; must stay valid until the
 *                       result has been retrieved.
 * @param ticks_to_wait  Max time to block when the device's queue is full.
 * @return ESP_OK, ESP_ERR_INVALID_ARG / ESP_ERR_INVALID_STATE on a failed
 *         check, ESP_ERR_TIMEOUT if the queue stayed full, or an error from
 *         the bus-lock background request.
 */
esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
{
    esp_err_t ret = check_trans_valid(handle, trans_desc);
    if (ret != ESP_OK) return ret;

    spi_host_t *host = handle->host;

    SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );

    /* Even when using interrupt transfer, the CS can only be kept activated if the bus has been
     * acquired with `spi_device_acquire_bus()` first. */
    if (host->device_acquiring_lock != handle && (trans_desc->flags & SPI_TRANS_CS_KEEP_ACTIVE)) {
        return ESP_ERR_INVALID_ARG;
    }

    // May allocate temporary DMA bounce buffers; they are freed by
    // uninstall_priv_desc() on any failure below, or when the result is fetched.
    spi_trans_priv_t trans_buf;
    ret = setup_priv_desc(trans_desc, &trans_buf, (host->bus_attr->dma_enabled));
    if (ret != ESP_OK) return ret;

#ifdef CONFIG_PM_ENABLE
    // though clock source is selectable, read/write reg and mem of spi peripherial still use APB
    // and dma still use APB, so pm_lock is still needed
    esp_pm_lock_acquire(host->bus_attr->pm_lock);
#endif
    //Send to queue and invoke the ISR.

    BaseType_t r = xQueueSend(handle->trans_queue, (void *)&trans_buf, ticks_to_wait);
    if (!r) {
        ret = ESP_ERR_TIMEOUT;
#ifdef CONFIG_PM_ENABLE
        //Release APB frequency lock
        esp_pm_lock_release(host->bus_attr->pm_lock);
#endif
        goto clean_up;
    }

    // The ISR will be invoked at correct time by the lock with `spi_bus_intr_enable`.
    ret = spi_bus_lock_bg_request(handle->dev_lock);
    if (ret != ESP_OK) {
        goto clean_up;
    }
    return ESP_OK;

clean_up:
    uninstall_priv_desc(&trans_buf);
    return ret;
}
932 
spi_device_get_trans_result(spi_device_handle_t handle,spi_transaction_t ** trans_desc,TickType_t ticks_to_wait)933 esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
934 {
935     BaseType_t r;
936     spi_trans_priv_t trans_buf;
937     SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
938 
939     //if SPI_DEVICE_NO_RETURN_RESULT is set, ret_queue will always be empty
940     SPI_CHECK(!(handle->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT), "API not Supported!", ESP_ERR_NOT_SUPPORTED);
941 
942     //use the interrupt, block until return
943     r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
944     if (!r) {
945         // The memory occupied by rx and tx DMA buffer destroyed only when receiving from the queue (transaction finished).
946         // If timeout, wait and retry.
947         // Every in-flight transaction request occupies internal memory as DMA buffer if needed.
948         return ESP_ERR_TIMEOUT;
949     }
950     //release temporary buffers
951     uninstall_priv_desc(&trans_buf);
952     (*trans_desc) = trans_buf.trans;
953 
954     return ESP_OK;
955 }
956 
957 //Porcelain to do one blocking transmission.
spi_device_transmit(spi_device_handle_t handle,spi_transaction_t * trans_desc)958 esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc)
959 {
960     esp_err_t ret;
961     spi_transaction_t *ret_trans;
962     //ToDo: check if any spi transfers in flight
963     ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY);
964     if (ret != ESP_OK) return ret;
965 
966     ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY);
967     if (ret != ESP_OK) return ret;
968 
969     assert(ret_trans == trans_desc);
970     return ESP_OK;
971 }
972 
/**
 * Acquire exclusive use of the SPI bus for this device until
 * spi_device_release_bus() is called. While held, the device is configured
 * once up-front so subsequent transactions skip reconfiguration, and the APB
 * frequency lock (if PM is enabled) is kept.
 *
 * @param device  Device that wants exclusive bus access.
 * @param wait    Must be portMAX_DELAY (finite timeouts not supported yet).
 * @return ESP_OK, ESP_ERR_INVALID_ARG / ESP_ERR_INVALID_STATE on failed
 *         checks, or an error from the bus-lock acquire.
 */
esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait)
{
    spi_host_t *const host = device->host;
    SPI_CHECK(wait==portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE );

    esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait);
    if (ret != ESP_OK) {
        return ret;
    }
    // Remember who holds the bus so queue/polling APIs can skip re-acquiring the lock.
    host->device_acquiring_lock = device;

    ESP_LOGD(SPI_TAG, "device%d locked the bus", device->id);

#ifdef CONFIG_PM_ENABLE
    // though we don't suggest to block the task before ``release_bus``, still allow doing so.
    // this keeps the spi clock at 80MHz even if all tasks are blocked
    esp_pm_lock_acquire(host->bus_attr->pm_lock);
#endif
    //configure the device ahead so that we don't need to do it again in the following transactions
    spi_setup_device(host->device[device->id]);
    //the DMA is also occupied by the device, all the slave devices that using DMA should wait until bus released.

#if CONFIG_IDF_TARGET_ESP32
    if (host->bus_attr->dma_enabled) {
        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
        spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan);
    }
#endif  //#if CONFIG_IDF_TARGET_ESP32

    return ESP_OK;
}
1005 
// This function restores the configurations required in the non-polling mode
/**
 * Release exclusive bus access previously obtained with
 * spi_device_acquire_bus(). Must not be called while a polling transaction on
 * this device is still in progress (asserts otherwise).
 *
 * @param dev  Device currently holding the bus.
 */
void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
{
    spi_host_t *host = dev->host;

    if (spi_bus_device_is_polling(dev)){
        ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress.");
        assert(0);
    }

#if CONFIG_IDF_TARGET_ESP32
    //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
    if (host->bus_attr->dma_enabled) {
        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
        spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan);
    }
#endif  //#if CONFIG_IDF_TARGET_ESP32

    //allow clock to be lower than 80MHz when all tasks blocked
#ifdef CONFIG_PM_ENABLE
    //Release APB frequency lock
    esp_pm_lock_release(host->bus_attr->pm_lock);
#endif
    ESP_LOGD(SPI_TAG, "device%d release bus", dev->id);

    // Clear the owner marker before ending the lock so other devices see the bus as free.
    host->device_acquiring_lock = NULL;
    esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
    assert(ret == ESP_OK);
    (void) ret;
}
1036 
spi_device_polling_start(spi_device_handle_t handle,spi_transaction_t * trans_desc,TickType_t ticks_to_wait)1037 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
1038 {
1039     esp_err_t ret;
1040     SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG);
1041     ret = check_trans_valid(handle, trans_desc);
1042     if (ret!=ESP_OK) return ret;
1043     SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
1044 
1045     /* If device_acquiring_lock is set to handle, it means that the user has already
1046      * acquired the bus thanks to the function `spi_device_acquire_bus()`.
1047      * In that case, we don't need to take the lock again. */
1048     spi_host_t *host = handle->host;
1049     if (host->device_acquiring_lock != handle) {
1050         /* The user cannot ask for the CS to keep active has the bus is not locked/acquired. */
1051         if ((trans_desc->flags & SPI_TRANS_CS_KEEP_ACTIVE) != 0) {
1052             ret = ESP_ERR_INVALID_ARG;
1053         } else {
1054             ret = spi_bus_lock_acquire_start(handle->dev_lock, ticks_to_wait);
1055         }
1056     } else {
1057         ret = spi_bus_lock_wait_bg_done(handle->dev_lock, ticks_to_wait);
1058     }
1059     if (ret != ESP_OK) return ret;
1060 
1061     ret = setup_priv_desc(trans_desc, &host->cur_trans_buf, (host->bus_attr->dma_enabled));
1062     if (ret!=ESP_OK) return ret;
1063 
1064     //Polling, no interrupt is used.
1065     host->polling = true;
1066 
1067     ESP_LOGV(SPI_TAG, "polling trans");
1068     spi_new_trans(handle, &host->cur_trans_buf);
1069 
1070     return ESP_OK;
1071 }
1072 
/**
 * Busy-wait until the polling transaction started by
 * spi_device_polling_start() has finished, then tear it down: free temporary
 * DMA buffers and (unless the user holds the bus via
 * spi_device_acquire_bus()) release the bus lock.
 *
 * @param handle         Device that started the polling transaction.
 * @param ticks_to_wait  Max ticks to spin waiting for the hardware.
 * @return ESP_OK, or ESP_ERR_TIMEOUT if the transfer did not finish in time
 *         (in which case nothing is released and the caller may retry).
 */
esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle, TickType_t ticks_to_wait)
{
    SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
    spi_host_t *host = handle->host;

    assert(host->cur_cs == handle->id);
    assert(handle == get_acquiring_dev(host));

    // Spin (no blocking, no ISR) until the hardware reports the user transaction done.
    TickType_t start = xTaskGetTickCount();
    while (!spi_hal_usr_is_done(&host->hal)) {
        TickType_t end = xTaskGetTickCount();
        if (end - start > ticks_to_wait) {
            return ESP_ERR_TIMEOUT;
        }
    }

    ESP_LOGV(SPI_TAG, "polling trans done");
    //deal with the in-flight transaction
    spi_post_trans(host);
    //release temporary buffers (copies RX data back to the user buffer first)
    uninstall_priv_desc(&host->cur_trans_buf);

    host->polling = false;
    /* Once again here, if device_acquiring_lock is set to `handle`, it means that the user has already
     * acquired the bus thanks to the function `spi_device_acquire_bus()`.
     * In that case, the lock must not be released now; the user releases it later
     * via `spi_device_release_bus()`. */
    if (host->device_acquiring_lock != handle) {
        assert(host->device_acquiring_lock == NULL);
        spi_bus_lock_acquire_end(handle->dev_lock);
    }

    return ESP_OK;
}
1106 
spi_device_polling_transmit(spi_device_handle_t handle,spi_transaction_t * trans_desc)1107 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
1108 {
1109     esp_err_t ret;
1110     ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY);
1111     if (ret != ESP_OK) return ret;
1112 
1113     return spi_device_polling_end(handle, portMAX_DELAY);
1114 }
1115 
spi_bus_get_max_transaction_len(spi_host_device_t host_id,size_t * max_bytes)1116 esp_err_t spi_bus_get_max_transaction_len(spi_host_device_t host_id, size_t *max_bytes)
1117 {
1118     SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
1119     if (bus_driver_ctx[host_id] == NULL || max_bytes == NULL) {
1120         return ESP_ERR_INVALID_ARG;
1121     }
1122 
1123     spi_host_t *host = bus_driver_ctx[host_id];
1124     if (host->bus_attr->dma_enabled) {
1125         *max_bytes = MIN(host->bus_attr->max_transfer_sz, (SPI_LL_DMA_MAX_BIT_LEN / 8));
1126     } else {
1127         *max_bytes = MIN(host->bus_attr->max_transfer_sz, (SPI_LL_CPU_MAX_BIT_LEN / 8));
1128     }
1129 
1130     return ESP_OK;
1131 }
1132