1 // Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 /*
16 Architecture:
17
18 We can initialize a SPI driver, but we don't talk to the SPI driver itself, we address a device. A device essentially
19 is a combination of SPI port and CS pin, plus some information about the specifics of communication to the device
20 (timing, command/address length etc). The arbitration between tasks is also in conception of devices.
21
22 A device can work in interrupt mode and polling mode, and a third but
23 complicated mode which combines the two modes above:
24
25 1. Work in the ISR with a set of queues; one per device.
26
27 The idea is that to send something to a SPI device, you allocate a
28 transaction descriptor. It contains some information about the transfer
like the length, address, command etc, plus pointers to transmit and
30 receive buffer. The address of this block gets pushed into the transmit
31 queue. The SPI driver does its magic, and sends and retrieves the data
32 eventually. The data gets written to the receive buffers, if needed the
33 transaction descriptor is modified to indicate returned parameters and
34 the entire thing goes into the return queue, where whatever software
35 initiated the transaction can retrieve it.
36
37 The entire thing is run from the SPI interrupt handler. If SPI is done
38 transmitting/receiving but nothing is in the queue, it will not clear the
39 SPI interrupt but just disable it by esp_intr_disable. This way, when a
40 new thing is sent, pushing the packet into the send queue and re-enabling
41 the interrupt (by esp_intr_enable) will trigger the interrupt again, which
42 can then take care of the sending.
43
44 2. Work in the polling mode in the task.
45
46 In this mode we get rid of the ISR, FreeRTOS queue and task switching, the
task is no longer blocked during a transaction. This increases the CPU
load, but decreases the interval between SPI transactions. Each time only one
49 device (in one task) can send polling transactions, transactions to
50 other devices are blocked until the polling transaction of current device
51 is done.
52
53 In the polling mode, the queue is not used, all the operations are done
54 in the task. The task calls ``spi_device_polling_start`` to setup and start
55 a new transaction, then call ``spi_device_polling_end`` to handle the
56 return value of the transaction.
57
58 To handle the arbitration among devices, the device "temporarily" acquire
59 a bus by the ``device_acquire_bus_internal`` function, which writes
dev_request by CAS operation. Other devices which want to send polling
61 transactions but don't own the bus will block and wait until given the
62 semaphore which indicates the ownership of bus.
63
64 In case of the ISR is still sending transactions to other devices, the ISR
should maintain a ``random_idle`` flag indicating that it's not doing
66 transactions. When the bus is locked, the ISR can only send new
67 transactions to the acquiring device. The ISR will automatically disable
68 itself and send semaphore to the device if the ISR is free. If the device
69 sees the random_idle flag, it can directly start its polling transaction.
70 Otherwise it should block and wait for the semaphore from the ISR.
71
72 After the polling transaction, the driver will release the bus. During the
73 release of the bus, the driver search all other devices to see whether
74 there is any device waiting to acquire the bus, if so, acquire for it and
75 send it a semaphore if the device queue is empty, or invoke the ISR for
76 it. If all other devices don't need to acquire the bus, but there are
77 still transactions in the queues, the ISR will also be invoked.
78
79 To get better polling efficiency, user can call ``spi_device_acquire_bus``
80 function, which also calls the ``spi_bus_lock_acquire_core`` function,
81 before a series of polling transactions to a device. The bus acquiring and
82 task switching before and after the polling transaction will be escaped.
83
84 3. Mixed mode
85
86 The driver is written under the assumption that polling and interrupt
87 transactions are not happening simultaneously. When sending polling
88 transactions, it will check whether the ISR is active, which includes the
89 case the ISR is sending the interrupt transactions of the acquiring
90 device. If the ISR is still working, the routine sending a polling
91 transaction will get blocked and wait until the semaphore from the ISR
92 which indicates the ISR is free now.
93
94 A fatal case is, a polling transaction is in flight, but the ISR received
95 an interrupt transaction. The behavior of the driver is unpredictable,
96 which should be strictly forbidden.
97
98 We have two bits to control the interrupt:
99
100 1. The slave->trans_done bit, which is automatically asserted when a transaction is done.
101
102 This bit is cleared during an interrupt transaction, so that the interrupt
103 will be triggered when the transaction is done, or the SW can check the
104 bit to see if the transaction is done for polling transactions.
105
106 When no transaction is in-flight, the bit is kept active, so that the SW
can easily invoke the ISR by enabling the interrupt.
108
109 2. The system interrupt enable/disable, controlled by esp_intr_enable and esp_intr_disable.
110
111 The interrupt is disabled (by the ISR itself) when no interrupt transaction
112 is queued. When the bus is not occupied, any task, which queues a
113 transaction into the queue, will enable the interrupt to invoke the ISR.
114 When the bus is occupied by a device, other device will put off the
115 invoking of ISR to the moment when the bus is released. The device
acquiring the bus can still send interrupt transactions by enabling the
interrupt.
118
119 */
120
121 #include <string.h>
122 #include "driver/spi_common_internal.h"
123 #include "driver/spi_master.h"
124
125 #include "esp_log.h"
126 #include "freertos/task.h"
127 #include "freertos/queue.h"
128 #include "soc/soc_memory_layout.h"
129 #include "driver/gpio.h"
130 #include "hal/spi_hal.h"
131 #include "esp_heap_caps.h"
132
133
typedef struct spi_device_t spi_device_t;

/// struct to hold private transaction data (like tx and rx buffer for DMA).
typedef struct {
    spi_transaction_t *trans;       ///< original transaction descriptor supplied by the caller
    const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_TXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is;
                                    //otherwise sets to the original buffer or NULL if no buffer is assigned.
    uint32_t *buffer_to_rcv;        // similar to buffer_to_send, but for the receive direction (SPI_TRANS_USE_RXDATA / rx_buffer)
} spi_trans_priv_t;
143
/// Per-host driver context; one is allocated per initialized SPI master bus.
typedef struct {
    int id;                             //host id (also the index into bus_driver_ctx)
    spi_device_t* device[DEV_NUM_MAX];  //attached devices indexed by CS id; NULL marks a free slot
    intr_handle_t intr;                 //interrupt handle; never allocated for SPI1 (interrupts not allowed there)
    spi_hal_context_t hal;              //HAL context used for all register access on this host
    spi_trans_priv_t cur_trans_buf;     //private descriptor of the transaction currently in flight
    int cur_cs;                         //current device doing transaction; DEV_NUM_MAX when the bus is idle
    const spi_bus_attr_t* bus_attr;     //shared bus attributes (bus lock, DMA descriptors, max transfer size, ...)

    /**
     * the bus is permanently controlled by a device until `spi_bus_release_bus` is called. Otherwise
     * the acquiring of SPI bus will be freed when `spi_device_polling_end` is called.
     */
    spi_device_t* device_acquiring_lock;

    //debug information
    bool polling;   //in process of a polling, avoid of queue new transactions into ISR
} spi_host_t;
162
/// Per-device state; created by spi_bus_add_device and handed back to the caller as the device handle.
struct spi_device_t {
    int id;                             //CS id of this device on its host
    QueueHandle_t trans_queue;          //transactions queued by tasks, consumed by the ISR
    QueueHandle_t ret_queue;            //finished transactions returned by the ISR to the task
    spi_device_interface_config_t cfg;  //copy of the configuration passed at registration time
    spi_hal_dev_config_t hal_dev;       //pre-computed device-specific HAL (timing/register) configuration
    spi_host_t *host;                   //host this device is attached to
    spi_bus_lock_dev_handle_t dev_lock; //per-device handle into the bus lock/arbitration mechanism
};
172
//Driver context for each SPI host, indexed by host id; NULL while the host is not initialized as master
static spi_host_t* bus_driver_ctx[SOC_SPI_PERIPH_NUM] = {};

static const char *SPI_TAG = "spi_master";
//Argument-checking helper: on failure logs the message (with function and line) and
//returns `ret_val` from the *calling* function.
#define SPI_CHECK(a, str, ret_val, ...) \
    if (unlikely(!(a))) { \
        ESP_LOGE(SPI_TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
        return (ret_val); \
    }
181
182
183 static void spi_intr(void *arg);
184 static void spi_bus_intr_enable(void *host);
185 static void spi_bus_intr_disable(void *host);
186
187 static esp_err_t spi_master_deinit_driver(void* arg);
188
is_valid_host(spi_host_device_t host)189 static inline bool is_valid_host(spi_host_device_t host)
190 {
191 //SPI1 can be used as GPSPI only on ESP32
192 #if CONFIG_IDF_TARGET_ESP32
193 return host >= SPI1_HOST && host <= SPI3_HOST;
194 #elif (SOC_SPI_PERIPH_NUM == 2)
195 return host == SPI2_HOST;
196 #elif (SOC_SPI_PERIPH_NUM == 3)
197 return host >= SPI2_HOST && host <= SPI3_HOST;
198 #endif
199 }
200
// Should be called before any devices are actually registered or used.
// Currently automatically called after `spi_bus_initialize()` and when first device is registered.
//
// Allocates the per-host context, hooks the SPI interrupt (except on SPI1),
// initializes the HAL, and registers the bus-lock background control and the
// destroy callback. Returns ESP_OK, ESP_ERR_NO_MEM, or the esp_intr_alloc error.
static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
{
    esp_err_t err = ESP_OK;

    const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
    SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
    SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
    // spihost contains atomic variables, which should not be put in PSRAM
    spi_host_t* host = heap_caps_malloc(sizeof(spi_host_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (host == NULL) {
        err = ESP_ERR_NO_MEM;
        goto cleanup;
    }

    //cur_cs == DEV_NUM_MAX means "no transaction in flight"; all unnamed
    //fields (intr, hal, device[], ...) are zero-initialized by the compound literal.
    *host = (spi_host_t) {
        .id = host_id,
        .cur_cs = DEV_NUM_MAX,
        .polling = false,
        .device_acquiring_lock = NULL,
        .bus_attr = bus_attr,
    };

    if (host_id != SPI1_HOST) {
        // interrupts are not allowed on SPI1 bus
        //Allocate the interrupt initially disabled; the bus lock enables it on demand.
        err = esp_intr_alloc(spicommon_irqsource_for_host(host_id),
                             bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED,
                             spi_intr, host, &host->intr);
        if (err != ESP_OK) {
            goto cleanup;
        }
    }

    //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
    spi_hal_config_t hal_config = {
        //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = bus_attr->dma_enabled,
        .dmadesc_tx = bus_attr->dmadesc_tx,
        .dmadesc_rx = bus_attr->dmadesc_rx,
        .tx_dma_chan = bus_attr->tx_dma_chan,
        .rx_dma_chan = bus_attr->rx_dma_chan,
        .dmadesc_n = bus_attr->dma_desc_num,
    };
    spi_hal_init(&host->hal, host_id, &hal_config);

    if (host_id != SPI1_HOST) {
        //SPI1 attributes are already initialized at start up.
        spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
        spi_bus_lock_set_bg_control(lock, spi_bus_intr_enable, spi_bus_intr_disable, host);
        spi_bus_register_destroy_func(host_id, spi_master_deinit_driver, host);
    }

    bus_driver_ctx[host_id] = host;
    return ESP_OK;

cleanup:
    if (host) {
        //NOTE(review): if esp_intr_alloc failed above, the hal was never initialized
        //(only zeroed by the compound literal) — assumes spi_hal_deinit tolerates
        //a zero-initialized context; confirm against the HAL implementation.
        spi_hal_deinit(&host->hal);
        if (host->intr) {
            esp_intr_free(host->intr);
        }
    }
    free(host);
    return err;
}
269
spi_master_deinit_driver(void * arg)270 static esp_err_t spi_master_deinit_driver(void* arg)
271 {
272 spi_host_t *host = (spi_host_t*)arg;
273 SPI_CHECK(host != NULL, "host_id not in use", ESP_ERR_INVALID_STATE);
274
275 int host_id = host->id;
276 SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
277
278 int x;
279 for (x=0; x<DEV_NUM_MAX; x++) {
280 SPI_CHECK(host->device[x] == NULL, "not all CSses freed", ESP_ERR_INVALID_STATE);
281 }
282
283 spi_hal_deinit(&host->hal);
284
285 if (host->intr) {
286 esp_intr_free(host->intr);
287 }
288 free(host);
289 bus_driver_ctx[host_id] = NULL;
290 return ESP_OK;
291 }
292
/**
 * Query the dummy-cycle and MISO-delay compensation the driver would apply
 * for a given effective clock and input delay. Either output pointer may be NULL.
 */
void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dummy_o, int* cycles_remain_o)
{
    int dummy_needed = 0;
    int miso_delay = 0;

    spi_hal_cal_timing(eff_clk, gpio_is_used, input_delay_ns, &dummy_needed, &miso_delay);
    if (dummy_o != NULL) {
        *dummy_o = dummy_needed;
    }
    if (cycles_remain_o != NULL) {
        *cycles_remain_o = miso_delay;
    }
}
302
/**
 * Highest frequency at which no extra dummy cycles are needed for input
 * compensation; thin wrapper over the HAL.
 */
int spi_get_freq_limit(bool gpio_is_used, int input_delay_ns)
{
    const int freq_limit = spi_hal_get_freq_limit(gpio_is_used, input_delay_ns);
    return freq_limit;
}
307
308 /*
309 Add a device. This allocates a CS line for the device, allocates memory for the device structure and hooks
310 up the CS pin to whatever is specified.
311 */
spi_bus_add_device(spi_host_device_t host_id,const spi_device_interface_config_t * dev_config,spi_device_handle_t * handle)312 esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle)
313 {
314 spi_device_t *dev = NULL;
315 esp_err_t err = ESP_OK;
316
317 SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
318 if (bus_driver_ctx[host_id] == NULL) {
319 //lazy initialization the driver, get deinitialized by the bus is freed
320 err = spi_master_init_driver(host_id);
321 if (err != ESP_OK) {
322 return err;
323 }
324 }
325
326 spi_host_t *host = bus_driver_ctx[host_id];
327 const spi_bus_attr_t* bus_attr = host->bus_attr;
328 SPI_CHECK(dev_config->spics_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(dev_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);
329 SPI_CHECK(dev_config->clock_speed_hz > 0, "invalid sclk speed", ESP_ERR_INVALID_ARG);
330 #ifdef CONFIG_IDF_TARGET_ESP32
331 //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full
332 //duplex mode does absolutely nothing on the ESP32.
333 SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) ||
334 (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG);
335 #endif
336 uint32_t lock_flag = ((dev_config->spics_io_num != -1)? SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED: 0);
337
338 spi_bus_lock_dev_config_t lock_config = {
339 .flags = lock_flag,
340 };
341 spi_bus_lock_dev_handle_t dev_handle;
342 err = spi_bus_lock_register_dev(bus_attr->lock, &lock_config, &dev_handle);
343 if (err != ESP_OK) {
344 goto nomem;
345 }
346
347 int freecs = spi_bus_lock_get_dev_id(dev_handle);
348 SPI_CHECK(freecs != -1, "no free cs pins for the host", ESP_ERR_NOT_FOUND);
349
350 //input parameters to calculate timing configuration
351 int half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
352 int no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
353 int duty_cycle = (dev_config->duty_cycle_pos==0) ? 128 : dev_config->duty_cycle_pos;
354 int use_gpio = !(bus_attr->flags & SPICOMMON_BUSFLAG_IOMUX_PINS);
355 spi_hal_timing_param_t timing_param = {
356 .half_duplex = half_duplex,
357 .no_compensate = no_compensate,
358 .clock_speed_hz = dev_config->clock_speed_hz,
359 .duty_cycle = duty_cycle,
360 .input_delay_ns = dev_config->input_delay_ns,
361 .use_gpio = use_gpio
362 };
363
364 //output values of timing configuration
365 spi_hal_timing_conf_t temp_timing_conf;
366 int freq;
367 esp_err_t ret = spi_hal_cal_clock_conf(&timing_param, &freq, &temp_timing_conf);
368 SPI_CHECK(ret==ESP_OK, "assigned clock speed not supported", ret);
369
370 //Allocate memory for device
371 dev = malloc(sizeof(spi_device_t));
372 if (dev == NULL) goto nomem;
373 memset(dev, 0, sizeof(spi_device_t));
374
375 dev->id = freecs;
376 dev->dev_lock = dev_handle;
377
378 //Allocate queues, set defaults
379 dev->trans_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
380 dev->ret_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
381 if (!dev->trans_queue || !dev->ret_queue) {
382 goto nomem;
383 }
384
385 //We want to save a copy of the dev config in the dev struct.
386 memcpy(&dev->cfg, dev_config, sizeof(spi_device_interface_config_t));
387 dev->cfg.duty_cycle_pos = duty_cycle;
388 // TODO: if we have to change the apb clock among transactions, re-calculate this each time the apb clock lock is locked.
389
390 //Set CS pin, CS options
391 if (dev_config->spics_io_num >= 0) {
392 spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio);
393 }
394
395 //save a pointer to device in spi_host_t
396 host->device[freecs] = dev;
397 //save a pointer to host in spi_device_t
398 dev->host= host;
399
400 //initialise the device specific configuration
401 spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
402 hal_dev->mode = dev_config->mode;
403 hal_dev->cs_setup = dev_config->cs_ena_pretrans;
404 hal_dev->cs_hold = dev_config->cs_ena_posttrans;
405 //set hold_time to 0 will not actually append delay to CS
406 //set it to 1 since we do need at least one clock of hold time in most cases
407 if (hal_dev->cs_hold == 0) {
408 hal_dev->cs_hold = 1;
409 }
410 hal_dev->cs_pin_id = dev->id;
411 hal_dev->timing_conf = temp_timing_conf;
412 hal_dev->sio = (dev_config->flags) & SPI_DEVICE_3WIRE ? 1 : 0;
413 hal_dev->half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
414 hal_dev->tx_lsbfirst = dev_config->flags & SPI_DEVICE_TXBIT_LSBFIRST ? 1 : 0;
415 hal_dev->rx_lsbfirst = dev_config->flags & SPI_DEVICE_RXBIT_LSBFIRST ? 1 : 0;
416 hal_dev->no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
417 #if SOC_SPI_SUPPORT_AS_CS
418 hal_dev->as_cs = dev_config->flags& SPI_DEVICE_CLK_AS_CS ? 1 : 0;
419 #endif
420 hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0;
421
422 *handle = dev;
423 ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000);
424
425 return ESP_OK;
426
427 nomem:
428 if (dev) {
429 if (dev->trans_queue) vQueueDelete(dev->trans_queue);
430 if (dev->ret_queue) vQueueDelete(dev->ret_queue);
431 spi_bus_lock_unregister_dev(dev->dev_lock);
432 }
433 free(dev);
434 return ESP_ERR_NO_MEM;
435 }
436
spi_bus_remove_device(spi_device_handle_t handle)437 esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
438 {
439 SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG);
440 //These checks aren't exhaustive; another thread could sneak in a transaction inbetween. These are only here to
441 //catch design errors and aren't meant to be triggered during normal operation.
442 SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
443 SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
444 SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
445
446 //return
447 int spics_io_num = handle->cfg.spics_io_num;
448 if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num);
449
450 //Kill queues
451 vQueueDelete(handle->trans_queue);
452 vQueueDelete(handle->ret_queue);
453 spi_bus_lock_unregister_dev(handle->dev_lock);
454
455 assert(handle->host->device[handle->id] == handle);
456 handle->host->device[handle->id] = NULL;
457 free(handle);
458 return ESP_OK;
459 }
460
/**
 * Compute the clock register value (via *reg_o) and return the effective
 * frequency for the requested settings; thin wrapper over the LL layer.
 */
int spi_cal_clock(int fapb, int hz, int duty_cycle, uint32_t *reg_o)
{
    const int eff_clk = spi_ll_master_cal_clock(fapb, hz, duty_cycle, reg_o);
    return eff_clk;
}
465
/**
 * Return the actual frequency the hardware would run at for the requested
 * settings, without writing any register value; thin wrapper over the HAL.
 */
int spi_get_actual_clock(int fapb, int hz, int duty_cycle)
{
    const int actual_hz = spi_hal_master_cal_clock(fapb, hz, duty_cycle);
    return actual_hz;
}
470
471 // Setup the device-specified configuration registers. Called every time a new
472 // transaction is to be sent, but only apply new configurations when the device
473 // changes.
spi_setup_device(spi_device_t * dev)474 static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev)
475 {
476 spi_bus_lock_dev_handle_t dev_lock = dev->dev_lock;
477
478 if (!spi_bus_lock_touch(dev_lock)) {
479 //if the configuration is already applied, skip the following.
480 return;
481 }
482 spi_hal_context_t *hal = &dev->host->hal;
483 spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
484 spi_hal_setup_device(hal, hal_dev);
485 }
486
get_acquiring_dev(spi_host_t * host)487 static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host)
488 {
489 spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock);
490 if (!dev_lock) return NULL;
491
492 return host->device[spi_bus_lock_get_dev_id(dev_lock)];
493 }
494
495 // Debug only
496 // NOTE if the acquiring is not fully completed, `spi_bus_lock_get_acquiring_dev`
497 // may return a false `NULL` cause the function returning false `false`.
spi_bus_device_is_polling(spi_device_t * dev)498 static inline SPI_MASTER_ISR_ATTR bool spi_bus_device_is_polling(spi_device_t *dev)
499 {
500 return get_acquiring_dev(dev->host) == dev && dev->host->polling;
501 }
502
503 /*-----------------------------------------------------------------------------
504 Working Functions
505 -----------------------------------------------------------------------------*/
506
507 // The interrupt may get invoked by the bus lock.
spi_bus_intr_enable(void * host)508 static void spi_bus_intr_enable(void *host)
509 {
510 esp_intr_enable(((spi_host_t*)host)->intr);
511 }
512
513 // The interrupt is always disabled by the ISR itself, not exposed
spi_bus_intr_disable(void * host)514 static void spi_bus_intr_disable(void *host)
515 {
516 esp_intr_disable(((spi_host_t*)host)->intr);
517 }
518
// The function is called to send a new transaction, in ISR or in the task.
// Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used)
//
// Precondition: `trans_buf` must already be installed as host->cur_trans_buf
// (the rcv/send pointers below are read from there). Marks the host busy by
// setting cur_cs, programs the HAL, runs pre_cb, then kicks off the transfer.
static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
{
    spi_transaction_t *trans = NULL;
    spi_host_t *host = dev->host;
    spi_hal_context_t *hal = &(host->hal);
    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);

    trans = trans_buf->trans;
    //mark the bus busy with this device's CS until spi_post_trans resets it
    host->cur_cs = dev->id;

    //Reconfigure according to device settings, the function only has effect when the dev_id is changed.
    spi_setup_device(dev);

    //set the transaction specific configuration each time before a transaction setup
    spi_hal_trans_config_t hal_trans = {};
    hal_trans.tx_bitlen = trans->length;
    hal_trans.rx_bitlen = trans->rxlength;
    hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
    hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
    hal_trans.cmd = trans->cmd;
    hal_trans.addr = trans->addr;
    //Set up QIO/DIO if needed
    //SPI_TRANS_MODE_DIOQIO_ADDR selects whether cmd+addr also use the multi-line mode
    hal_trans.io_mode = (trans->flags & SPI_TRANS_MODE_DIO ?
                            (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_DIO : SPI_LL_IO_MODE_DUAL) :
                        (trans->flags & SPI_TRANS_MODE_QIO ?
                            (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_QIO : SPI_LL_IO_MODE_QUAD) :
                        SPI_LL_IO_MODE_NORMAL
                        ));

    //Per-transaction cmd/addr/dummy widths override the device defaults when the
    //corresponding SPI_TRANS_VARIABLE_* flag is set (trans is then a spi_transaction_ext_t).
    if (trans->flags & SPI_TRANS_VARIABLE_CMD) {
        hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits;
    } else {
        hal_trans.cmd_bits = dev->cfg.command_bits;
    }
    if (trans->flags & SPI_TRANS_VARIABLE_ADDR) {
        hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits;
    } else {
        hal_trans.addr_bits = dev->cfg.address_bits;
    }
    if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) {
        hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits;
    } else {
        hal_trans.dummy_bits = dev->cfg.dummy_bits;
    }

    spi_hal_setup_trans(hal, hal_dev, &hal_trans);
    spi_hal_prepare_data(hal, hal_dev, &hal_trans);

    //Call pre-transmission callback, if any
    if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
    //Kick off transfer
    spi_hal_user_start(hal);
}
574
575 // The function is called when a transaction is done, in ISR or in the task.
576 // Fetch the data from FIFO and call the ``post_cb``.
spi_post_trans(spi_host_t * host)577 static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
578 {
579 spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
580
581 spi_hal_fetch_result(&host->hal);
582 //Call post-transaction callback, if any
583 spi_device_t* dev = host->device[host->cur_cs];
584 if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans);
585
586 host->cur_cs = DEV_NUM_MAX;
587 }
588
// This is run in interrupt context.
//
// One ISR invocation (1) finishes the in-flight transaction, if any, returning
// its descriptor to the device's ret_queue, then (2) repeatedly pulls the next
// transaction allowed by the bus lock (honoring a bus-acquiring device) and
// starts it, until the bus lock tells it to exit.
static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
{
    BaseType_t do_yield = pdFALSE;
    spi_host_t *host = (spi_host_t *)arg;
    const spi_bus_attr_t* bus_attr = host->bus_attr;

    assert(spi_hal_usr_is_done(&host->hal));

    /*
     * Help to skip the handling of in-flight transaction, and disable of the interrupt.
     * The esp_intr_enable will be called (b) after new BG request is queued (a) in the task;
     * while esp_intr_disable should be called (c) if we check and found the sending queue is empty (d).
     * If (c) is called after (d), then there is a risk that things happens in this sequence:
     * (d) -> (a) -> (b) -> (c), and in this case the interrupt is disabled while there's pending BG request in the queue.
     * To avoid this, interrupt is disabled here, and re-enabled later if required.
     */
    if (!spi_bus_lock_bg_entry(bus_attr->lock)) {
        /*------------ deal with the in-flight transaction -----------------*/
        assert(host->cur_cs != DEV_NUM_MAX);
        //Okay, transaction is done.
        const int cs = host->cur_cs;
        //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
        if (bus_attr->dma_enabled) {
            //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
            spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan);
        }

        //cur_cs is changed to DEV_NUM_MAX here
        spi_post_trans(host);
        // spi_bus_lock_bg_pause(bus_attr->lock);
        //Return transaction descriptor.
        xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield);
#ifdef CONFIG_PM_ENABLE
        //Release APB frequency lock
        esp_pm_lock_release(bus_attr->pm_lock);
#endif
    }

    /*------------ new transaction starts here ------------------*/
    assert(host->cur_cs == DEV_NUM_MAX);

    spi_bus_lock_handle_t lock = host->bus_attr->lock;
    BaseType_t trans_found = pdFALSE;


    // There should be remaining requests
    BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock));

    do {
        spi_bus_lock_dev_handle_t acq_dev_lock = spi_bus_lock_get_acquiring_dev(lock);
        spi_bus_lock_dev_handle_t desired_dev = acq_dev_lock;
        bool resume_task = false;
        spi_device_t* device_to_send = NULL;

        if (!acq_dev_lock) {
            // This function may assign a new acquiring device, otherwise it will suggest a desired device with BG active
            // We use either of them without further searching in the devices.
            // If the return value is true, it means either there's no acquiring device, or the acquiring device's BG is active,
            // We stay in the ISR to deal with those transactions of desired device, otherwise nothing will be done, check whether we need to resume some other tasks, or just quit the ISR
            resume_task = spi_bus_lock_bg_check_dev_acq(lock, &desired_dev);
        }

        if (!resume_task) {
            //Try to pull the next queued transaction of the chosen device.
            bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev);
            if (dev_has_req) {
                device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)];
                trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield);
                if (!trans_found) {
                    //queue turned out empty: clear the stale BG request for this device
                    spi_bus_lock_bg_clear_req(desired_dev);
                }
            }
        }

        if (trans_found) {
            spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf;
            if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
                //mark channel as active, so that the DMA will not be reset by the slave
                //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
                spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan);
            }
            spi_new_trans(device_to_send, cur_trans_buf);
        }
        // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG),
        // or resume acquiring device task (if quit due to bus acquiring).
    } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield));

    if (do_yield) portYIELD_FROM_ISR();
}
678
// Validate a transaction descriptor against the device and bus capabilities
// before it is queued/started. May MUTATE the descriptor: in full-duplex mode
// a zero rxlength is defaulted to length. Returns ESP_OK or ESP_ERR_INVALID_ARG.
static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc)
{
    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
    spi_host_t *host = handle->host;
    const spi_bus_attr_t* bus_attr = host->bus_attr;
    bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer);
    bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer);
    spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc;
    bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0);
    bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy;
    bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0);

    //check transmission length
    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
    SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
    SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
    SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG);
    //check working mode
    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "incompatible iface params", ESP_ERR_INVALID_ARG);
    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "incompatible iface params", ESP_ERR_INVALID_ARG);
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG );
#elif CONFIG_IDF_TARGET_ESP32S3
    SPI_CHECK(!is_half_duplex || !tx_enabled || !rx_enabled, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
#endif
    //MOSI phase is skipped only when both tx_buffer and SPI_TRANS_USE_TXDATA are not set.
    SPI_CHECK(trans_desc->length != 0 || !tx_enabled, "trans tx_buffer should be NULL and SPI_TRANS_USE_TXDATA should be cleared to skip MOSI phase.", ESP_ERR_INVALID_ARG);
    //MISO phase is skipped only when both rx_buffer and SPI_TRANS_USE_RXDATA are not set.
    //If set rxlength=0 in full_duplex mode, it will be automatically set to length
    SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG);
    //In Full duplex mode, default rxlength to be the same as length, if not filled in.
    // set rxlength to length is ok, even when rx buffer=NULL
    if (trans_desc->rxlength==0 && !is_half_duplex) {
        trans_desc->rxlength=trans_desc->length;
    }
    //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode.
    SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG);

    return ESP_OK;
}
720
uninstall_priv_desc(spi_trans_priv_t * trans_buf)721 static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
722 {
723 spi_transaction_t *trans_desc = trans_buf->trans;
724 if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
725 trans_buf->buffer_to_send != trans_desc->tx_buffer) {
726 free((void *)trans_buf->buffer_to_send); //force free, ignore const
727 }
728 // copy data from temporary DMA-capable buffer back to IRAM buffer and free the temporary one.
729 if ((void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] &&
730 trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
731 if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
732 memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
733 } else {
734 memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
735 }
736 free(trans_buf->buffer_to_rcv);
737 }
738 }
739
setup_priv_desc(spi_transaction_t * trans_desc,spi_trans_priv_t * new_desc,bool isdma)740 static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_transaction_t *trans_desc, spi_trans_priv_t* new_desc, bool isdma)
741 {
742 *new_desc = (spi_trans_priv_t) { .trans = trans_desc, };
743
744 // rx memory assign
745 uint32_t* rcv_ptr;
746 if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
747 rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
748 } else {
749 //if not use RXDATA neither rx_buffer, buffer_to_rcv assigned to NULL
750 rcv_ptr = trans_desc->rx_buffer;
751 }
752 if (rcv_ptr && isdma && (!esp_ptr_dma_capable(rcv_ptr) || ((int)rcv_ptr % 4 != 0))) {
753 //if rxbuf in the desc not DMA-capable, malloc a new one. The rx buffer need to be length of multiples of 32 bits to avoid heap corruption.
754 ESP_LOGD(SPI_TAG, "Allocate RX buffer for DMA" );
755 rcv_ptr = heap_caps_malloc((trans_desc->rxlength + 31) / 8, MALLOC_CAP_DMA);
756 if (rcv_ptr == NULL) goto clean_up;
757 }
758 new_desc->buffer_to_rcv = rcv_ptr;
759
760 // tx memory assign
761 const uint32_t *send_ptr;
762 if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
763 send_ptr = (uint32_t *)&trans_desc->tx_data[0];
764 } else {
765 //if not use TXDATA neither tx_buffer, tx data assigned to NULL
766 send_ptr = trans_desc->tx_buffer ;
767 }
768 if (send_ptr && isdma && !esp_ptr_dma_capable( send_ptr )) {
769 //if txbuf in the desc not DMA-capable, malloc a new one
770 ESP_LOGD(SPI_TAG, "Allocate TX buffer for DMA" );
771 uint32_t *temp = heap_caps_malloc((trans_desc->length + 7) / 8, MALLOC_CAP_DMA);
772 if (temp == NULL) goto clean_up;
773
774 memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 );
775 send_ptr = temp;
776 }
777 new_desc->buffer_to_send = send_ptr;
778
779 return ESP_OK;
780
781 clean_up:
782 uninstall_priv_desc(new_desc);
783 return ESP_ERR_NO_MEM;
784 }
785
spi_device_queue_trans(spi_device_handle_t handle,spi_transaction_t * trans_desc,TickType_t ticks_to_wait)786 esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
787 {
788 esp_err_t ret = check_trans_valid(handle, trans_desc);
789 if (ret != ESP_OK) return ret;
790
791 spi_host_t *host = handle->host;
792
793 SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
794
795 spi_trans_priv_t trans_buf;
796 ret = setup_priv_desc(trans_desc, &trans_buf, (host->bus_attr->dma_enabled));
797 if (ret != ESP_OK) return ret;
798
799 #ifdef CONFIG_PM_ENABLE
800 esp_pm_lock_acquire(host->bus_attr->pm_lock);
801 #endif
802 //Send to queue and invoke the ISR.
803
804 BaseType_t r = xQueueSend(handle->trans_queue, (void *)&trans_buf, ticks_to_wait);
805 if (!r) {
806 ret = ESP_ERR_TIMEOUT;
807 #ifdef CONFIG_PM_ENABLE
808 //Release APB frequency lock
809 esp_pm_lock_release(host->bus_attr->pm_lock);
810 #endif
811 goto clean_up;
812 }
813
814 // The ISR will be invoked at correct time by the lock with `spi_bus_intr_enable`.
815 ret = spi_bus_lock_bg_request(handle->dev_lock);
816 if (ret != ESP_OK) {
817 goto clean_up;
818 }
819 return ESP_OK;
820
821 clean_up:
822 uninstall_priv_desc(&trans_buf);
823 return ret;
824 }
825
spi_device_get_trans_result(spi_device_handle_t handle,spi_transaction_t ** trans_desc,TickType_t ticks_to_wait)826 esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
827 {
828 BaseType_t r;
829 spi_trans_priv_t trans_buf;
830 SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
831
832 //use the interrupt, block until return
833 r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
834 if (!r) {
835 // The memory occupied by rx and tx DMA buffer destroyed only when receiving from the queue (transaction finished).
836 // If timeout, wait and retry.
837 // Every in-flight transaction request occupies internal memory as DMA buffer if needed.
838 return ESP_ERR_TIMEOUT;
839 }
840 //release temporary buffers
841 uninstall_priv_desc(&trans_buf);
842 (*trans_desc) = trans_buf.trans;
843
844 return ESP_OK;
845 }
846
847 //Porcelain to do one blocking transmission.
spi_device_transmit(spi_device_handle_t handle,spi_transaction_t * trans_desc)848 esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc)
849 {
850 esp_err_t ret;
851 spi_transaction_t *ret_trans;
852 //ToDo: check if any spi transfers in flight
853 ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY);
854 if (ret != ESP_OK) return ret;
855
856 ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY);
857 if (ret != ESP_OK) return ret;
858
859 assert(ret_trans == trans_desc);
860 return ESP_OK;
861 }
862
spi_device_acquire_bus(spi_device_t * device,TickType_t wait)863 esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait)
864 {
865 spi_host_t *const host = device->host;
866 SPI_CHECK(wait==portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG);
867 SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE );
868
869 esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait);
870 if (ret != ESP_OK) {
871 return ret;
872 }
873 host->device_acquiring_lock = device;
874
875 ESP_LOGD(SPI_TAG, "device%d locked the bus", device->id);
876
877 #ifdef CONFIG_PM_ENABLE
878 // though we don't suggest to block the task before ``release_bus``, still allow doing so.
879 // this keeps the spi clock at 80MHz even if all tasks are blocked
880 esp_pm_lock_acquire(host->bus_attr->pm_lock);
881 #endif
882 //configure the device ahead so that we don't need to do it again in the following transactions
883 spi_setup_device(host->device[device->id]);
884 //the DMA is also occupied by the device, all the slave devices that using DMA should wait until bus released.
885 if (host->bus_attr->dma_enabled) {
886 //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
887 spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan);
888 }
889 return ESP_OK;
890 }
891
892 // This function restore configurations required in the non-polling mode
spi_device_release_bus(spi_device_t * dev)893 void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
894 {
895 spi_host_t *host = dev->host;
896
897 if (spi_bus_device_is_polling(dev)){
898 ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress.");
899 assert(0);
900 }
901
902 if (host->bus_attr->dma_enabled) {
903 //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
904 spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan);
905 }
906 //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
907
908 //allow clock to be lower than 80MHz when all tasks blocked
909 #ifdef CONFIG_PM_ENABLE
910 //Release APB frequency lock
911 esp_pm_lock_release(host->bus_attr->pm_lock);
912 #endif
913 ESP_LOGD(SPI_TAG, "device%d release bus", dev->id);
914
915 host->device_acquiring_lock = NULL;
916 esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
917 assert(ret == ESP_OK);
918 }
919
spi_device_polling_start(spi_device_handle_t handle,spi_transaction_t * trans_desc,TickType_t ticks_to_wait)920 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
921 {
922 esp_err_t ret;
923 SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG);
924
925 spi_host_t *host = handle->host;
926 ret = check_trans_valid(handle, trans_desc);
927 if (ret!=ESP_OK) return ret;
928
929 SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
930
931 if (host->device_acquiring_lock != handle) {
932 ret = spi_bus_lock_acquire_start(handle->dev_lock, ticks_to_wait);
933 } else {
934 ret = spi_bus_lock_wait_bg_done(handle->dev_lock, ticks_to_wait);
935 }
936 if (ret != ESP_OK) return ret;
937
938 ret = setup_priv_desc(trans_desc, &host->cur_trans_buf, (host->bus_attr->dma_enabled));
939 if (ret!=ESP_OK) return ret;
940
941 //Polling, no interrupt is used.
942 host->polling = true;
943
944 ESP_LOGV(SPI_TAG, "polling trans");
945 spi_new_trans(handle, &host->cur_trans_buf);
946
947 return ESP_OK;
948 }
949
spi_device_polling_end(spi_device_handle_t handle,TickType_t ticks_to_wait)950 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle, TickType_t ticks_to_wait)
951 {
952 SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
953 spi_host_t *host = handle->host;
954
955 assert(host->cur_cs == handle->id);
956 assert(handle == get_acquiring_dev(host));
957
958 TickType_t start = xTaskGetTickCount();
959 while (!spi_hal_usr_is_done(&host->hal)) {
960 TickType_t end = xTaskGetTickCount();
961 if (end - start > ticks_to_wait) {
962 return ESP_ERR_TIMEOUT;
963 }
964 }
965
966 ESP_LOGV(SPI_TAG, "polling trans done");
967 //deal with the in-flight transaction
968 spi_post_trans(host);
969 //release temporary buffers
970 uninstall_priv_desc(&host->cur_trans_buf);
971
972 host->polling = false;
973 if (host->device_acquiring_lock != handle) {
974 assert(host->device_acquiring_lock == NULL);
975 spi_bus_lock_acquire_end(handle->dev_lock);
976 }
977
978 return ESP_OK;
979 }
980
spi_device_polling_transmit(spi_device_handle_t handle,spi_transaction_t * trans_desc)981 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
982 {
983 esp_err_t ret;
984 ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY);
985 if (ret != ESP_OK) return ret;
986
987 return spi_device_polling_end(handle, portMAX_DELAY);
988 }
989