/*
 * Copyright (c) 2020 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT espressif_esp32_spi

/* Include esp-idf headers first to avoid redefining BIT() macro */
#include <hal/spi_hal.h>
#include <esp_attr.h>
#include <esp_clk_tree.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(esp32_spi, CONFIG_SPI_LOG_LEVEL);

#include <soc.h>
#include <esp_memory_utils.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#if defined(CONFIG_SOC_SERIES_ESP32C2) || defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32C6)
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#else
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#endif
#ifdef SOC_GDMA_SUPPORTED
#include <hal/gdma_hal.h>
#include <hal/gdma_ll.h>
#endif
#include <zephyr/drivers/clock_control.h>
#include "spi_context.h"
#include "spi_esp32_spim.h"

#if defined(CONFIG_SOC_SERIES_ESP32C2) || defined(CONFIG_SOC_SERIES_ESP32C3) || \
	defined(CONFIG_SOC_SERIES_ESP32C6)
#define ISR_HANDLER isr_handler_t
#else
#define ISR_HANDLER intr_handler_t
#endif

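/* Largest 32-bit-aligned chunk a single DMA descriptor can carry (the
 * descriptor length field tops out at 4095 bytes)
 */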
#define SPI_DMA_MAX_BUFFER_SIZE 4092

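/* A transfer is ongoing while either the TX or RX context still has frames queued */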
static bool spi_esp32_transfer_ongoing(struct spi_esp32_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

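/*
 * Finish a transaction: in interrupt mode, quiesce and acknowledge the
 * interrupt and signal the waiting context; in both modes, release the
 * chip-select line.
 */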
static inline void spi_esp32_complete(const struct device *dev,
				      struct spi_esp32_data *data,
				      spi_dev_t *spi, int status)
{
#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_ll_disable_int(spi);
	spi_ll_clear_int_stat(spi);
#endif

	spi_context_cs_control(&data->ctx, false);

#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_context_complete(&data->ctx, dev, status);
#endif
}

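/*
 * Transfer one chunk of the current buffer set. Chunks are capped at the
 * DMA descriptor limit (or the internal FIFO size when DMA is disabled),
 * so callers loop until spi_esp32_transfer_ongoing() reports completion.
 */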
static int IRAM_ATTR spi_esp32_transfer(const struct device *dev)
{
	struct spi_esp32_data *data = dev->data;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_context *ctx = &data->ctx;
	spi_hal_context_t *hal = &data->hal;
	spi_hal_dev_config_t *hal_dev = &data->dev_config;
	spi_hal_trans_config_t *hal_trans = &data->trans_config;
	size_t chunk_len_bytes = spi_context_max_continuous_chunk(&data->ctx) * data->dfs;
	size_t max_buf_sz =
		cfg->dma_enabled ? SPI_DMA_MAX_BUFFER_SIZE : SOC_SPI_MAXIMUM_BUFFER_SIZE;
	size_t transfer_len_bytes = MIN(chunk_len_bytes, max_buf_sz);
	size_t transfer_len_frames = transfer_len_bytes / data->dfs;
	size_t bit_len = transfer_len_bytes << 3;
	uint8_t *rx_temp = NULL;
	uint8_t *tx_temp = NULL;
	/* These byte counts can reach SPI_DMA_MAX_BUFFER_SIZE (4092), so they
	 * must not be narrowed to uint8_t
	 */
	size_t dma_len_tx = MIN(ctx->tx_len * data->dfs, SPI_DMA_MAX_BUFFER_SIZE);
	size_t dma_len_rx = MIN(ctx->rx_len * data->dfs, SPI_DMA_MAX_BUFFER_SIZE);

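	/* Buffers the DMA engine cannot access directly are staged through
	 * temporary bounce buffers allocated from the heap.
	 */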
	if (cfg->dma_enabled) {
		/* bit_len needs to be at least one byte long when using DMA */
		bit_len = !bit_len ? 8 : bit_len;
		if (ctx->tx_buf && !esp_ptr_dma_capable((uint32_t *)&ctx->tx_buf[0])) {
			LOG_DBG("Tx buffer not DMA capable");
			tx_temp = k_malloc(dma_len_tx);
			if (!tx_temp) {
				LOG_ERR("Error allocating temp buffer Tx");
				return -ENOMEM;
			}
			memcpy(tx_temp, &ctx->tx_buf[0], dma_len_tx);
		}
		if (ctx->rx_buf && (!esp_ptr_dma_capable((uint32_t *)&ctx->rx_buf[0]) ||
				    ((int)&ctx->rx_buf[0] % 4 != 0) || (dma_len_rx % 4 != 0))) {
			/* The RX buffer needs to be a multiple of 32 bits in
			 * length to avoid heap corruption.
			 */
103 LOG_DBG("Rx buffer not DMA capable");
104 rx_temp = k_calloc(((dma_len_rx << 3) + 31) / 8, sizeof(uint8_t));
105 if (!rx_temp) {
106 LOG_ERR("Error allocating temp buffer Rx");
107 k_free(tx_temp);
108 return -ENOMEM;
109 }
110 }
111 }
112
	/* clean up and prepare SPI hal */
	memset((uint32_t *)hal->hw->data_buf, 0, sizeof(hal->hw->data_buf));
	hal_trans->send_buffer = tx_temp ? tx_temp : (uint8_t *)ctx->tx_buf;
	hal_trans->rcv_buffer = rx_temp ? rx_temp : ctx->rx_buf;
	hal_trans->tx_bitlen = bit_len;
	hal_trans->rx_bitlen = bit_len;

	/* keep cs line active until last transmission */
	hal_trans->cs_keep_active =
		(!ctx->num_cs_gpios &&
		 (ctx->rx_count > 1 || ctx->tx_count > 1 || ctx->rx_len > transfer_len_frames ||
		  ctx->tx_len > transfer_len_frames));

	/* configure SPI */
	spi_hal_setup_trans(hal, hal_dev, hal_trans);
	spi_hal_prepare_data(hal, hal_dev, hal_trans);

	/* send data */
	spi_hal_user_start(hal);
	spi_context_update_tx(&data->ctx, data->dfs, transfer_len_frames);

	while (!spi_hal_usr_is_done(hal)) {
		/* nop */
	}

	/* read data */
	spi_hal_fetch_result(hal);

	if (rx_temp) {
		memcpy(&ctx->rx_buf[0], rx_temp, transfer_len_bytes);
	}

	spi_context_update_rx(&data->ctx, data->dfs, transfer_len_frames);

	k_free(tx_temp);
	k_free(rx_temp);

	return 0;
}

#ifdef CONFIG_SPI_ESP32_INTERRUPT
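/*
 * Transaction-done ISR: drains the remaining chunks of the current
 * transaction, then completes the context. IRAM_ATTR matches the
 * ESP_INTR_FLAG_IRAM allocation performed in spi_esp32_init().
 */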
static void IRAM_ATTR spi_esp32_isr(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;

	do {
		spi_esp32_transfer(dev);
	} while (spi_esp32_transfer_ongoing(data));

	spi_esp32_complete(dev, data, cfg->spi, 0);
}
#endif

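/*
 * Enable the DMA clock and route the TX/RX DMA channels to the SPI
 * peripheral: through the GDMA HAL on GDMA-capable SoCs, or with a
 * channel offset of one (plus the DPORT mux on the ESP32) on legacy SoCs.
 */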
static int spi_esp32_init_dma(const struct device *dev)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	uint8_t channel_offset;

	if (clock_control_on(cfg->clock_dev, (clock_control_subsys_t)cfg->dma_clk_src)) {
		LOG_ERR("Could not enable DMA clock");
		return -EIO;
	}

#ifdef SOC_GDMA_SUPPORTED
	gdma_hal_init(&data->hal_gdma, 0);
	gdma_ll_enable_clock(data->hal_gdma.dev, true);
	gdma_ll_tx_reset_channel(data->hal_gdma.dev, cfg->dma_host);
	gdma_ll_rx_reset_channel(data->hal_gdma.dev, cfg->dma_host);
	gdma_ll_tx_connect_to_periph(data->hal_gdma.dev, cfg->dma_host, GDMA_TRIG_PERIPH_SPI,
				     cfg->dma_host);
	gdma_ll_rx_connect_to_periph(data->hal_gdma.dev, cfg->dma_host, GDMA_TRIG_PERIPH_SPI,
				     cfg->dma_host);
	channel_offset = 0;
#else
	channel_offset = 1;
#endif /* SOC_GDMA_SUPPORTED */
#ifdef CONFIG_SOC_SERIES_ESP32
	/* Connect SPI and DMA */
	DPORT_SET_PERI_REG_BITS(DPORT_SPI_DMA_CHAN_SEL_REG, 3, cfg->dma_host + 1,
				((cfg->dma_host + 1) * 2));
#endif /* CONFIG_SOC_SERIES_ESP32 */

	data->hal_config.dma_in = (spi_dma_dev_t *)cfg->spi;
	data->hal_config.dma_out = (spi_dma_dev_t *)cfg->spi;
	data->hal_config.dma_enabled = true;
	data->hal_config.tx_dma_chan = cfg->dma_host + channel_offset;
	data->hal_config.rx_dma_chan = cfg->dma_host + channel_offset;
	data->hal_config.dmadesc_n = 1;
	data->hal_config.dmadesc_rx = &data->dma_desc_rx;
	data->hal_config.dmadesc_tx = &data->dma_desc_tx;

	if (data->hal_config.dmadesc_tx == NULL || data->hal_config.dmadesc_rx == NULL) {
		k_free(data->hal_config.dmadesc_tx);
		k_free(data->hal_config.dmadesc_rx);
		return -ENOMEM;
	}

	spi_hal_init(&data->hal, cfg->dma_host + 1, &data->hal_config);
	return 0;
}

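/*
 * One-time driver init: enable the module clock, set up DMA and the
 * completion interrupt when configured, configure all CS GPIOs, and cache
 * the source clock frequency for later timing calculations.
 */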
static int spi_esp32_init(const struct device *dev)
{
	int err;
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	spi_hal_context_t *hal = &data->hal;

	if (!cfg->clock_dev) {
		return -EINVAL;
	}

	if (!device_is_ready(cfg->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* Enables SPI peripheral */
	err = clock_control_on(cfg->clock_dev, cfg->clock_subsys);
	if (err < 0) {
		LOG_ERR("Error enabling SPI clock");
		return err;
	}

	spi_ll_master_init(hal->hw);

	if (cfg->dma_enabled) {
		err = spi_esp32_init_dma(dev);
		if (err) {
			return err;
		}
	}

#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_ll_disable_int(cfg->spi);
	spi_ll_clear_int_stat(cfg->spi);

	err = esp_intr_alloc(cfg->irq_source,
			     ESP_PRIO_TO_FLAGS(cfg->irq_priority) |
				ESP_INT_FLAGS_CHECK(cfg->irq_flags) | ESP_INTR_FLAG_IRAM,
			     (ISR_HANDLER)spi_esp32_isr,
			     (void *)dev,
			     NULL);

	if (err != 0) {
		LOG_ERR("could not allocate interrupt (err %d)", err);
		return err;
	}
#endif

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	err = esp_clk_tree_src_get_freq_hz(
		cfg->clock_source, ESP_CLK_TREE_SRC_FREQ_PRECISION_APPROX, &data->clock_source_hz);
	if (err) {
		LOG_ERR("Could not get clock source frequency (%d)", err);
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

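/* Map the SPI_LINES_* field of the operation word to a number of data lines */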
static inline uint8_t spi_esp32_get_line_mode(uint16_t operation)
{
	if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES)) {
		switch (operation & SPI_LINES_MASK) {
		case SPI_LINES_SINGLE:
			return 1;
		case SPI_LINES_DUAL:
			return 2;
		case SPI_LINES_QUAD:
			return 4;
		case SPI_LINES_OCTAL:
			return 8;
		default:
			break;
		}
	}

	return 1;
}

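/*
 * Apply a spi_config to the controller: pinctrl state, clock/timing
 * parameters, bit order, line mode, SPI mode and CS timing. Returns early
 * when the context already holds the same configuration.
 */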
static int IRAM_ATTR spi_esp32_configure(const struct device *dev,
					 const struct spi_config *spi_cfg)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	spi_hal_context_t *hal = &data->hal;
	spi_hal_dev_config_t *hal_dev = &data->dev_config;
	int freq;

	if (spi_context_configured(ctx, spi_cfg)) {
		return 0;
	}

	ctx->config = spi_cfg;

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_OP_MODE_SLAVE) {
		LOG_ERR("Slave mode not supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}

	hal_dev->cs_pin_id = ctx->config->slave;
	int ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);

	if (ret) {
		LOG_ERR("Failed to configure SPI pins");
		return ret;
	}

	/* input parameters to calculate timing configuration */
	spi_hal_timing_param_t timing_param = {
		.half_duplex = hal_dev->half_duplex,
		.no_compensate = hal_dev->no_compensate,
		.expected_freq = spi_cfg->frequency,
		.duty_cycle = cfg->duty_cycle == 0 ? 128 : cfg->duty_cycle,
		.input_delay_ns = cfg->input_delay_ns,
		.use_gpio = !cfg->use_iomux,
		.clk_src_hz = data->clock_source_hz,
	};

	spi_hal_cal_clock_conf(&timing_param, &freq, &hal_dev->timing_conf);

	data->trans_config.dummy_bits = hal_dev->timing_conf.timing_dummy;

	hal_dev->tx_lsbfirst = spi_cfg->operation & SPI_TRANSFER_LSB ? 1 : 0;
	hal_dev->rx_lsbfirst = spi_cfg->operation & SPI_TRANSFER_LSB ? 1 : 0;

	data->trans_config.line_mode.data_lines = spi_esp32_get_line_mode(spi_cfg->operation);

	/* multi-line command and address phases are not supported */
	data->trans_config.line_mode.addr_lines = 1;
	data->trans_config.line_mode.cmd_lines = 1;

	/* SPI mode */
	hal_dev->mode = 0;
	if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) {
		hal_dev->mode = BIT(0);
	}
	if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) {
		hal_dev->mode |= BIT(1);
	}

	/* Chip-select setup and hold times; GPIO-driven CS lines have their
	 * own delay parameters, so these only apply to the hardware CS
	 */
	if (!spi_cs_is_gpio(spi_cfg)) {
		hal_dev->cs_hold = cfg->cs_hold;
		hal_dev->cs_setup = cfg->cs_setup;
	}

	spi_hal_setup_device(hal, hal_dev);

	/* Workaround to handle default state of MISO and MOSI lines */
#ifndef CONFIG_SOC_SERIES_ESP32
	spi_dev_t *hw = hal->hw;

	if (cfg->line_idle_low) {
		hw->ctrl.d_pol = 0;
		hw->ctrl.q_pol = 0;
	} else {
		hw->ctrl.d_pol = 1;
		hw->ctrl.q_pol = 1;
	}
#endif

	/*
	 * Workaround for ESP32-S3 and ESP32-Cx SoCs: a dummy transaction is
	 * needed to sync CLK and the software-controlled CS when the SPI is
	 * in mode 3
	 */
#if defined(CONFIG_SOC_SERIES_ESP32S3) || defined(CONFIG_SOC_SERIES_ESP32C2) || \
	defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6)
	if (ctx->num_cs_gpios && (hal_dev->mode & (SPI_MODE_CPOL | SPI_MODE_CPHA))) {
		spi_esp32_transfer(dev);
	}
#endif

	return 0;
}

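/* Data frame size in bytes (1-4), derived from the configured word size */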
static inline uint8_t spi_esp32_get_frame_size(const struct spi_config *spi_cfg)
{
	uint8_t dfs = SPI_WORD_SIZE_GET(spi_cfg->operation);

	dfs /= 8;
	if ((dfs == 0) || (dfs > 4)) {
		LOG_WRN("Unsupported dfs, 1-byte size will be used");
		dfs = 1;
	}
	return dfs;
}

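/*
 * Common transceive path. In interrupt mode the transfer is kicked off by
 * raising the interrupt status and finished from the ISR; otherwise it is
 * polled to completion right here.
 */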
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs, bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_esp32_config *cfg = dev->config;
	struct spi_esp32_data *data = dev->data;
	int ret;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_ESP32_INTERRUPT
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	data->dfs = spi_esp32_get_frame_size(spi_cfg);

	ret = spi_esp32_configure(dev, spi_cfg);
	if (ret) {
		goto done;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_SPI_ESP32_INTERRUPT
	spi_ll_enable_int(cfg->spi);
	spi_ll_set_int_stat(cfg->spi);
#else

	do {
		spi_esp32_transfer(dev);
	} while (spi_esp32_transfer_ongoing(data));

	spi_esp32_complete(dev, data, cfg->spi, 0);

#endif /* CONFIG_SPI_ESP32_INTERRUPT */

done:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_esp32_transceive(const struct device *dev,
				const struct spi_config *spi_cfg,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_esp32_transceive_async(const struct device *dev,
				      const struct spi_config *spi_cfg,
				      const struct spi_buf_set *tx_bufs,
				      const struct spi_buf_set *rx_bufs,
				      spi_callback_t cb,
				      void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_esp32_release(const struct device *dev,
			     const struct spi_config *config)
{
	struct spi_esp32_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

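/*
 * The driver is consumed through the generic Zephyr SPI API. A minimal
 * polling-mode usage sketch, assuming a hypothetical `spi2` node label and
 * an 8-bit, mode-0 peripheral:
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(spi2));
 *	struct spi_config cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *	};
 *	uint8_t tx_data[2] = { 0x9f, 0x00 };
 *	const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *
 *	int err = spi_transceive(spi, &cfg, &tx, NULL);
 */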
static DEVICE_API(spi, spi_api) = {
	.transceive = spi_esp32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_esp32_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_esp32_release
};

#ifdef CONFIG_SOC_SERIES_ESP32
#define GET_AS_CS(idx) .as_cs = DT_INST_PROP(idx, clk_as_cs),
#else
#define GET_AS_CS(idx)
#endif

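/*
 * Per-instance expansion: defines the data, config and device objects for
 * every enabled `espressif,esp32-spi` node in the devicetree.
 */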
#define ESP32_SPI_INIT(idx)								\
											\
	PINCTRL_DT_INST_DEFINE(idx);							\
											\
	static struct spi_esp32_data spi_data_##idx = {					\
		SPI_CONTEXT_INIT_LOCK(spi_data_##idx, ctx),				\
		SPI_CONTEXT_INIT_SYNC(spi_data_##idx, ctx),				\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx)			\
		.hal = {								\
			.hw = (spi_dev_t *)DT_INST_REG_ADDR(idx),			\
		},									\
		.dev_config = {								\
			.half_duplex = DT_INST_PROP(idx, half_duplex),			\
			GET_AS_CS(idx)							\
			.positive_cs = DT_INST_PROP(idx, positive_cs),			\
			.no_compensate = DT_INST_PROP(idx, dummy_comp),			\
			.sio = DT_INST_PROP(idx, sio)					\
		}									\
	};										\
											\
	static const struct spi_esp32_config spi_config_##idx = {			\
		.spi = (spi_dev_t *)DT_INST_REG_ADDR(idx),				\
											\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),			\
		.duty_cycle = 0,							\
		.input_delay_ns = 0,							\
		.irq_source = DT_INST_IRQ_BY_IDX(idx, 0, irq),				\
		.irq_priority = DT_INST_IRQ_BY_IDX(idx, 0, priority),			\
		.irq_flags = DT_INST_IRQ_BY_IDX(idx, 0, flags),				\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),				\
		.clock_subsys =								\
			(clock_control_subsys_t)DT_INST_CLOCKS_CELL(idx, offset),	\
		.use_iomux = DT_INST_PROP(idx, use_iomux),				\
		.dma_enabled = DT_INST_PROP(idx, dma_enabled),				\
		.dma_clk_src = DT_INST_PROP(idx, dma_clk),				\
		.dma_host = DT_INST_PROP(idx, dma_host),				\
		.cs_setup = DT_INST_PROP_OR(idx, cs_setup_time, 0),			\
		.cs_hold = DT_INST_PROP_OR(idx, cs_hold_time, 0),			\
		.line_idle_low = DT_INST_PROP(idx, line_idle_low),			\
		.clock_source = SPI_CLK_SRC_DEFAULT,					\
	};										\
											\
	SPI_DEVICE_DT_INST_DEFINE(idx, spi_esp32_init,					\
				  NULL, &spi_data_##idx,				\
				  &spi_config_##idx, POST_KERNEL,			\
				  CONFIG_SPI_INIT_PRIORITY, &spi_api);

DT_INST_FOREACH_STATUS_OKAY(ESP32_SPI_INIT)