/*
 * Copyright (c) 2020 Henrik Brix Andersen <henrik@brixandersen.dk>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT xlnx_xps_spi_2_00_a

#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>
LOG_MODULE_REGISTER(xlnx_quadspi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

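/*
 * Illustrative devicetree node for this controller (a sketch only; the
 * xlnx,xps-spi-2.00.a binding is authoritative, and the address and
 * interrupt cells below are placeholders for the actual IP instance):
 *
 *	quadspi: spi@44a00000 {
 *		compatible = "xlnx,xps-spi-2.00.a";
 *		reg = <0x44a00000 0x10000>;
 *		interrupts = <1>;
 *		xlnx,num-ss-bits = <1>;
 *		xlnx,num-transfer-bits = <8>;
 *		fifo-size = <256>;
 *	};
 */
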
/* AXI Quad SPI v3.2 register offsets (See Xilinx PG153 for details) */
#define SRR_OFFSET             0x40
#define SPICR_OFFSET           0x60
#define SPISR_OFFSET           0x64
#define SPI_DTR_OFFSET         0x68
#define SPI_DRR_OFFSET         0x6c
#define SPISSR_OFFSET          0x70
#define SPI_TX_FIFO_OCR_OFFSET 0x74
#define SPI_RX_FIFO_OCR_OFFSET 0x78
#define DGIER_OFFSET           0x1c
#define IPISR_OFFSET           0x20
#define IPIER_OFFSET           0x28

/* SRR bit definitions */
#define SRR_SOFTRESET_MAGIC 0xa

/* SPICR bit definitions */
#define SPICR_LOOP            BIT(0)
#define SPICR_SPE             BIT(1)
#define SPICR_MASTER          BIT(2)
#define SPICR_CPOL            BIT(3)
#define SPICR_CPHA            BIT(4)
#define SPICR_TX_FIFO_RESET   BIT(5)
#define SPICR_RX_FIFO_RESET   BIT(6)
#define SPICR_MANUAL_SS       BIT(7)
#define SPICR_MASTER_XFER_INH BIT(8)
#define SPICR_LSB_FIRST       BIT(9)

/* SPISR bit definitions */
#define SPISR_RX_EMPTY          BIT(0)
#define SPISR_RX_FULL           BIT(1)
#define SPISR_TX_EMPTY          BIT(2)
#define SPISR_TX_FULL           BIT(3)
#define SPISR_MODF              BIT(4)
#define SPISR_SLAVE_MODE_SELECT BIT(5)
#define SPISR_CPOL_CPHA_ERROR   BIT(6)
#define SPISR_SLAVE_MODE_ERROR  BIT(7)
#define SPISR_MSB_ERROR         BIT(8)
#define SPISR_LOOPBACK_ERROR    BIT(9)
#define SPISR_COMMAND_ERROR     BIT(10)

#define SPISR_ERROR_MASK (SPISR_COMMAND_ERROR |    \
                          SPISR_LOOPBACK_ERROR |   \
                          SPISR_MSB_ERROR |        \
                          SPISR_SLAVE_MODE_ERROR | \
                          SPISR_CPOL_CPHA_ERROR)

/* DGIER bit definitions */
#define DGIER_GIE BIT(31)

/* IPISR and IPIER bit definitions */
#define IPIXR_MODF               BIT(0)
#define IPIXR_SLAVE_MODF         BIT(1)
#define IPIXR_DTR_EMPTY          BIT(2)
#define IPIXR_DTR_UNDERRUN       BIT(3)
#define IPIXR_DRR_FULL           BIT(4)
#define IPIXR_DRR_OVERRUN        BIT(5)
#define IPIXR_TX_FIFO_HALF_EMPTY BIT(6)
#define IPIXR_SLAVE_MODE_SELECT  BIT(7)
#define IPIXR_DRR_NOT_EMPTY      BIT(8)
#define IPIXR_CPOL_CPHA_ERROR    BIT(9)
#define IPIXR_SLAVE_MODE_ERROR   BIT(10)
#define IPIXR_MSB_ERROR          BIT(11)
#define IPIXR_LOOPBACK_ERROR     BIT(12)
#define IPIXR_COMMAND_ERROR      BIT(13)

struct xlnx_quadspi_config {
        mm_reg_t base;
        void (*irq_config_func)(const struct device *dev);
        uint8_t num_ss_bits;
        uint8_t num_xfer_bytes;
        uint16_t fifo_size;
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block)
        bool startup_block;
#endif
};

struct xlnx_quadspi_data {
        struct spi_context ctx;
        struct k_event dtr_empty;
};

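/* Read a 32-bit controller register at the given offset from the base address. */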
static inline uint32_t xlnx_quadspi_read32(const struct device *dev,
                                           mm_reg_t offset)
{
        const struct xlnx_quadspi_config *config = dev->config;

        return sys_read32(config->base + offset);
}

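/* Write a 32-bit value to the controller register at the given offset. */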
static inline void xlnx_quadspi_write32(const struct device *dev,
                                        uint32_t value,
                                        mm_reg_t offset)
{
        const struct xlnx_quadspi_config *config = dev->config;

        sys_write32(value, config->base + offset);
}

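/*
 * Assert (on = true) or de-assert (on = false) the slave select line for the
 * slave addressed by the current spi_config. SPISSR is one-hot and
 * active-low, so all bits are set except the one for the selected slave.
 * De-assertion is skipped when SPI_HOLD_ON_CS is requested, and both
 * operations are no-ops in slave mode.
 */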
static void xlnx_quadspi_cs_control(const struct device *dev, bool on)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t spissr = BIT_MASK(config->num_ss_bits);

        if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
                /* Skip slave select assert/de-assert in slave mode */
                return;
        }

        if (on) {
                /* SPISSR is one-hot, active-low */
                spissr &= ~BIT(ctx->config->slave);
        } else if (ctx->config->operation & SPI_HOLD_ON_CS) {
                /* Skip slave select de-assert */
                return;
        }

        xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET);
        spi_context_cs_control(ctx, on);
}

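/*
 * Validate the requested spi_config against the fixed IP core configuration
 * and program SPICR accordingly. Returns 0 on success or -ENOTSUP if the
 * requested mode cannot be supported by this instance.
 */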
static int xlnx_quadspi_configure(const struct device *dev,
                                  const struct spi_config *spi_cfg)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t word_size;
        uint32_t spicr;
        uint32_t spisr;

        if (spi_context_configured(ctx, spi_cfg)) {
                /* Configuration already active, just enable SPI IOs */
                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                spicr |= SPICR_SPE;
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
                return 0;
        }

        if (spi_cfg->operation & SPI_HALF_DUPLEX) {
                LOG_ERR("Half-duplex not supported");
                return -ENOTSUP;
        }

        if (spi_cfg->slave >= config->num_ss_bits) {
                LOG_ERR("unsupported slave %d, num_ss_bits %d",
                        spi_cfg->slave, config->num_ss_bits);
                return -ENOTSUP;
        }

        if (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) {
                LOG_ERR("unsupported CS polarity active high");
                return -ENOTSUP;
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) &&
            (spi_cfg->operation & SPI_OP_MODE_SLAVE)) {
                LOG_ERR("slave mode support not enabled");
                return -ENOTSUP;
        }

        word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
        if (word_size != (config->num_xfer_bytes * 8)) {
                LOG_ERR("unsupported word size %d bits, num_xfer_bytes %d",
                        word_size, config->num_xfer_bytes);
                return -ENOTSUP;
        }

        /* Reset FIFOs, SPI IOs enabled */
        spicr = SPICR_TX_FIFO_RESET | SPICR_RX_FIFO_RESET | SPICR_SPE;

        /* Master mode, inhibit master transmit, manual slave select */
        if (!IS_ENABLED(CONFIG_SPI_SLAVE) ||
            (spi_cfg->operation & SPI_OP_MODE_SLAVE) == 0U) {
                spicr |= SPICR_MASTER | SPICR_MASTER_XFER_INH | SPICR_MANUAL_SS;
        }

        if (spi_cfg->operation & SPI_MODE_CPOL) {
                spicr |= SPICR_CPOL;
        }

        if (spi_cfg->operation & SPI_MODE_CPHA) {
                spicr |= SPICR_CPHA;
        }

        if (spi_cfg->operation & SPI_MODE_LOOP) {
                spicr |= SPICR_LOOP;
        }

        if (spi_cfg->operation & SPI_TRANSFER_LSB) {
                spicr |= SPICR_LSB_FIRST;
        }

        /*
         * Write configuration and verify it is compliant with the IP core
         * configuration. Tri-state SPI IOs on error.
         */
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
        if (spisr & SPISR_ERROR_MASK) {
                LOG_ERR("unsupported configuration, spisr = 0x%08x", spisr);
                xlnx_quadspi_write32(dev, SPICR_MASTER_XFER_INH, SPICR_OFFSET);
                ctx->config = NULL;
                return -ENOTSUP;
        }

        ctx->config = spi_cfg;

        return 0;
}

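/*
 * Fill the TX FIFO (or the single transmit register when no FIFO is
 * implemented) from the current TX buffer, substituting dummy zero words
 * when only receiving. Master transactions are inhibited while the FIFO is
 * written and uninhibited afterwards to start the transfer. Returns true if
 * the transfer has been completed (successfully or with an error), false if
 * more data remains.
 */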
static bool xlnx_quadspi_start_tx(const struct device *dev)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        size_t xfer_len;
        uint32_t spicr = 0U;
        uint32_t spisr;
        uint32_t dtr = 0U;
        uint32_t fifo_avail_words = config->fifo_size ? config->fifo_size : 1;
        bool complete = false;

        if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) {
                /* All done, de-assert slave select */
                xlnx_quadspi_cs_control(dev, false);

                if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
                        /* Tri-state SPI IOs */
                        spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                        spicr &= ~(SPICR_SPE);
                        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
                }

                spi_context_complete(ctx, dev, 0);
                complete = true;
                return complete;
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
                /* Inhibit master transaction while writing TX data */
                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                spicr |= SPICR_MASTER_XFER_INH;
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        }

        /* We can only see as far as the current rx buffer */
        xfer_len = spi_context_longest_current_buf(ctx);

        /* Write TX data */
        while (xfer_len--) {
                if (spi_context_tx_buf_on(ctx)) {
                        switch (config->num_xfer_bytes) {
                        case 1:
                                dtr = UNALIGNED_GET((uint8_t *)(ctx->tx_buf));
                                break;
                        case 2:
                                dtr = UNALIGNED_GET((uint16_t *)(ctx->tx_buf));
                                break;
                        case 4:
                                dtr = UNALIGNED_GET((uint32_t *)(ctx->tx_buf));
                                break;
                        default:
                                __ASSERT(0, "unsupported num_xfer_bytes");
                        }
                } else {
                        /* No TX buffer. Use dummy TX data */
                        dtr = 0U;
                }

                xlnx_quadspi_write32(dev, dtr, SPI_DTR_OFFSET);
                spi_context_update_tx(ctx, config->num_xfer_bytes, 1);

                if (--fifo_avail_words == 0) {
                        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
                        if (spisr & SPISR_TX_FULL) {
                                break;
                        }
                        if (!config->fifo_size) {
                                fifo_avail_words = 1;
                        } else if (spisr & SPISR_TX_EMPTY) {
                                fifo_avail_words = config->fifo_size;
                        } else {
                                fifo_avail_words = config->fifo_size -
                                        xlnx_quadspi_read32(dev, SPI_TX_FIFO_OCR_OFFSET) - 1;
                        }
                }
        }

        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
        if (spisr & SPISR_COMMAND_ERROR) {
                /* Command not supported by memory type configured in IP core */
                LOG_ERR("unsupported command");
                xlnx_quadspi_cs_control(dev, false);

                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
                        /* Tri-state SPI IOs */
                        spicr &= ~(SPICR_SPE);
                }
                xlnx_quadspi_write32(dev, spicr | SPICR_TX_FIFO_RESET,
                                     SPICR_OFFSET);

                spi_context_complete(ctx, dev, -ENOTSUP);
                complete = true;
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
                /* Uninhibit master transaction */
                spicr &= ~(SPICR_MASTER_XFER_INH);
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        }
        return complete;
}

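/*
 * Drain the RX FIFO (or the single receive register) into the current RX
 * buffer, discarding data when only transmitting. The RX FIFO occupancy
 * register is consulted so the status register only needs to be polled once
 * per batch of words.
 */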
static void xlnx_quadspi_read_fifo(const struct device *dev)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
        /* RX FIFO occupancy register only exists if FIFO is implemented */
        uint32_t rx_fifo_words = config->fifo_size ?
                xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;

        /* Read RX data */
        while (!(spisr & SPISR_RX_EMPTY)) {
                uint32_t drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);

                if (spi_context_rx_buf_on(ctx)) {
                        switch (config->num_xfer_bytes) {
                        case 1:
                                UNALIGNED_PUT(drr, (uint8_t *)ctx->rx_buf);
                                break;
                        case 2:
                                UNALIGNED_PUT(drr, (uint16_t *)ctx->rx_buf);
                                break;
                        case 4:
                                UNALIGNED_PUT(drr, (uint32_t *)ctx->rx_buf);
                                break;
                        default:
                                __ASSERT(0, "unsupported num_xfer_bytes");
                        }
                }

                spi_context_update_rx(ctx, config->num_xfer_bytes, 1);

                if (--rx_fifo_words == 0) {
                        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
                        rx_fifo_words = config->fifo_size ?
                                xlnx_quadspi_read32(dev, SPI_RX_FIFO_OCR_OFFSET) + 1 : 1;
                }
        }
}

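/*
 * Common transceive implementation: configure the controller, assert slave
 * select and pump the FIFOs buffer by buffer. In synchronous mode the DTR
 * empty event posted by the ISR paces each FIFO refill; in asynchronous mode
 * the ISR itself drives the remainder of the transfer.
 */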
static int xlnx_quadspi_transceive(const struct device *dev,
                                   const struct spi_config *spi_cfg,
                                   const struct spi_buf_set *tx_bufs,
                                   const struct spi_buf_set *rx_bufs,
                                   bool async,
                                   spi_callback_t cb,
                                   void *userdata)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        int ret;

        spi_context_lock(ctx, async, cb, userdata, spi_cfg);

        ret = xlnx_quadspi_configure(dev, spi_cfg);
        if (ret) {
                goto out;
        }

        spi_context_buffers_setup(ctx, tx_bufs, rx_bufs,
                                  config->num_xfer_bytes);

        xlnx_quadspi_cs_control(dev, true);

        while (true) {
                k_event_clear(&data->dtr_empty, 1);
                bool complete = xlnx_quadspi_start_tx(dev);

                if (complete || async) {
                        break;
                }

                /**
                 * 20ms should be long enough for 256 byte FIFO at any
                 * reasonable clock speed.
                 */
                if (!k_event_wait(&data->dtr_empty, 1, false,
                                  K_MSEC(20 + CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE))) {
                        /* Timeout */
                        LOG_ERR("DTR empty timeout");
                        spi_context_complete(ctx, dev, -ETIMEDOUT);
                        break;
                }
                xlnx_quadspi_read_fifo(dev);
        }

        ret = spi_context_wait_for_completion(ctx);
out:
        spi_context_release(ctx, ret);

        return ret;
}

static int xlnx_quadspi_transceive_blocking(const struct device *dev,
                                            const struct spi_config *spi_cfg,
                                            const struct spi_buf_set *tx_bufs,
                                            const struct spi_buf_set *rx_bufs)
{
        return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false,
                                       NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int xlnx_quadspi_transceive_async(const struct device *dev,
                                         const struct spi_config *spi_cfg,
                                         const struct spi_buf_set *tx_bufs,
                                         const struct spi_buf_set *rx_bufs,
                                         spi_callback_t cb,
                                         void *userdata)
{
        return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, true,
                                       cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

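/*
 * Release the SPI bus: force slave select de-assert, tri-state the SPI IOs
 * and unlock the context so other callers may claim the bus.
 */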
static int xlnx_quadspi_release(const struct device *dev,
                                const struct spi_config *spi_cfg)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        uint32_t spicr;

        /* Force slave select de-assert */
        xlnx_quadspi_write32(dev, BIT_MASK(config->num_ss_bits), SPISSR_OFFSET);

        /* Tri-state SPI IOs */
        spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
        spicr &= ~(SPICR_SPE);
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);

        spi_context_unlock_unconditionally(&data->ctx);

        return 0;
}

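/*
 * Interrupt service routine: acknowledges the pending interrupt and, on DTR
 * empty, either continues the transfer directly (asynchronous mode) or posts
 * an event for the waiting caller (synchronous mode).
 */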
static void xlnx_quadspi_isr(const struct device *dev)
{
        struct xlnx_quadspi_data *data = dev->data;
        uint32_t ipisr;

        /* Acknowledge interrupt */
        ipisr = xlnx_quadspi_read32(dev, IPISR_OFFSET);
        xlnx_quadspi_write32(dev, ipisr, IPISR_OFFSET);

        if (ipisr & IPIXR_DTR_EMPTY) {
                /**
                 * For async mode, we need to read the RX FIFO and refill the TX FIFO
                 * if needed here.
                 * For sync mode, we do this in the caller's context to avoid doing too much
                 * work in the ISR, so just post the event.
                 */
#ifdef CONFIG_SPI_ASYNC
                struct spi_context *ctx = &data->ctx;

                if (ctx->asynchronous) {
                        xlnx_quadspi_read_fifo(dev);
                        xlnx_quadspi_start_tx(dev);
                        return;
                }
#endif
                k_event_post(&data->dtr_empty, 1);
        } else {
                LOG_WRN("unhandled interrupt, ipisr = 0x%08x", ipisr);
        }
}

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block)
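/*
 * Issue a dummy command so that the FPGA startup block passes the clock
 * through to the CCLK pin before the first real transfer (see the Xilinx
 * answer record referenced below). The controller is soft-reset afterwards
 * to discard any state left behind by the workaround.
 */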
static int xlnx_quadspi_startup_block_workaround(const struct device *dev)
{
        const struct xlnx_quadspi_config *config = dev->config;
        uint32_t spissr = BIT_MASK(config->num_ss_bits);
        uint32_t spicr;

        /**
         * See https://support.xilinx.com/s/article/52626?language=en_US
         * Up to 3 clock cycles must be issued before the output clock signal
         * is passed to the output CCLK pin from the SPI core.
         * Use JEDEC READ ID as dummy command to chip select 0.
         */
        spissr &= ~BIT(0);
        xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET);

        xlnx_quadspi_write32(dev, 0x9F, SPI_DTR_OFFSET);
        xlnx_quadspi_write32(dev, 0, SPI_DTR_OFFSET);
        xlnx_quadspi_write32(dev, 0, SPI_DTR_OFFSET);

        spicr = SPICR_MANUAL_SS | SPICR_MASTER | SPICR_SPE;
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);

        for (int i = 0;
             i < 10 && (xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_TX_EMPTY) == 0; i++) {
                k_msleep(1);
        }
        if ((xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_TX_EMPTY) == 0) {
                LOG_ERR("timeout waiting for TX_EMPTY");
                return -EIO;
        }
        spicr |= SPICR_MASTER_XFER_INH;
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);

        while ((xlnx_quadspi_read32(dev, SPISR_OFFSET) & SPISR_RX_EMPTY) == 0) {
                xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);
        }

        spissr = BIT_MASK(config->num_ss_bits);
        xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET);

        /* Reset controller to clean up */
        xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET);

        return 0;
}
#endif

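/*
 * Driver initialization: soft-reset the controller, hook up the IRQ,
 * configure any chip select GPIOs and enable the DTR empty interrupt used
 * to pace transfers.
 */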
static int xlnx_quadspi_init(const struct device *dev)
{
        int err;
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;

        k_event_init(&data->dtr_empty);

        /* Reset controller */
        xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET);

        config->irq_config_func(dev);

        err = spi_context_cs_configure_all(&data->ctx);
        if (err < 0) {
                return err;
        }

        spi_context_unlock_unconditionally(&data->ctx);

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block)
        if (config->startup_block) {
                err = xlnx_quadspi_startup_block_workaround(dev);
                if (err < 0) {
                        return err;
                }
        }
#endif

        /* Enable DTR Empty interrupt */
        xlnx_quadspi_write32(dev, IPIXR_DTR_EMPTY, IPIER_OFFSET);
        xlnx_quadspi_write32(dev, DGIER_GIE, DGIER_OFFSET);

        return 0;
}

static DEVICE_API(spi, xlnx_quadspi_driver_api) = {
        .transceive = xlnx_quadspi_transceive_blocking,
#ifdef CONFIG_SPI_ASYNC
        .transceive_async = xlnx_quadspi_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
        .iodev_submit = spi_rtio_iodev_default_submit,
#endif
        .release = xlnx_quadspi_release,
};

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(xlnx_startup_block)
#define STARTUP_BLOCK_INIT(n) .startup_block = DT_INST_PROP(n, xlnx_startup_block),
#else
#define STARTUP_BLOCK_INIT(n)
#endif

#define XLNX_QUADSPI_INIT(n)                                                                       \
        static void xlnx_quadspi_config_func_##n(const struct device *dev);                        \
                                                                                                   \
        static const struct xlnx_quadspi_config xlnx_quadspi_config_##n = {                        \
                .base = DT_INST_REG_ADDR(n),                                                       \
                .irq_config_func = xlnx_quadspi_config_func_##n,                                   \
                .num_ss_bits = DT_INST_PROP(n, xlnx_num_ss_bits),                                  \
                .num_xfer_bytes = DT_INST_PROP(n, xlnx_num_transfer_bits) / 8,                     \
                .fifo_size = DT_INST_PROP_OR(n, fifo_size, 0),                                     \
                STARTUP_BLOCK_INIT(n)};                                                            \
                                                                                                   \
        static struct xlnx_quadspi_data xlnx_quadspi_data_##n = {                                  \
                SPI_CONTEXT_INIT_LOCK(xlnx_quadspi_data_##n, ctx),                                 \
                SPI_CONTEXT_INIT_SYNC(xlnx_quadspi_data_##n, ctx),                                 \
                SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)};                             \
                                                                                                   \
        SPI_DEVICE_DT_INST_DEFINE(n, &xlnx_quadspi_init, NULL, &xlnx_quadspi_data_##n,             \
                                  &xlnx_quadspi_config_##n, POST_KERNEL,                           \
                                  CONFIG_SPI_INIT_PRIORITY, &xlnx_quadspi_driver_api);             \
                                                                                                   \
        static void xlnx_quadspi_config_func_##n(const struct device *dev)                         \
        {                                                                                          \
                IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), xlnx_quadspi_isr,           \
                            DEVICE_DT_INST_GET(n), 0);                                             \
                irq_enable(DT_INST_IRQN(n));                                                       \
        }

DT_INST_FOREACH_STATUS_OKAY(XLNX_QUADSPI_INIT)