/*
 * Copyright (c) 2020 Henrik Brix Andersen <henrik@brixandersen.dk>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT xlnx_xps_spi_2_00_a

#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(xlnx_quadspi, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

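/*
 * Illustrative devicetree node for this driver. The addresses, interrupt
 * specifier, and property values below are placeholders; the real values
 * depend on how the IP core is configured in the FPGA design:
 *
 *     spi0: spi@44a00000 {
 *             compatible = "xlnx,xps-spi-2.00.a";
 *             reg = <0x44a00000 0x10000>;
 *             interrupts = <0 25 2>;
 *             xlnx,num-ss-bits = <1>;
 *             xlnx,num-transfer-bits = <8>;
 *             #address-cells = <1>;
 *             #size-cells = <0>;
 *     };
 */
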
/* AXI Quad SPI v3.2 register offsets (See Xilinx PG153 for details) */
#define SRR_OFFSET             0x40
#define SPICR_OFFSET           0x60
#define SPISR_OFFSET           0x64
#define SPI_DTR_OFFSET         0x68
#define SPI_DRR_OFFSET         0x6c
#define SPISSR_OFFSET          0x70
#define SPI_TX_FIFO_OCR_OFFSET 0x74
#define SPI_RX_FIFO_OCR_OFFSET 0x78
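/* Offsets of the core's interrupt controller registers (DGIER, IPISR, IPIER) */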
#define DGIER_OFFSET           0x1c
#define IPISR_OFFSET           0x20
#define IPIER_OFFSET           0x28

/* SRR bit definitions */
#define SRR_SOFTRESET_MAGIC 0xa

/* SPICR bit definitions */
#define SPICR_LOOP            BIT(0)
#define SPICR_SPE             BIT(1)
#define SPICR_MASTER          BIT(2)
#define SPICR_CPOL            BIT(3)
#define SPICR_CPHA            BIT(4)
#define SPICR_TX_FIFO_RESET   BIT(5)
#define SPICR_RX_FIFO_RESET   BIT(6)
#define SPICR_MANUAL_SS       BIT(7)
#define SPICR_MASTER_XFER_INH BIT(8)
#define SPICR_LSB_FIRST       BIT(9)

/* SPISR bit definitions */
#define SPISR_RX_EMPTY          BIT(0)
#define SPISR_RX_FULL           BIT(1)
#define SPISR_TX_EMPTY          BIT(2)
#define SPISR_TX_FULL           BIT(3)
#define SPISR_MODF              BIT(4)
#define SPISR_SLAVE_MODE_SELECT BIT(5)
#define SPISR_CPOL_CPHA_ERROR   BIT(6)
#define SPISR_SLAVE_MODE_ERROR  BIT(7)
#define SPISR_MSB_ERROR         BIT(8)
#define SPISR_LOOPBACK_ERROR    BIT(9)
#define SPISR_COMMAND_ERROR     BIT(10)

#define SPISR_ERROR_MASK (SPISR_COMMAND_ERROR |    \
                          SPISR_LOOPBACK_ERROR |   \
                          SPISR_MSB_ERROR |        \
                          SPISR_SLAVE_MODE_ERROR | \
                          SPISR_CPOL_CPHA_ERROR)

/* DGIER bit definitions */
#define DGIER_GIE BIT(31)

/* IPISR and IPIER bit definitions */
#define IPIXR_MODF               BIT(0)
#define IPIXR_SLAVE_MODF         BIT(1)
#define IPIXR_DTR_EMPTY          BIT(2)
#define IPIXR_DTR_UNDERRUN       BIT(3)
#define IPIXR_DRR_FULL           BIT(4)
#define IPIXR_DRR_OVERRUN        BIT(5)
#define IPIXR_TX_FIFO_HALF_EMPTY BIT(6)
#define IPIXR_SLAVE_MODE_SELECT  BIT(7)
#define IPIXR_DRR_NOT_EMPTY      BIT(8)
#define IPIXR_CPOL_CPHA_ERROR    BIT(9)
#define IPIXR_SLAVE_MODE_ERROR   BIT(10)
#define IPIXR_MSB_ERROR          BIT(11)
#define IPIXR_LOOPBACK_ERROR     BIT(12)
#define IPIXR_COMMAND_ERROR      BIT(13)

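/*
 * Build-time per-instance configuration, populated from devicetree.
 * num_xfer_bytes is the transaction width in bytes (1, 2, or 4), derived
 * from the xlnx,num-transfer-bits property.
 */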
struct xlnx_quadspi_config {
        mm_reg_t base;
        void (*irq_config_func)(const struct device *dev);
        uint8_t num_ss_bits;
        uint8_t num_xfer_bytes;
};

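/* Per-instance run-time state; spi_context provides locking, completion
 * signalling, and TX/RX buffer tracking.
 */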
struct xlnx_quadspi_data {
        struct spi_context ctx;
};

static inline uint32_t xlnx_quadspi_read32(const struct device *dev,
                                           mm_reg_t offset)
{
        const struct xlnx_quadspi_config *config = dev->config;

        return sys_read32(config->base + offset);
}

static inline void xlnx_quadspi_write32(const struct device *dev,
                                        uint32_t value,
                                        mm_reg_t offset)
{
        const struct xlnx_quadspi_config *config = dev->config;

        sys_write32(value, config->base + offset);
}

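/*
 * Drive the slave select lines via SPISSR. The register is one-hot and
 * active-low: all bits are set except the bit of the selected slave.
 */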
static void xlnx_quadspi_cs_control(const struct device *dev, bool on)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t spissr = BIT_MASK(config->num_ss_bits);

        if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
                /* Skip slave select assert/de-assert in slave mode */
                return;
        }

        if (on) {
                /* SPISSR is one-hot, active-low */
                spissr &= ~BIT(ctx->config->slave);
        } else if (ctx->config->operation & SPI_HOLD_ON_CS) {
                /* Skip slave select de-assert */
                return;
        }

        xlnx_quadspi_write32(dev, spissr, SPISSR_OFFSET);
        spi_context_cs_control(ctx, on);
}

static int xlnx_quadspi_configure(const struct device *dev,
                                  const struct spi_config *spi_cfg)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t word_size;
        uint32_t spicr;
        uint32_t spisr;

        if (spi_context_configured(ctx, spi_cfg)) {
                /* Configuration already active, just enable SPI IOs */
                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                spicr |= SPICR_SPE;
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
                return 0;
        }

        if (spi_cfg->operation & SPI_HALF_DUPLEX) {
                LOG_ERR("Half-duplex not supported");
                return -ENOTSUP;
        }

        if (spi_cfg->slave >= config->num_ss_bits) {
                LOG_ERR("unsupported slave %d, num_ss_bits %d",
                        spi_cfg->slave, config->num_ss_bits);
                return -ENOTSUP;
        }

        if (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) {
                LOG_ERR("unsupported CS polarity active high");
                return -ENOTSUP;
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) &&
            (spi_cfg->operation & SPI_OP_MODE_SLAVE)) {
                LOG_ERR("slave mode support not enabled");
                return -ENOTSUP;
        }

        word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
        if (word_size != (config->num_xfer_bytes * 8)) {
                LOG_ERR("unsupported word size %d bits, num_xfer_bytes %d",
                        word_size, config->num_xfer_bytes);
                return -ENOTSUP;
        }

        /* Reset FIFOs, SPI IOs enabled */
        spicr = SPICR_TX_FIFO_RESET | SPICR_RX_FIFO_RESET | SPICR_SPE;

        /* Master mode, inhibit master transmit, manual slave select */
        if (!IS_ENABLED(CONFIG_SPI_SLAVE) ||
            (spi_cfg->operation & SPI_OP_MODE_SLAVE) == 0U) {
                spicr |= SPICR_MASTER | SPICR_MASTER_XFER_INH | SPICR_MANUAL_SS;
        }

        if (spi_cfg->operation & SPI_MODE_CPOL) {
                spicr |= SPICR_CPOL;
        }

        if (spi_cfg->operation & SPI_MODE_CPHA) {
                spicr |= SPICR_CPHA;
        }

        if (spi_cfg->operation & SPI_MODE_LOOP) {
                spicr |= SPICR_LOOP;
        }

        if (spi_cfg->operation & SPI_TRANSFER_LSB) {
                spicr |= SPICR_LSB_FIRST;
        }

        /*
         * Write configuration and verify it is compliant with the IP core
         * configuration. Tri-state SPI IOs on error.
         */
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
        if (spisr & SPISR_ERROR_MASK) {
                LOG_ERR("unsupported configuration, spisr = 0x%08x", spisr);
                xlnx_quadspi_write32(dev, SPICR_MASTER_XFER_INH, SPICR_OFFSET);
                ctx->config = NULL;
                return -ENOTSUP;
        }

        ctx->config = spi_cfg;

        return 0;
}

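/*
 * Fill the TX FIFO from the current TX buffer (or with dummy words when
 * there is no TX buffer), then release the master transaction inhibit so
 * the core clocks the data out. Called once to start a transfer and again
 * from the ISR for each subsequent chunk; completes the transfer when no
 * TX or RX buffers remain.
 */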
static void xlnx_quadspi_start_tx(const struct device *dev)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        size_t xfer_len;
        uint32_t spicr = 0U;
        uint32_t spisr;
        uint32_t dtr = 0U;

        if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) {
                /* All done, de-assert slave select */
                xlnx_quadspi_cs_control(dev, false);

                if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
                        /* Tri-state SPI IOs */
                        spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                        spicr &= ~(SPICR_SPE);
                        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
                }

                spi_context_complete(ctx, dev, 0);
                return;
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
                /* Inhibit master transaction while writing TX data */
                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                spicr |= SPICR_MASTER_XFER_INH;
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        }

        /* We can only see as far as the current rx buffer */
        xfer_len = spi_context_longest_current_buf(ctx);

        /* Write TX data */
        while (xfer_len--) {
                if (spi_context_tx_buf_on(ctx)) {
                        switch (config->num_xfer_bytes) {
                        case 1:
                                dtr = UNALIGNED_GET((uint8_t *)(ctx->tx_buf));
                                break;
                        case 2:
                                dtr = UNALIGNED_GET((uint16_t *)(ctx->tx_buf));
                                break;
                        case 4:
                                dtr = UNALIGNED_GET((uint32_t *)(ctx->tx_buf));
                                break;
                        default:
                                __ASSERT(0, "unsupported num_xfer_bytes");
                        }
                } else {
                        /* No TX buffer. Use dummy TX data */
                        dtr = 0U;
                }

                xlnx_quadspi_write32(dev, dtr, SPI_DTR_OFFSET);
                spi_context_update_tx(ctx, config->num_xfer_bytes, 1);

                spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
                if (spisr & SPISR_TX_FULL) {
                        break;
                }
        }

        spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
        if (spisr & SPISR_COMMAND_ERROR) {
                /* Command not supported by memory type configured in IP core */
                LOG_ERR("unsupported command");
                xlnx_quadspi_cs_control(dev, false);

                spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
                if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
                        /* Tri-state SPI IOs */
                        spicr &= ~(SPICR_SPE);
                }
                xlnx_quadspi_write32(dev, spicr | SPICR_TX_FIFO_RESET,
                                     SPICR_OFFSET);

                spi_context_complete(ctx, dev, -ENOTSUP);
        }

        if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
                /* Uninhibit master transaction */
                spicr &= ~(SPICR_MASTER_XFER_INH);
                xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
        }
}

static int xlnx_quadspi_transceive(const struct device *dev,
                                   const struct spi_config *spi_cfg,
                                   const struct spi_buf_set *tx_bufs,
                                   const struct spi_buf_set *rx_bufs,
                                   bool async,
                                   spi_callback_t cb,
                                   void *userdata)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        int ret;

        spi_context_lock(ctx, async, cb, userdata, spi_cfg);

        ret = xlnx_quadspi_configure(dev, spi_cfg);
        if (ret) {
                goto out;
        }

        spi_context_buffers_setup(ctx, tx_bufs, rx_bufs,
                                  config->num_xfer_bytes);

        xlnx_quadspi_cs_control(dev, true);

        xlnx_quadspi_start_tx(dev);

        ret = spi_context_wait_for_completion(ctx);
out:
        spi_context_release(ctx, ret);

        return ret;
}

static int xlnx_quadspi_transceive_blocking(const struct device *dev,
                                            const struct spi_config *spi_cfg,
                                            const struct spi_buf_set *tx_bufs,
                                            const struct spi_buf_set *rx_bufs)
{
        return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false,
                                       NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int xlnx_quadspi_transceive_async(const struct device *dev,
                                         const struct spi_config *spi_cfg,
                                         const struct spi_buf_set *tx_bufs,
                                         const struct spi_buf_set *rx_bufs,
                                         spi_callback_t cb,
                                         void *userdata)
{
        return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, true,
                                       cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int xlnx_quadspi_release(const struct device *dev,
                                const struct spi_config *spi_cfg)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        uint32_t spicr;

        /* Force slave select de-assert */
        xlnx_quadspi_write32(dev, BIT_MASK(config->num_ss_bits), SPISSR_OFFSET);

        /* Tri-state SPI IOs */
        spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
        spicr &= ~(SPICR_SPE);
        xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);

        spi_context_unlock_unconditionally(&data->ctx);

        return 0;
}

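/*
 * DTR empty interrupt handler: drain the RX FIFO into the RX buffer(s),
 * then start the next TX FIFO fill, or complete the transfer once both
 * buffer sets are exhausted.
 */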
static void xlnx_quadspi_isr(const struct device *dev)
{
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        uint32_t temp;
        uint32_t drr;

        /* Acknowledge interrupt */
        temp = xlnx_quadspi_read32(dev, IPISR_OFFSET);
        xlnx_quadspi_write32(dev, temp, IPISR_OFFSET);

        if (temp & IPIXR_DTR_EMPTY) {
                temp = xlnx_quadspi_read32(dev, SPISR_OFFSET);

                /* Read RX data */
                while (!(temp & SPISR_RX_EMPTY)) {
                        drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);

                        if (spi_context_rx_buf_on(ctx)) {
                                switch (config->num_xfer_bytes) {
                                case 1:
                                        UNALIGNED_PUT(drr,
                                                      (uint8_t *)ctx->rx_buf);
                                        break;
                                case 2:
                                        UNALIGNED_PUT(drr,
                                                      (uint16_t *)ctx->rx_buf);
                                        break;
                                case 4:
                                        UNALIGNED_PUT(drr,
                                                      (uint32_t *)ctx->rx_buf);
                                        break;
                                default:
                                        __ASSERT(0,
                                                 "unsupported num_xfer_bytes");
                                }
                        }

                        spi_context_update_rx(ctx, config->num_xfer_bytes, 1);

                        temp = xlnx_quadspi_read32(dev, SPISR_OFFSET);
                }

                /* Start next TX */
                xlnx_quadspi_start_tx(dev);
        } else {
                LOG_WRN("unhandled interrupt, ipisr = 0x%08x", temp);
        }
}

static int xlnx_quadspi_init(const struct device *dev)
{
        int err;
        const struct xlnx_quadspi_config *config = dev->config;
        struct xlnx_quadspi_data *data = dev->data;

        /* Reset controller */
        xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET);

        config->irq_config_func(dev);

        /* Enable DTR Empty interrupt */
        xlnx_quadspi_write32(dev, IPIXR_DTR_EMPTY, IPIER_OFFSET);
        xlnx_quadspi_write32(dev, DGIER_GIE, DGIER_OFFSET);

        err = spi_context_cs_configure_all(&data->ctx);
        if (err < 0) {
                return err;
        }

        spi_context_unlock_unconditionally(&data->ctx);

        return 0;
}

static const struct spi_driver_api xlnx_quadspi_driver_api = {
        .transceive = xlnx_quadspi_transceive_blocking,
#ifdef CONFIG_SPI_ASYNC
        .transceive_async = xlnx_quadspi_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
        .release = xlnx_quadspi_release,
};

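/*
 * Per-instance boilerplate: forward-declare the IRQ configuration function,
 * define the config and data structures, register the device, then connect
 * and enable the instance's interrupt.
 */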
#define XLNX_QUADSPI_INIT(n)                                            \
        static void xlnx_quadspi_config_func_##n(const struct device *dev); \
                                                                        \
        static const struct xlnx_quadspi_config xlnx_quadspi_config_##n = { \
                .base = DT_INST_REG_ADDR(n),                            \
                .irq_config_func = xlnx_quadspi_config_func_##n,        \
                .num_ss_bits = DT_INST_PROP(n, xlnx_num_ss_bits),       \
                .num_xfer_bytes =                                       \
                        DT_INST_PROP(n, xlnx_num_transfer_bits) / 8,    \
        };                                                              \
                                                                        \
        static struct xlnx_quadspi_data xlnx_quadspi_data_##n = {       \
                SPI_CONTEXT_INIT_LOCK(xlnx_quadspi_data_##n, ctx),      \
                SPI_CONTEXT_INIT_SYNC(xlnx_quadspi_data_##n, ctx),      \
                SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)    \
        };                                                              \
                                                                        \
        DEVICE_DT_INST_DEFINE(n, &xlnx_quadspi_init,                    \
                              NULL,                                     \
                              &xlnx_quadspi_data_##n,                   \
                              &xlnx_quadspi_config_##n, POST_KERNEL,    \
                              CONFIG_SPI_INIT_PRIORITY,                 \
                              &xlnx_quadspi_driver_api);                \
                                                                        \
        static void xlnx_quadspi_config_func_##n(const struct device *dev) \
        {                                                               \
                IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),  \
                            xlnx_quadspi_isr,                           \
                            DEVICE_DT_INST_GET(n), 0);                  \
                irq_enable(DT_INST_IRQN(n));                            \
        }

DT_INST_FOREACH_STATUS_OKAY(XLNX_QUADSPI_INIT)
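
/*
 * Minimal usage sketch from application code (hypothetical node label spi0;
 * error handling omitted). The word size must match xlnx,num-transfer-bits;
 * note that this driver never programs spi_config::frequency, since the SCK
 * ratio is fixed in the IP core configuration.
 *
 *     const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(spi0));
 *     struct spi_config cfg = {
 *             .operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *             .slave = 0,
 *     };
 *     uint8_t tx_data[] = { 0x9f };
 *     const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *     const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *     int err = spi_write(spi, &cfg, &tx);
 */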