1 /*
2 * Copyright (c) 2020 Henrik Brix Andersen <henrik@brixandersen.dk>
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT xlnx_xps_spi_2_00_a
8
9 #include <device.h>
10 #include <drivers/spi.h>
11 #include <sys/sys_io.h>
12 #include <logging/log.h>
13 LOG_MODULE_REGISTER(xlnx_quadspi, CONFIG_SPI_LOG_LEVEL);
14
15 #include "spi_context.h"
16
17 /* AXI Quad SPI v3.2 register offsets (See Xilinx PG153 for details) */
18 #define SRR_OFFSET 0x40
19 #define SPICR_OFFSET 0x60
20 #define SPISR_OFFSET 0x64
21 #define SPI_DTR_OFFSET 0x68
22 #define SPI_DRR_OFFSET 0x6c
23 #define SPISSR_OFFSET 0x70
24 #define SPI_TX_FIFO_OCR_OFFSET 0x74
25 #define SPI_RX_FIFO_OCR_OFFSET 0x78
26 #define DGIER_OFFSET 0x1c
27 #define IPISR_OFFSET 0x20
28 #define IPIER_OFFSET 0x28
29
30 /* SRR bit definitions */
31 #define SRR_SOFTRESET_MAGIC 0xa
32
33 /* SPICR bit definitions */
34 #define SPICR_LOOP BIT(0)
35 #define SPICR_SPE BIT(1)
36 #define SPICR_MASTER BIT(2)
37 #define SPICR_CPOL BIT(3)
38 #define SPICR_CPHA BIT(4)
39 #define SPICR_TX_FIFO_RESET BIT(5)
40 #define SPICR_RX_FIFO_RESET BIT(6)
41 #define SPICR_MANUAL_SS BIT(7)
42 #define SPICR_MASTER_XFER_INH BIT(8)
43 #define SPICR_LSB_FIRST BIT(9)
44
45 /* SPISR bit definitions */
46 #define SPISR_RX_EMPTY BIT(0)
47 #define SPISR_RX_FULL BIT(1)
48 #define SPISR_TX_EMPTY BIT(2)
49 #define SPISR_TX_FULL BIT(3)
50 #define SPISR_MODF BIT(4)
51 #define SPISR_SLAVE_MODE_SELECT BIT(5)
52 #define SPISR_CPOL_CPHA_ERROR BIT(6)
53 #define SPISR_SLAVE_MODE_ERROR BIT(7)
54 #define SPISR_MSB_ERROR BIT(8)
55 #define SPISR_LOOPBACK_ERROR BIT(9)
56 #define SPISR_COMMAND_ERROR BIT(10)
57
58 #define SPISR_ERROR_MASK (SPISR_COMMAND_ERROR | \
59 SPISR_LOOPBACK_ERROR | \
60 SPISR_MSB_ERROR | \
61 SPISR_SLAVE_MODE_ERROR | \
62 SPISR_CPOL_CPHA_ERROR)
63
64 /* DGIER bit definitions */
65 #define DGIER_GIE BIT(31)
66
67 /* IPISR and IPIER bit definitions */
68 #define IPIXR_MODF BIT(0)
69 #define IPIXR_SLAVE_MODF BIT(1)
70 #define IPIXR_DTR_EMPTY BIT(2)
71 #define IPIXR_DTR_UNDERRUN BIT(3)
72 #define IPIXR_DRR_FULL BIT(4)
73 #define IPIXR_DRR_OVERRUN BIT(5)
74 #define IPIXR_TX_FIFO_HALF_EMPTY BIT(6)
75 #define IPIXR_SLAVE_MODE_SELECT BIT(7)
76 #define IPIXR_DDR_NOT_EMPTY BIT(8)
77 #define IPIXR_CPOL_CPHA_ERROR BIT(9)
78 #define IPIXR_SLAVE_MODE_ERROR BIT(10)
79 #define IPIXR_MSB_ERROR BIT(11)
80 #define IPIXR_LOOPBACK_ERROR BIT(12)
81 #define IPIXR_COMMAND_ERROR BIT(13)
82
/* Per-instance constant configuration, populated from devicetree. */
struct xlnx_quadspi_config {
	mm_reg_t base;                                     /* MMIO base address of the IP core */
	void (*irq_config_func)(const struct device *dev); /* connects and enables the instance IRQ */
	uint8_t num_ss_bits;    /* number of slave select lines (xlnx,num-ss-bits) */
	uint8_t num_xfer_bytes; /* transfer width in bytes (xlnx,num-transfer-bits / 8) */
};
89
/* Per-instance mutable driver state. */
struct xlnx_quadspi_data {
	struct spi_context ctx; /* generic SPI context (locking, buffers, CS handling) */
};
93
xlnx_quadspi_read32(const struct device * dev,mm_reg_t offset)94 static inline uint32_t xlnx_quadspi_read32(const struct device *dev,
95 mm_reg_t offset)
96 {
97 const struct xlnx_quadspi_config *config = dev->config;
98
99 return sys_read32(config->base + offset);
100 }
101
/**
 * Write a 32-bit register of the AXI Quad SPI IP core.
 *
 * @param dev Driver device instance.
 * @param value Value to write.
 * @param offset Register offset relative to the MMIO base address.
 */
static inline void xlnx_quadspi_write32(const struct device *dev,
					uint32_t value,
					mm_reg_t offset)
{
	const struct xlnx_quadspi_config *cfg = dev->config;

	sys_write32(value, cfg->base + offset);
}
110
/**
 * Assert or de-assert the slave select for the active configuration.
 *
 * No-op in slave mode (the external master drives slave select) and when
 * de-asserting while SPI_HOLD_ON_CS is requested.
 *
 * @param dev Driver device instance.
 * @param on true to assert slave select, false to de-assert it.
 */
static void xlnx_quadspi_cs_control(const struct device *dev, bool on)
{
	const struct xlnx_quadspi_config *cfg = dev->config;
	struct xlnx_quadspi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	/* All slave selects de-asserted (SPISSR is one-hot, active-low) */
	uint32_t ssr = BIT_MASK(cfg->num_ss_bits);

	if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
		/* Skip slave select assert/de-assert in slave mode */
		return;
	}

	if (!on) {
		if ((ctx->config->operation & SPI_HOLD_ON_CS) != 0U) {
			/* Keep the slave selected between transactions */
			return;
		}
	} else {
		/* Select the requested slave (active-low) */
		ssr &= ~BIT(ctx->config->slave);
	}

	xlnx_quadspi_write32(dev, ssr, SPISSR_OFFSET);
	spi_context_cs_control(ctx, on);
}
134
/**
 * Apply @p spi_cfg to the controller.
 *
 * Validates the requested settings against the IP core configuration
 * (slave number, CS polarity, slave mode support, fixed word size),
 * writes SPICR accordingly, and verifies the result via the SPISR error
 * flags.
 *
 * @param dev Driver device instance.
 * @param spi_cfg SPI configuration to apply.
 * @return 0 on success, -ENOTSUP if the configuration is not supported.
 */
static int xlnx_quadspi_configure(const struct device *dev,
				  const struct spi_config *spi_cfg)
{
	const struct xlnx_quadspi_config *config = dev->config;
	struct xlnx_quadspi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t word_size;
	uint32_t spicr;
	uint32_t spisr;

	if (spi_context_configured(ctx, spi_cfg)) {
		/* Configuration already active, just enable SPI IOs */
		spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
		spicr |= SPICR_SPE;
		xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
		return 0;
	}

	if (spi_cfg->slave >= config->num_ss_bits) {
		LOG_ERR("unsupported slave %d, num_ss_bits %d",
			spi_cfg->slave, config->num_ss_bits);
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_CS_ACTIVE_HIGH) {
		/* SPISSR only drives active-low slave selects */
		LOG_ERR("unsupported CS polarity active high");
		return -ENOTSUP;
	}

	if (!IS_ENABLED(CONFIG_SPI_SLAVE) && \
	    (spi_cfg->operation & SPI_OP_MODE_SLAVE)) {
		LOG_ERR("slave mode support not enabled");
		return -ENOTSUP;
	}

	/* Transfer width is fixed at IP core synthesis time */
	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size != (config->num_xfer_bytes * 8)) {
		LOG_ERR("unsupported word size %d bits, num_xfer_bytes %d",
			word_size, config->num_xfer_bytes);
		return -ENOTSUP;
	}

	/* Reset FIFOs, SPI IOs enabled */
	spicr = SPICR_TX_FIFO_RESET | SPICR_RX_FIFO_RESET | SPICR_SPE;

	/* Master mode, inhibit master transmit, manual slave select */
	if (!IS_ENABLED(CONFIG_SPI_SLAVE) ||
	    (spi_cfg->operation & SPI_OP_MODE_SLAVE) == 0U) {
		spicr |= SPICR_MASTER | SPICR_MASTER_XFER_INH | SPICR_MANUAL_SS;
	}

	if (spi_cfg->operation & SPI_MODE_CPOL) {
		spicr |= SPICR_CPOL;
	}

	if (spi_cfg->operation & SPI_MODE_CPHA) {
		spicr |= SPICR_CPHA;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		spicr |= SPICR_LOOP;
	}

	if (spi_cfg->operation & SPI_TRANSFER_LSB) {
		spicr |= SPICR_LSB_FIRST;
	}

	/*
	 * Write configuration and verify it is compliant with the IP core
	 * configuration. Tri-state SPI IOs on error.
	 */
	xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
	spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
	if (spisr & SPISR_ERROR_MASK) {
		LOG_ERR("unsupported configuration, spisr = 0x%08x", spisr);
		xlnx_quadspi_write32(dev, SPICR_MASTER_XFER_INH, SPICR_OFFSET);
		/* Invalidate cached config so the next call reconfigures */
		ctx->config = NULL;
		return -ENOTSUP;
	}

	ctx->config = spi_cfg;

	if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
		spi_context_cs_configure(ctx);
	}

	return 0;
}
223
/**
 * Write the next chunk of TX data to the TX FIFO and start the transfer.
 *
 * If both TX and RX buffers are exhausted, de-asserts the slave select,
 * optionally tri-states the SPI IOs, and completes the spi_context.
 * Otherwise fills the TX FIFO (using dummy zeroes when no TX buffer is
 * active) and, in master mode, releases the transfer inhibit so the
 * hardware starts shifting. The DTR empty interrupt drives the next call
 * (see xlnx_quadspi_isr()).
 */
static void xlnx_quadspi_start_tx(const struct device *dev)
{
	const struct xlnx_quadspi_config *config = dev->config;
	struct xlnx_quadspi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	size_t xfer_len;
	uint32_t spicr = 0U;
	uint32_t spisr;
	uint32_t dtr = 0U;

	if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) {
		/* All done, de-assert slave select */
		xlnx_quadspi_cs_control(dev, false);

		if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
			/* Tri-state SPI IOs */
			spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
			spicr &= ~(SPICR_SPE);
			xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
		}

		spi_context_complete(ctx, 0);
		return;
	}

	if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
		/* Inhibit master transaction while writing TX data */
		spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
		spicr |= SPICR_MASTER_XFER_INH;
		xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
	}

	/* We can only see as far as the current rx buffer */
	xfer_len = spi_context_longest_current_buf(ctx);

	/* Write TX data until the chunk is done or the TX FIFO fills up */
	while (xfer_len--) {
		if (spi_context_tx_buf_on(ctx)) {
			/* UNALIGNED_GET: tx_buf need not be naturally aligned */
			switch (config->num_xfer_bytes) {
			case 1:
				dtr = UNALIGNED_GET((uint8_t *)(ctx->tx_buf));
				break;
			case 2:
				dtr = UNALIGNED_GET((uint16_t *)(ctx->tx_buf));
				break;
			case 4:
				dtr = UNALIGNED_GET((uint32_t *)(ctx->tx_buf));
				break;
			default:
				__ASSERT(0, "unsupported num_xfer_bytes");
			}
		} else {
			/* No TX buffer. Use dummy TX data */
			dtr = 0U;
		}

		xlnx_quadspi_write32(dev, dtr, SPI_DTR_OFFSET);
		spi_context_update_tx(ctx, config->num_xfer_bytes, 1);

		spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
		if (spisr & SPISR_TX_FULL) {
			break;
		}
	}

	spisr = xlnx_quadspi_read32(dev, SPISR_OFFSET);
	if (spisr & SPISR_COMMAND_ERROR) {
		/* Command not supported by memory type configured in IP core */
		LOG_ERR("unsupported command");
		xlnx_quadspi_cs_control(dev, false);

		spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
		if ((ctx->config->operation & SPI_HOLD_ON_CS) == 0U) {
			/* Tri-state SPI IOs */
			spicr &= ~(SPICR_SPE);
		}
		xlnx_quadspi_write32(dev, spicr | SPICR_TX_FIFO_RESET,
				     SPICR_OFFSET);

		spi_context_complete(ctx, -ENOTSUP);
	}

	/*
	 * NOTE(review): after a command error the context is completed with
	 * -ENOTSUP above, yet execution still falls through to uninhibit the
	 * master transaction below — confirm this is intentional.
	 */
	if (!IS_ENABLED(CONFIG_SPI_SLAVE) || !spi_context_is_slave(ctx)) {
		/* Uninhibit master transaction */
		spicr &= ~(SPICR_MASTER_XFER_INH);
		xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
	}
}
312
/**
 * Common transceive implementation for the blocking and async entry points.
 *
 * Locks the context, (re)configures the controller, sets up the transfer
 * buffers, asserts slave select and kicks off the first TX FIFO fill, then
 * waits for (or, when @p async is true, arms) completion.
 *
 * @param dev Driver device instance.
 * @param spi_cfg SPI configuration to use for the transfer.
 * @param tx_bufs TX buffer set (may be NULL).
 * @param rx_bufs RX buffer set (may be NULL).
 * @param async true for asynchronous operation.
 * @param signal Poll signal to raise on async completion (may be NULL).
 * @return 0 on success, negative errno on failure.
 */
static int xlnx_quadspi_transceive(const struct device *dev,
				   const struct spi_config *spi_cfg,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs,
				   bool async, struct k_poll_signal *signal)
{
	const struct xlnx_quadspi_config *cfg = dev->config;
	struct xlnx_quadspi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int err;

	spi_context_lock(ctx, async, signal, spi_cfg);

	err = xlnx_quadspi_configure(dev, spi_cfg);
	if (err == 0) {
		spi_context_buffers_setup(ctx, tx_bufs, rx_bufs,
					  cfg->num_xfer_bytes);

		/* Assert slave select and start filling the TX FIFO */
		xlnx_quadspi_cs_control(dev, true);
		xlnx_quadspi_start_tx(dev);

		err = spi_context_wait_for_completion(ctx);
	}

	spi_context_release(ctx, err);

	return err;
}
344
xlnx_quadspi_transceive_blocking(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)345 static int xlnx_quadspi_transceive_blocking(const struct device *dev,
346 const struct spi_config *spi_cfg,
347 const struct spi_buf_set *tx_bufs,
348 const struct spi_buf_set *rx_bufs)
349 {
350 return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs, false,
351 NULL);
352 }
353
#ifdef CONFIG_SPI_ASYNC
/* Asynchronous transceive entry point for the SPI driver API. */
static int xlnx_quadspi_transceive_async(const struct device *dev,
					 const struct spi_config *spi_cfg,
					 const struct spi_buf_set *tx_bufs,
					 const struct spi_buf_set *rx_bufs,
					 struct k_poll_signal *signal)
{
	return xlnx_quadspi_transceive(dev, spi_cfg, tx_bufs, rx_bufs,
				       true, signal);
}
#endif /* CONFIG_SPI_ASYNC */
365
xlnx_quadspi_release(const struct device * dev,const struct spi_config * spi_cfg)366 static int xlnx_quadspi_release(const struct device *dev,
367 const struct spi_config *spi_cfg)
368 {
369 const struct xlnx_quadspi_config *config = dev->config;
370 struct xlnx_quadspi_data *data = dev->data;
371 uint32_t spicr;
372
373 /* Force slave select de-assert */
374 xlnx_quadspi_write32(dev, BIT_MASK(config->num_ss_bits), SPISSR_OFFSET);
375
376 /* Tri-state SPI IOs */
377 spicr = xlnx_quadspi_read32(dev, SPICR_OFFSET);
378 spicr &= ~(SPICR_SPE);
379 xlnx_quadspi_write32(dev, spicr, SPICR_OFFSET);
380
381 spi_context_unlock_unconditionally(&data->ctx);
382
383 return 0;
384 }
385
/**
 * Interrupt service routine.
 *
 * Acknowledges the pending interrupt, and on DTR empty drains the RX FIFO
 * into the current RX buffer (received words are discarded when no RX
 * buffer is active), then starts the next TX chunk.
 */
static void xlnx_quadspi_isr(const struct device *dev)
{
	const struct xlnx_quadspi_config *config = dev->config;
	struct xlnx_quadspi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t temp;
	uint32_t drr;

	/* Acknowledge interrupt */
	temp = xlnx_quadspi_read32(dev, IPISR_OFFSET);
	xlnx_quadspi_write32(dev, temp, IPISR_OFFSET);

	if (temp & IPIXR_DTR_EMPTY) {
		temp = xlnx_quadspi_read32(dev, SPISR_OFFSET);

		/* Read RX data until the RX FIFO is empty */
		while (!(temp & SPISR_RX_EMPTY)) {
			drr = xlnx_quadspi_read32(dev, SPI_DRR_OFFSET);

			if (spi_context_rx_buf_on(ctx)) {
				/* UNALIGNED_PUT: rx_buf need not be aligned */
				switch (config->num_xfer_bytes) {
				case 1:
					UNALIGNED_PUT(drr,
						      (uint8_t *)ctx->rx_buf);
					break;
				case 2:
					UNALIGNED_PUT(drr,
						      (uint16_t *)ctx->rx_buf);
					break;
				case 4:
					UNALIGNED_PUT(drr,
						      (uint32_t *)ctx->rx_buf);
					break;
				default:
					__ASSERT(0,
						 "unsupported num_xfer_bytes");
				}
			}

			spi_context_update_rx(ctx, config->num_xfer_bytes, 1);

			/* Re-read status; more data may have arrived */
			temp = xlnx_quadspi_read32(dev, SPISR_OFFSET);
		}

		/* Start next TX */
		xlnx_quadspi_start_tx(dev);
	} else {
		LOG_WRN("unhandled interrupt, ipisr = 0x%08x", temp);
	}
}
436
/**
 * Initialize the controller.
 *
 * Soft-resets the IP core, connects and enables the instance IRQ, and
 * enables the DTR empty interrupt (plus the global interrupt enable)
 * that drives the interrupt-based transfer flow.
 *
 * @param dev Driver device instance.
 * @return Always 0.
 */
static int xlnx_quadspi_init(const struct device *dev)
{
	const struct xlnx_quadspi_config *config = dev->config;
	struct xlnx_quadspi_data *data = dev->data;

	/* Reset controller */
	xlnx_quadspi_write32(dev, SRR_SOFTRESET_MAGIC, SRR_OFFSET);

	config->irq_config_func(dev);

	/* Enable DTR Empty interrupt */
	xlnx_quadspi_write32(dev, IPIXR_DTR_EMPTY, IPIER_OFFSET);
	xlnx_quadspi_write32(dev, DGIER_GIE, DGIER_OFFSET);

	/* Make the context usable for the first transceive call */
	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}
455
/* SPI driver API vtable for this controller. */
static const struct spi_driver_api xlnx_quadspi_driver_api = {
	.transceive = xlnx_quadspi_transceive_blocking,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = xlnx_quadspi_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
	.release = xlnx_quadspi_release,
};
463
/*
 * Per-instance instantiation: constant config (base address, slave select
 * count and transfer width from devicetree), mutable data (spi_context),
 * device definition and IRQ connection for every enabled
 * xlnx,xps-spi-2.00.a devicetree node.
 */
#define XLNX_QUADSPI_INIT(n)						\
	static void xlnx_quadspi_config_func_##n(const struct device *dev); \
									\
	static const struct xlnx_quadspi_config xlnx_quadspi_config_##n = { \
		.base = DT_INST_REG_ADDR(n),				\
		.irq_config_func = xlnx_quadspi_config_func_##n,	\
		.num_ss_bits = DT_INST_PROP(n, xlnx_num_ss_bits),	\
		.num_xfer_bytes =					\
			DT_INST_PROP(n, xlnx_num_transfer_bits) / 8,	\
	};								\
									\
	static struct xlnx_quadspi_data xlnx_quadspi_data_##n = {	\
		SPI_CONTEXT_INIT_LOCK(xlnx_quadspi_data_##n, ctx),	\
		SPI_CONTEXT_INIT_SYNC(xlnx_quadspi_data_##n, ctx),	\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, &xlnx_quadspi_init,			\
			      NULL,					\
			      &xlnx_quadspi_data_##n,			\
			      &xlnx_quadspi_config_##n, POST_KERNEL,	\
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE,	\
			      &xlnx_quadspi_driver_api);		\
									\
	static void xlnx_quadspi_config_func_##n(const struct device *dev) \
	{								\
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),	\
			    xlnx_quadspi_isr,				\
			    DEVICE_DT_INST_GET(n), 0);			\
		irq_enable(DT_INST_IRQN(n));				\
	}

/* Instantiate the driver for each enabled devicetree node */
DT_INST_FOREACH_STATUS_OKAY(XLNX_QUADSPI_INIT)
496