1 /*
2 * Copyright (c) 2021 Microchip Technology Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT microchip_xec_qmspi_ldma
8
9 #include <errno.h>
10 #include <soc.h>
11
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
15 #include <zephyr/drivers/gpio.h>
16 #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #include <zephyr/drivers/spi.h>
19 #include <zephyr/dt-bindings/clock/mchp_xec_pcr.h>
20 #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
21 #include <zephyr/irq.h>
22 #include <zephyr/pm/device.h>
23 #include <zephyr/sys/sys_io.h>
24 #include <zephyr/sys/util.h>
25 #include <zephyr/logging/log.h>
26 LOG_MODULE_REGISTER(spi_xec, CONFIG_SPI_LOG_LEVEL);
27
28 #include "spi_context.h"
29
30 /* #define MCHP_XEC_QMSPI_DEBUG 1 */
31
32 /* MEC172x QMSPI controller SPI Mode 3 signalling has an anomaly where
33 * received data is shifted off the input line(s) improperly. Received
34 * data bytes will be left shifted by 1. Work-around for SPI Mode 3 is
35  * to sample the input line(s) on the same edge as the output data is ready.
36 */
37 #define XEC_QMSPI_SPI_MODE_3_ANOMALY 1
38
39 /* common clock control device node for all Microchip XEC chips */
40 #define MCHP_XEC_CLOCK_CONTROL_NODE DT_NODELABEL(pcr)
41
42 /* spin loops waiting for HW to clear soft reset bit */
43 #define XEC_QMSPI_SRST_LOOPS 16
44
45 /* busy-wait interval in microseconds and maximum number of wait iterations */
46 #define XEC_QMSPI_WAIT_INTERVAL 8
47 #define XEC_QMSPI_WAIT_COUNT 64
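/* With the values above, a caller of xec_qmspi_spin_yield() busy-waits for at
 * most 64 iterations of 8 microseconds each, roughly 512 microseconds, before
 * the helper returns -ETIMEDOUT.
 */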
48
49 /* QSPI transfer and DMA done */
50 #define XEC_QSPI_HW_XFR_DMA_DONE (MCHP_QMSPI_STS_DONE | MCHP_QMSPI_STS_DMA_DONE)
51
52 /* QSPI hardware error status
53 * Misprogrammed control or descriptors (software error)
54  * TX FIFO overflow
55  * RX FIFO underflow
56 */
57 #define XEC_QSPI_HW_ERRORS (MCHP_QMSPI_STS_PROG_ERR | \
58 MCHP_QMSPI_STS_TXB_ERR | \
59 MCHP_QMSPI_STS_RXB_ERR)
60
61 #define XEC_QSPI_HW_ERRORS_LDMA (MCHP_QMSPI_STS_LDMA_RX_ERR | \
62 MCHP_QMSPI_STS_LDMA_TX_ERR)
63
64 #define XEC_QSPI_HW_ERRORS_ALL (XEC_QSPI_HW_ERRORS | \
65 XEC_QSPI_HW_ERRORS_LDMA)
66
67 #define XEC_QSPI_TIMEOUT_US (100 * 1000) /* 100 ms */
68
69 /* Device constant configuration parameters */
70 struct spi_qmspi_config {
71 struct qmspi_regs *regs;
72 const struct device *clk_dev;
73 struct mchp_xec_pcr_clk_ctrl clksrc;
74 uint32_t clock_freq;
75 uint32_t cs1_freq;
76 uint32_t cs_timing;
77 uint16_t taps_adj;
78 uint8_t girq;
79 uint8_t girq_pos;
80 uint8_t girq_nvic_aggr;
81 uint8_t girq_nvic_direct;
82 uint8_t irq_pri;
83 uint8_t chip_sel;
84 uint8_t width; /* 0(half) 1(single), 2(dual), 4(quad) */
85 uint8_t unused[1];
86 const struct pinctrl_dev_config *pcfg;
87 void (*irq_config_func)(void);
88 };
89
90 #define XEC_QMSPI_XFR_FLAG_TX BIT(0)
91 #define XEC_QMSPI_XFR_FLAG_RX BIT(1)
92
93 /* Device run time data */
94 struct spi_qmspi_data {
95 struct spi_context ctx;
96 uint32_t base_freq_hz;
97 uint32_t spi_freq_hz;
98 uint32_t qstatus;
99 uint8_t np; /* number of data pins: 1, 2, or 4 */
100 #ifdef CONFIG_SPI_ASYNC
101 spi_callback_t cb;
102 void *userdata;
103 size_t xfr_len;
104 #endif
105 uint32_t tempbuf[2];
106 #ifdef MCHP_XEC_QMSPI_DEBUG
107 uint32_t bufcnt_status;
108 uint32_t rx_ldma_ctrl0;
109 uint32_t tx_ldma_ctrl0;
110 uint32_t qunits;
111 uint32_t qxfru;
112 uint32_t xfrlen;
113
114 #endif
115 };
116
117 static int xec_qmspi_spin_yield(int *counter, int max_count)
118 {
119 *counter = *counter + 1;
120
121 if (*counter > max_count) {
122 return -ETIMEDOUT;
123 }
124
125 k_busy_wait(XEC_QMSPI_WAIT_INTERVAL);
126
127 return 0;
128 }
129
130 /*
131  * Reset the QMSPI controller, saving and restoring the timing registers.
132  * Some QMSPI timing registers may have been modified by Boot-ROM OTP
133  * values.
134 */
135 static void qmspi_reset(struct qmspi_regs *regs)
136 {
137 uint32_t taps[3];
138 uint32_t malt1;
139 uint32_t cstm;
140 uint32_t mode;
141 uint32_t cnt = XEC_QMSPI_SRST_LOOPS;
142
143 taps[0] = regs->TM_TAPS;
144 taps[1] = regs->TM_TAPS_ADJ;
145 taps[2] = regs->TM_TAPS_CTRL;
146 malt1 = regs->MODE_ALT1;
147 cstm = regs->CSTM;
148 mode = regs->MODE;
149 regs->MODE = MCHP_QMSPI_M_SRST;
150 while (regs->MODE & MCHP_QMSPI_M_SRST) {
151 if (cnt == 0) {
152 break;
153 }
154 cnt--;
155 }
156 regs->MODE = 0;
157 regs->MODE = mode & ~MCHP_QMSPI_M_ACTIVATE;
158 regs->CSTM = cstm;
159 regs->MODE_ALT1 = malt1;
160 regs->TM_TAPS = taps[0];
161 regs->TM_TAPS_ADJ = taps[1];
162 regs->TM_TAPS_CTRL = taps[2];
163 }
164
165 static uint32_t qmspi_encoded_fdiv(const struct device *dev, uint32_t freq_hz)
166 {
167 struct spi_qmspi_data *qdata = dev->data;
168
169 if (freq_hz == 0u) {
170 return 0u; /* maximum frequency divider */
171 }
172
173 return (qdata->base_freq_hz / freq_hz);
174 }
175
176 /* Program QMSPI frequency divider field in the mode register.
177 * MEC172x QMSPI input clock source is the Fast Peripheral domain whose
178  * clock is controlled by the PCR turbo clock: 96 MHz if turbo mode is
179  * enabled, else 48 MHz. Query the clock control driver to get the clock
180  * rate of the fast peripheral domain. The MEC172x QMSPI clock divider has
181 * been expanded to a 16-bit field encoded as:
182 * 0 = divide by 0x10000
183 * 1 to 0xffff = divide by this value.
184 */
185 static int qmspi_set_frequency(struct spi_qmspi_data *qdata, struct qmspi_regs *regs,
186 uint32_t freq_hz)
187 {
188 uint32_t clk = MCHP_QMSPI_INPUT_CLOCK_FREQ_HZ;
189 uint32_t fdiv = 0u; /* maximum divider */
190
191 if (qdata->base_freq_hz) {
192 clk = qdata->base_freq_hz;
193 }
194
195 if (freq_hz) {
196 fdiv = 1u;
197 if (freq_hz < clk) {
198 fdiv = clk / freq_hz;
199 }
200 }
201
202 regs->MODE = ((regs->MODE & ~(MCHP_QMSPI_M_FDIV_MASK)) |
203 ((fdiv << MCHP_QMSPI_M_FDIV_POS) & MCHP_QMSPI_M_FDIV_MASK));
204
205 if (!fdiv) {
206 fdiv = 0x10000u;
207 }
208
209 qdata->spi_freq_hz = clk / fdiv;
210
211 return 0;
212 }
213
214 /*
215 * SPI signalling mode: CPOL and CPHA
216  * CPOL = 0 means the clock idles low, 1 means the clock idles high
217  * CPHA = 0 Transmitter changes data on the trailing edge of the preceding clock cycle.
218  *          Receiver samples data on the leading edge of the clock cycle.
219  *        1 Transmitter changes data on the leading edge of the current clock cycle.
220  *          Receiver samples data on the trailing edge of the clock cycle.
221 * SPI Mode nomenclature:
222 * Mode CPOL CPHA
223 * 0 0 0
224 * 1 0 1
225 * 2 1 0
226 * 3 1 1
227  * QMSPI has three controls: CPOL, CPHA for output, and CPHA for input.
228 * SPI frequency < 48MHz
229  * Mode 0: CPOL=0 CPHA=0 (CPHA_MISO=0 and CPHA_MOSI=0)
230  * Mode 3: CPOL=1 CPHA=1 (CPHA_MISO=1 and CPHA_MOSI=1)
231  * The data sheet recommends the following when QMSPI is set to the
232  * maximum SPI frequency (48MHz): sample and change data on the same edge.
233  * Mode 0: CPOL=0 CPHA=0 (CPHA_MISO=1 and CPHA_MOSI=0)
234  * Mode 3: CPOL=1 CPHA=1 (CPHA_MISO=0 and CPHA_MOSI=1)
235 *
236  * There is an anomaly in MEC172x for SPI signalling Mode 3. We must
237  * set CPHA_MISO=0 for SPI Mode 3 at all frequencies.
238 */
239
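/* Table index is the SPI signalling mode (0-3). Comparing the entries with
 * the mode descriptions above, the 3-bit signalling field appears to encode
 * bit 0 = CPOL, bit 1 = CPHA_MOSI, bit 2 = CPHA_MISO; for example Mode 1
 * (CPOL=0, CPHA=1) maps to 0x06 = CPHA_MOSI | CPHA_MISO.
 */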
240 const uint8_t smode_tbl[4] = {
241 0x00u, 0x06u, 0x01u,
242 #ifdef XEC_QMSPI_SPI_MODE_3_ANOMALY
243 0x03u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=0 */
244 #else
245 0x07u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=1 */
246 #endif
247 };
248
249 const uint8_t smode48_tbl[4] = {
250 0x04u, 0x02u, 0x05u, 0x03u
251 };
252
253 static void qmspi_set_signalling_mode(struct spi_qmspi_data *qdata,
254 struct qmspi_regs *regs, uint32_t smode)
255 {
256 const uint8_t *ptbl;
257 uint32_t m;
258
259 ptbl = smode_tbl;
260 if (qdata->spi_freq_hz >= MHZ(48)) {
261 ptbl = smode48_tbl;
262 }
263
264 m = (uint32_t)ptbl[smode & 0x03];
265 regs->MODE = (regs->MODE & ~(MCHP_QMSPI_M_SIG_MASK))
266 | (m << MCHP_QMSPI_M_SIG_POS);
267 }
268
269 #ifdef CONFIG_SPI_EXTENDED_MODES
270 /*
271  * QMSPI HW supports single, dual, and quad I/O.
272 * Return QMSPI Control/Descriptor register encoded value.
273 */
274 static uint32_t encode_lines(const struct spi_config *config)
275 {
276 uint32_t qlines;
277
278 switch (config->operation & SPI_LINES_MASK) {
279 case SPI_LINES_SINGLE:
280 qlines = MCHP_QMSPI_C_IFM_1X;
281 break;
282 #if DT_INST_PROP(0, lines) > 1
283 case SPI_LINES_DUAL:
284 qlines = MCHP_QMSPI_C_IFM_2X;
285 break;
286 #endif
287 #if DT_INST_PROP(0, lines) > 2
288 case SPI_LINES_QUAD:
289 qlines = MCHP_QMSPI_C_IFM_4X;
290 break;
291 #endif
292 default:
293 qlines = 0xffu;
294 }
295
296 return qlines;
297 }
298
299 static uint8_t npins_from_spi_config(const struct spi_config *config)
300 {
301 switch (config->operation & SPI_LINES_MASK) {
302 case SPI_LINES_DUAL:
303 return 2u;
304 case SPI_LINES_QUAD:
305 return 4u;
306 default:
307 return 1u;
308 }
309 }
310 #endif /* CONFIG_SPI_EXTENDED_MODES */
311
312 static int spi_feature_support(const struct spi_config *config)
313 {
314 if (config->operation & (SPI_TRANSFER_LSB | SPI_OP_MODE_SLAVE | SPI_MODE_LOOP)) {
315 LOG_ERR("Driver does not support LSB first, slave, or loop back");
316 return -ENOTSUP;
317 }
318
319 if (config->operation & SPI_CS_ACTIVE_HIGH) {
320 LOG_ERR("CS active high not supported");
321 return -ENOTSUP;
322 }
323
324 if (config->operation & SPI_LOCK_ON) {
325 LOG_ERR("Lock On not supported");
326 return -ENOTSUP;
327 }
328
329 if (SPI_WORD_SIZE_GET(config->operation) != 8) {
330 LOG_ERR("Word size != 8 not supported");
331 return -ENOTSUP;
332 }
333
334 return 0;
335 }
336
337 /* Configure QMSPI.
338 * NOTE: QMSPI Shared SPI port has two chip selects.
339 * Private SPI and internal SPI ports support one chip select.
340 * Hardware supports dual and quad I/O. Dual and quad are allowed
341 * if SPI extended mode is enabled at build time. User must
342 * provide pin configuration via DTS.
343 */
344 static int qmspi_configure(const struct device *dev,
345 const struct spi_config *config)
346 {
347 const struct spi_qmspi_config *cfg = dev->config;
348 struct spi_qmspi_data *qdata = dev->data;
349 const struct spi_config *curr_cfg = qdata->ctx.config;
350 struct qmspi_regs *regs = cfg->regs;
351 uint32_t smode;
352 int ret;
353
354 if (!config) {
355 return -EINVAL;
356 }
357
358 	if (!curr_cfg || (curr_cfg->frequency != config->frequency)) {
359 qmspi_set_frequency(qdata, regs, config->frequency);
360 }
361
362 	if (curr_cfg && (curr_cfg->operation == config->operation)) {
363 return 0; /* no change required */
364 }
365
366 /* check new configuration */
367 ret = spi_feature_support(config);
368 if (ret) {
369 return ret;
370 }
371
372 #ifdef CONFIG_SPI_EXTENDED_MODES
373 smode = encode_lines(config);
374 if (smode == 0xff) {
375 LOG_ERR("Requested lines mode not supported");
376 return -ENOTSUP;
377 }
378 qdata->np = npins_from_spi_config(config);
379 #else
380 smode = MCHP_QMSPI_C_IFM_1X;
381 qdata->np = 1u;
382 #endif
383 regs->CTRL = smode;
384
385 smode = 0;
386 if ((config->operation & SPI_MODE_CPHA) != 0U) {
387 smode |= BIT(0);
388 }
389
390 if ((config->operation & SPI_MODE_CPOL) != 0U) {
391 smode |= BIT(1);
392 }
393
394 qmspi_set_signalling_mode(qdata, regs, smode);
395
396 /* chip select */
397 smode = regs->MODE & ~(MCHP_QMSPI_M_CS_MASK);
398 if (cfg->chip_sel == 0) {
399 smode |= MCHP_QMSPI_M_CS0;
400 } else {
401 smode |= MCHP_QMSPI_M_CS1;
402 }
403 regs->MODE = smode;
404
405 /* chip select timing and TAPS adjust */
406 regs->CSTM = cfg->cs_timing;
407 regs->TM_TAPS_ADJ = cfg->taps_adj;
408
409 /* CS1 alternate mode (frequency) */
410 regs->MODE_ALT1 = 0;
411 if (cfg->cs1_freq) {
412 uint32_t fdiv = qmspi_encoded_fdiv(dev, cfg->cs1_freq);
413
414 regs->MODE_ALT1 = (fdiv << MCHP_QMSPI_MA1_CS1_CDIV_POS) &
415 MCHP_QMSPI_MA1_CS1_CDIV_MSK;
416 regs->MODE_ALT1 |= MCHP_QMSPI_MA1_CS1_CDIV_EN;
417 }
418
419 qdata->ctx.config = config;
420
421 regs->MODE |= MCHP_QMSPI_M_ACTIVATE;
422
423 return 0;
424 }
425
426 static uint32_t encode_npins(uint8_t npins)
427 {
428 if (npins == 4) {
429 return MCHP_QMSPI_C_IFM_4X;
430 } else if (npins == 2) {
431 return MCHP_QMSPI_C_IFM_2X;
432 } else {
433 return MCHP_QMSPI_C_IFM_1X;
434 }
435 }
436
437 /* Common controller transfer initialization using Local-DMA.
438  * Full-duplex: controller configured to transmit and receive simultaneously.
439  * Half-duplex(dual/quad): User may only specify a TX or RX buffer set.
440  * Passing both buffer sets is reported as an error.
441 */
442 static inline int qmspi_xfr_cm_init(const struct device *dev,
443 const struct spi_buf_set *tx_bufs,
444 const struct spi_buf_set *rx_bufs)
445 {
446 const struct spi_qmspi_config *devcfg = dev->config;
447 struct spi_qmspi_data *qdata = dev->data;
448 struct qmspi_regs *regs = devcfg->regs;
449
450 regs->IEN = 0;
451 regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
452 regs->LDMA_RX_DESCR_BM = 0;
453 regs->LDMA_TX_DESCR_BM = 0;
454 regs->MODE &= ~(MCHP_QMSPI_M_LDMA_TX_EN | MCHP_QMSPI_M_LDMA_RX_EN);
455 regs->STS = 0xffffffffu;
456 regs->CTRL = encode_npins(qdata->np);
457
458 qdata->qstatus = 0;
459
460 #ifdef CONFIG_SPI_EXTENDED_MODES
461 if (qdata->np != 1) {
462 if (tx_bufs && rx_bufs) {
463 LOG_ERR("Cannot specify both TX and RX buffers in half-duplex(dual/quad)");
464 return -EPROTONOSUPPORT;
465 }
466 }
467 #endif
468
469 return 0;
470 }
471
472 /* QMSPI Local-DMA transfer configuration:
473 * Support full and half(dual/quad) duplex transfers.
474 * Requires caller to have checked that only one direction was setup
475 * in the SPI context: TX or RX not both. (refer to qmspi_xfr_cm_init)
476 * Supports spi_buf's where data pointer is NULL and length non-zero.
477 * These buffers are used as TX tri-state I/O clock only generation or
478 * RX data discard for certain SPI command protocols using dual/quad I/O.
479 * 1. Get largest contiguous data size from SPI context.
480 * 2. If the SPI TX context has a non-zero length configure Local-DMA TX
481  * channel 1 for contiguous data size. If TX context has valid buffer
482 * configure channel to use context buffer with address increment.
483 * If the TX buffer pointer is NULL interpret byte length as the number
484 * of clocks to generate with output line(s) tri-stated. NOTE: The controller
485 * must be configured with TX disabled to not drive output line(s) during
486 * clock generation. Also, no data should be written to TX FIFO. The unit
487 * size can be set to bits. The number of units to transfer must be computed
488 * based upon the number of output pins in the IOM field: full-duplex is one
489 * bit per clock, dual is 2 bits per clock, and quad is 4 bits per clock.
490  * For example, if the I/O width is 4 lines (quad), meaning 4 bits per clock,
491  * and the user wants 7 clocks, then the number of bit units is 4 * 7 = 28.
492 * 3. If instead, the SPI RX context has a non-zero length configure Local-DMA
493  * RX channel 1 for the contiguous data size. If RX context has a valid
494 * buffer configure channel to use buffer with address increment else
495 * configure channel for driver data temporary buffer without address
496 * increment.
497 * 4. Update QMSPI Control register.
498 */
499 static uint32_t qmspi_ldma_encode_unit_size(uint32_t maddr, size_t len)
500 {
501 uint8_t temp = (maddr | (uint32_t)len) & 0x3u;
502
503 if (temp == 0) {
504 return MCHP_QMSPI_LDC_ASZ_4;
505 } else if (temp == 2) {
506 return MCHP_QMSPI_LDC_ASZ_2;
507 } else {
508 return MCHP_QMSPI_LDC_ASZ_1;
509 }
510 }
511
512 static uint32_t qmspi_unit_size(size_t xfrlen)
513 {
514 if ((xfrlen & 0xfu) == 0u) {
515 return 16u;
516 } else if ((xfrlen & 0x3u) == 0u) {
517 return 4u;
518 } else {
519 return 1u;
520 }
521 }
522
523 static uint32_t qmspi_encode_unit_size(uint32_t units_in_bytes)
524 {
525 if (units_in_bytes == 16u) {
526 return MCHP_QMSPI_C_XFR_UNITS_16;
527 } else if (units_in_bytes == 4u) {
528 return MCHP_QMSPI_C_XFR_UNITS_4;
529 } else {
530 return MCHP_QMSPI_C_XFR_UNITS_1;
531 }
532 }
533
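/* Worked example of the unit selection below: a 300-byte contiguous chunk is
 * not a multiple of 16 but is a multiple of 4, so the transfer unit is
 * 4 bytes and NUNITS = 300 / 4 = 75 (xfrlen = 300). Chunks needing more than
 * 0x7fff units are capped here and the remainder is picked up on the next
 * iteration of the caller's transfer loop.
 */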
534 static size_t q_ldma_cfg(const struct device *dev)
535 {
536 const struct spi_qmspi_config *devcfg = dev->config;
537 struct spi_qmspi_data *qdata = dev->data;
538 struct spi_context *ctx = &qdata->ctx;
539 struct qmspi_regs *regs = devcfg->regs;
540
541 size_t ctx_xfr_len = spi_context_max_continuous_chunk(ctx);
542 uint32_t ctrl, ldctrl, mstart, qunits, qxfru, xfrlen;
543
544 regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
545 regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
546 regs->LDRX[0].CTRL = 0;
547 regs->LDRX[0].MSTART = 0;
548 regs->LDRX[0].LEN = 0;
549 regs->LDTX[0].CTRL = 0;
550 regs->LDTX[0].MSTART = 0;
551 regs->LDTX[0].LEN = 0;
552
553 if (ctx_xfr_len == 0) {
554 return 0;
555 }
556
557 qunits = qmspi_unit_size(ctx_xfr_len);
558 ctrl = qmspi_encode_unit_size(qunits);
559 qxfru = ctx_xfr_len / qunits;
560 if (qxfru > 0x7fffu) {
561 qxfru = 0x7fffu;
562 }
563 ctrl |= (qxfru << MCHP_QMSPI_C_XFR_NUNITS_POS);
564 xfrlen = qxfru * qunits;
565
566 #ifdef MCHP_XEC_QMSPI_DEBUG
567 qdata->qunits = qunits;
568 qdata->qxfru = qxfru;
569 qdata->xfrlen = xfrlen;
570 #endif
571 if (spi_context_tx_buf_on(ctx)) {
572 mstart = (uint32_t)ctx->tx_buf;
573 ctrl |= MCHP_QMSPI_C_TX_DATA | MCHP_QMSPI_C_TX_LDMA_CH0;
574 ldctrl = qmspi_ldma_encode_unit_size(mstart, xfrlen);
575 ldctrl |= MCHP_QMSPI_LDC_INCR_EN | MCHP_QMSPI_LDC_EN;
576 regs->MODE |= MCHP_QMSPI_M_LDMA_TX_EN;
577 regs->LDTX[0].LEN = xfrlen;
578 regs->LDTX[0].MSTART = mstart;
579 regs->LDTX[0].CTRL = ldctrl;
580 }
581
582 if (spi_context_rx_buf_on(ctx)) {
583 mstart = (uint32_t)ctx->rx_buf;
584 ctrl |= MCHP_QMSPI_C_RX_LDMA_CH0 | MCHP_QMSPI_C_RX_EN;
585 ldctrl = MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_INCR_EN;
586 ldctrl |= qmspi_ldma_encode_unit_size(mstart, xfrlen);
587 regs->MODE |= MCHP_QMSPI_M_LDMA_RX_EN;
588 regs->LDRX[0].LEN = xfrlen;
589 regs->LDRX[0].MSTART = mstart;
590 regs->LDRX[0].CTRL = ldctrl;
591 }
592
593 regs->CTRL = (regs->CTRL & 0x3u) | ctrl;
594
595 return xfrlen;
596 }
597
598 /* Start and wait for QMSPI synchronous transfer(s) to complete.
599 * Initialize QMSPI controller for Local-DMA operation.
600 * Iterate over SPI context with non-zero TX or RX data lengths.
601 * 1. Configure QMSPI Control register and Local-DMA channel(s)
602 * 2. Clear QMSPI status
603 * 3. Start QMSPI transfer
604 * 4. Poll QMSPI status for transfer done and DMA done with timeout.
605 * 5. Hardware anomaly work-around: Poll with timeout QMSPI Local-DMA
606 * TX and RX channels until hardware clears both channel enables.
607 * This indicates hardware is really done with transfer to/from memory.
608 * 6. Update SPI context with amount of data transmitted and received.
609  *    If the SPI configuration's hold chip select flag (SPI_HOLD_ON_CS) is
610  *    not set, instruct QMSPI to de-assert chip select.
611  * Mark the SPI context as complete.
612 */
613 static int qmspi_xfr_sync(const struct device *dev,
614 const struct spi_config *spi_cfg,
615 const struct spi_buf_set *tx_bufs,
616 const struct spi_buf_set *rx_bufs)
617 {
618 const struct spi_qmspi_config *devcfg = dev->config;
619 struct spi_qmspi_data *qdata = dev->data;
620 struct spi_context *ctx = &qdata->ctx;
621 struct qmspi_regs *regs = devcfg->regs;
622 size_t xfr_len;
623
624 int ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);
625
626 if (ret) {
627 return ret;
628 }
629
630 while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) {
631 xfr_len = q_ldma_cfg(dev);
632 regs->STS = 0xffffffffu;
633 regs->EXE = MCHP_QMSPI_EXE_START;
634
635 #ifdef MCHP_XEC_QMSPI_DEBUG
636 uint32_t temp = regs->STS;
637
638 while (!(temp & MCHP_QMSPI_STS_DONE)) {
639 temp = regs->STS;
640 }
641 qdata->qstatus = temp;
642 qdata->bufcnt_status = regs->BCNT_STS;
643 qdata->rx_ldma_ctrl0 = regs->LDRX[0].CTRL;
644 qdata->tx_ldma_ctrl0 = regs->LDTX[0].CTRL;
645 #else
646 uint32_t wcnt = 0;
647
648 qdata->qstatus = regs->STS;
649 while (!(qdata->qstatus & MCHP_QMSPI_STS_DONE)) {
650 k_busy_wait(1u);
651 if (++wcnt > XEC_QSPI_TIMEOUT_US) {
652 regs->EXE = MCHP_QMSPI_EXE_STOP;
653 return -ETIMEDOUT;
654 }
655 qdata->qstatus = regs->STS;
656 }
657 #endif
658 spi_context_update_tx(ctx, 1, xfr_len);
659 spi_context_update_rx(ctx, 1, xfr_len);
660 }
661
662 if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) {
663 regs->EXE = MCHP_QMSPI_EXE_STOP;
664 }
665
666 spi_context_complete(ctx, dev, 0);
667
668 return 0;
669 }
670
671 #ifdef CONFIG_SPI_ASYNC
672 /* Configure QMSPI such that QMSPI transfer FSM and LDMA FSM are synchronized.
673 * Transfer length must be programmed into control/descriptor register(s) and
674 * LDMA register(s). LDMA override length bit must NOT be set.
675 */
676 static int qmspi_xfr_start_async(const struct device *dev, const struct spi_buf_set *tx_bufs,
677 const struct spi_buf_set *rx_bufs)
678 {
679 const struct spi_qmspi_config *devcfg = dev->config;
680 struct spi_qmspi_data *qdata = dev->data;
681 struct qmspi_regs *regs = devcfg->regs;
682 int ret;
683
684 ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);
685 if (ret) {
686 return ret;
687 }
688
689 qdata->xfr_len = q_ldma_cfg(dev);
690 if (!qdata->xfr_len) {
691 return 0; /* nothing to do */
692 }
693
694 regs->STS = 0xffffffffu;
695 regs->EXE = MCHP_QMSPI_EXE_START;
696 regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
697 | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;
698
699 return 0;
700 }
701
702 /* Wrapper to start asynchronous (interrupts enabled) SPI transaction */
703 static int qmspi_xfr_async(const struct device *dev,
704 const struct spi_config *config,
705 const struct spi_buf_set *tx_bufs,
706 const struct spi_buf_set *rx_bufs)
707 {
708 struct spi_qmspi_data *qdata = dev->data;
709 int err = 0;
710
711 qdata->qstatus = 0;
712 qdata->xfr_len = 0;
713
714 err = qmspi_xfr_start_async(dev, tx_bufs, rx_bufs);
715
716 return err;
717 }
718 #endif /* CONFIG_SPI_ASYNC */
719
720 /* Start (a)synchronous transaction using QMSPI Local-DMA */
721 static int qmspi_transceive(const struct device *dev,
722 const struct spi_config *config,
723 const struct spi_buf_set *tx_bufs,
724 const struct spi_buf_set *rx_bufs,
725 bool asynchronous,
726 spi_callback_t cb,
727 void *user_data)
728 {
729 struct spi_qmspi_data *qdata = dev->data;
730 struct spi_context *ctx = &qdata->ctx;
731 int err = 0;
732
733 if (!config) {
734 return -EINVAL;
735 }
736
737 if (!tx_bufs && !rx_bufs) {
738 return 0;
739 }
740
741 spi_context_lock(&qdata->ctx, asynchronous, cb, user_data, config);
742
743 err = qmspi_configure(dev, config);
744 if (err != 0) {
745 spi_context_release(ctx, err);
746 return err;
747 }
748
749 spi_context_cs_control(ctx, true);
750 spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
751
752 #ifdef CONFIG_SPI_ASYNC
753 if (asynchronous) {
754 qdata->cb = cb;
755 qdata->userdata = user_data;
756 err = qmspi_xfr_async(dev, config, tx_bufs, rx_bufs);
757 } else {
758 err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
759 }
760 #else
761 err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
762 #endif
763 if (err) { /* de-assert CS# and give semaphore */
764 spi_context_unlock_unconditionally(ctx);
765 return err;
766 }
767
768 if (asynchronous) {
769 return err;
770 }
771
772 err = spi_context_wait_for_completion(ctx);
773 if (!(config->operation & SPI_HOLD_ON_CS)) {
774 spi_context_cs_control(ctx, false);
775 }
776 spi_context_release(ctx, err);
777
778 return err;
779 }
780
781 static int qmspi_transceive_sync(const struct device *dev,
782 const struct spi_config *config,
783 const struct spi_buf_set *tx_bufs,
784 const struct spi_buf_set *rx_bufs)
785 {
786 return qmspi_transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
787 }
788
789 #ifdef CONFIG_SPI_ASYNC
790
791 static int qmspi_transceive_async(const struct device *dev,
792 const struct spi_config *config,
793 const struct spi_buf_set *tx_bufs,
794 const struct spi_buf_set *rx_bufs,
795 spi_callback_t cb,
796 void *userdata)
797 {
798 return qmspi_transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
799 }
800 #endif /* CONFIG_SPI_ASYNC */
801
802 static int qmspi_release(const struct device *dev,
803 const struct spi_config *config)
804 {
805 struct spi_qmspi_data *data = dev->data;
806 const struct spi_qmspi_config *cfg = dev->config;
807 struct qmspi_regs *regs = cfg->regs;
808 int ret = 0;
809 int counter = 0;
810
811 if (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
812 /* Force CS# to de-assert on next unit boundary */
813 regs->EXE = MCHP_QMSPI_EXE_STOP;
814 while (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
815 ret = xec_qmspi_spin_yield(&counter, XEC_QMSPI_WAIT_COUNT);
816 if (ret != 0) {
817 break;
818 }
819 }
820 }
821
822 spi_context_unlock_unconditionally(&data->ctx);
823
824 return ret;
825 }
826
827 /* QMSPI interrupt handler called by Zephyr ISR
828 * All transfers use QMSPI Local-DMA specified by the Control register.
829  * QMSPI descriptor mode is not used.
830 * Full-duplex always uses LDMA TX channel 0 and RX channel 0
831 * Half-duplex(dual/quad) use one of TX channel 0 or RX channel 0
832 */
833 void qmspi_xec_isr(const struct device *dev)
834 {
835 const struct spi_qmspi_config *cfg = dev->config;
836 struct spi_qmspi_data *data = dev->data;
837 struct qmspi_regs *regs = cfg->regs;
838 uint32_t qstatus = regs->STS;
839 #ifdef CONFIG_SPI_ASYNC
840 struct spi_context *ctx = &data->ctx;
841 int xstatus = 0;
842 #endif
843
844 regs->IEN = 0;
845 data->qstatus = qstatus;
846 regs->STS = MCHP_QMSPI_STS_RW1C_MASK;
847 mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
848
849 #ifdef CONFIG_SPI_ASYNC
850 if (qstatus & XEC_QSPI_HW_ERRORS_ALL) {
851 xstatus = -EIO;
852 data->qstatus |= BIT(7);
853 regs->EXE = MCHP_QMSPI_EXE_STOP;
854 spi_context_cs_control(ctx, false);
855 spi_context_complete(ctx, dev, xstatus);
856 if (data->cb) {
857 data->cb(dev, xstatus, data->userdata);
858 }
859 return;
860 }
861
862 /* Clear Local-DMA enables in Mode and Control registers */
863 regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
864 regs->CTRL &= MCHP_QMSPI_C_IFM_MASK;
865
866 spi_context_update_tx(ctx, 1, data->xfr_len);
867 spi_context_update_rx(ctx, 1, data->xfr_len);
868
869 data->xfr_len = q_ldma_cfg(dev);
870 if (data->xfr_len) {
871 regs->STS = 0xffffffffu;
872 regs->EXE = MCHP_QMSPI_EXE_START;
873 regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
874 | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;
875 return;
876 }
877
878 if (!(ctx->owner->operation & SPI_HOLD_ON_CS)) {
879 regs->EXE = MCHP_QMSPI_EXE_STOP;
880 spi_context_cs_control(&data->ctx, false);
881 }
882
883 spi_context_complete(&data->ctx, dev, xstatus);
884
885 if (data->cb) {
886 data->cb(dev, xstatus, data->userdata);
887 }
888 #endif /* CONFIG_SPI_ASYNC */
889 }
890
891 #ifdef CONFIG_PM_DEVICE
892 /* If the application wants the QMSPI pins to be disabled in suspend, it must
893  * define pinctrl-1 (sleep) values for each pin in the app/project DT overlay.
894 */
895 static int qmspi_xec_pm_action(const struct device *dev, enum pm_device_action action)
896 {
897 const struct spi_qmspi_config *devcfg = dev->config;
898 int ret;
899
900 switch (action) {
901 case PM_DEVICE_ACTION_RESUME:
902 ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
903 break;
904 case PM_DEVICE_ACTION_SUSPEND:
905 ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
906 if (ret == -ENOENT) { /* pinctrl-1 does not exist */
907 ret = 0;
908 }
909 break;
910 default:
911 ret = -ENOTSUP;
912 }
913
914 return ret;
915 }
916 #endif /* CONFIG_PM_DEVICE */
917
918 /*
919 * Called for each QMSPI controller instance
920 * Initialize QMSPI controller.
921 * Disable sleep control.
922 * Disable and clear interrupt status.
923 * Initialize SPI context.
924 * QMSPI will be fully configured and enabled when the transceive API
925 * is called.
926 */
927 static int qmspi_xec_init(const struct device *dev)
928 {
929 const struct spi_qmspi_config *cfg = dev->config;
930 struct spi_qmspi_data *qdata = dev->data;
931 struct qmspi_regs *regs = cfg->regs;
932 clock_control_subsys_t clkss = (clock_control_subsys_t)MCHP_XEC_PCR_CLK_PERIPH_FAST;
933 int ret = 0;
934
935 qdata->base_freq_hz = 0u;
936 qdata->qstatus = 0;
937 qdata->np = cfg->width;
938 #ifdef CONFIG_SPI_ASYNC
939 qdata->xfr_len = 0;
940 #endif
941
942 if (!cfg->clk_dev) {
943 LOG_ERR("XEC QMSPI-LDMA clock device not configured");
944 return -EINVAL;
945 }
946
947 ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&cfg->clksrc);
948 if (ret < 0) {
949 LOG_ERR("XEC QMSPI-LDMA enable clock source error %d", ret);
950 return ret;
951 }
952
953 ret = clock_control_get_rate(cfg->clk_dev, clkss, &qdata->base_freq_hz);
954 if (ret) {
955 LOG_ERR("XEC QMSPI-LDMA clock get rate error %d", ret);
956 return ret;
957 }
958
959 /* controller in known state before enabling pins */
960 qmspi_reset(regs);
961 mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
962
963 ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
964 if (ret != 0) {
965 LOG_ERR("XEC QMSPI-LDMA pinctrl setup failed (%d)", ret);
966 return ret;
967 }
968
969 /* default SPI Mode 0 signalling */
970 const struct spi_config spi_cfg = {
971 .frequency = cfg->clock_freq,
972 .operation = SPI_LINES_SINGLE | SPI_WORD_SET(8),
973 };
974
975 ret = qmspi_configure(dev, &spi_cfg);
976 if (ret) {
977 LOG_ERR("XEC QMSPI-LDMA init configure failed (%d)", ret);
978 return ret;
979 }
980
981 #ifdef CONFIG_SPI_ASYNC
982 cfg->irq_config_func();
983 mchp_xec_ecia_enable(cfg->girq, cfg->girq_pos);
984 #endif
985
986 spi_context_unlock_unconditionally(&qdata->ctx);
987
988 return 0;
989 }
990
991 static const struct spi_driver_api spi_qmspi_xec_driver_api = {
992 .transceive = qmspi_transceive_sync,
993 #ifdef CONFIG_SPI_ASYNC
994 .transceive_async = qmspi_transceive_async,
995 #endif
996 .release = qmspi_release,
997 };
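/* Minimal sketch of application-side usage through the generic Zephyr SPI
 * API. The device node label spi_flash0 and the 0x03 read opcode are
 * illustrative assumptions, not part of this driver:
 *
 *   static const struct spi_dt_spec bus =
 *       SPI_DT_SPEC_GET(DT_NODELABEL(spi_flash0), SPI_WORD_SET(8), 0);
 *
 *   uint8_t cmd[4] = { 0x03u, 0u, 0u, 0u };
 *   uint8_t data[16];
 *   const struct spi_buf txb[1] = { { .buf = cmd, .len = sizeof(cmd) } };
 *   const struct spi_buf rxb[2] = {
 *       { .buf = NULL, .len = sizeof(cmd) },   // discard bytes clocked in during the command
 *       { .buf = data, .len = sizeof(data) },
 *   };
 *   const struct spi_buf_set tx = { .buffers = txb, .count = 1 };
 *   const struct spi_buf_set rx = { .buffers = rxb, .count = 2 };
 *
 *   int rc = spi_transceive_dt(&bus, &tx, &rx);
 */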
998
999 #define XEC_QMSPI_CS_TIMING_VAL(a, b, c, d) (((a) & 0xFu) \
1000 | (((b) & 0xFu) << 8) \
1001 | (((c) & 0xFu) << 16) \
1002 | (((d) & 0xFu) << 24))
1003
1004 #define XEC_QMSPI_TAPS_ADJ_VAL(a, b) (((a) & 0xffu) | (((b) & 0xffu) << 8))
1005
1006 #define XEC_QMSPI_CS_TIMING(i) XEC_QMSPI_CS_TIMING_VAL( \
1007 DT_INST_PROP_OR(i, dcsckon, 6), \
1008 DT_INST_PROP_OR(i, dckcsoff, 4), \
1009 DT_INST_PROP_OR(i, dldh, 6), \
1010 DT_INST_PROP_OR(i, dcsda, 6))
1011
1012 #define XEC_QMSPI_TAPS_ADJ(i) XEC_QMSPI_TAPS_ADJ_VAL( \
1013 DT_INST_PROP_OR(i, tctradj, 0), \
1014 DT_INST_PROP_OR(i, tsckadj, 0))
1015
1016 #define XEC_QMSPI_GIRQ(i) \
1017 MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(i, girqs, 0))
1018
1019 #define XEC_QMSPI_GIRQ_POS(i) \
1020 MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(i, girqs, 0))
1021
1022 #define XEC_QMSPI_NVIC_AGGR(i) \
1023 MCHP_XEC_ECIA_NVIC_AGGR(DT_INST_PROP_BY_IDX(i, girqs, 0))
1024
1025 #define XEC_QMSPI_NVIC_DIRECT(i) \
1026 MCHP_XEC_ECIA_NVIC_DIRECT(DT_INST_PROP_BY_IDX(i, girqs, 0))
1027
1028 #define XEC_QMSPI_PCR_INFO(i) \
1029 MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(i, regidx), \
1030 DT_INST_CLOCKS_CELL(i, bitpos), \
1031 DT_INST_CLOCKS_CELL(i, domain))
1032
1033 /*
1034  * The instance number i is not related to the block ID; rather, it reflects
1035  * the order in which the DT tools process all DT files in a build.
1036 */
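/* Illustrative devicetree overlay fragment for one instance. This is a sketch
 * only: the node label qmspi0 is an assumption and the property names are
 * inferred from the DT_INST_* accessors above; consult the
 * microchip,xec-qmspi-ldma binding for the authoritative list.
 *
 *   &qmspi0 {
 *       status = "okay";
 *       clock-frequency = <12000000>;
 *       lines = <4>;
 *       chip-select = <0>;
 *       pinctrl-0 = <&qmspi_pins_default>;
 *       pinctrl-names = "default";
 *   };
 */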
1037 #define QMSPI_XEC_DEVICE(i) \
1038 \
1039 PINCTRL_DT_INST_DEFINE(i); \
1040 \
1041 static void qmspi_xec_irq_config_func_##i(void) \
1042 { \
1043 IRQ_CONNECT(DT_INST_IRQN(i), \
1044 DT_INST_IRQ(i, priority), \
1045 qmspi_xec_isr, \
1046 DEVICE_DT_INST_GET(i), 0); \
1047 irq_enable(DT_INST_IRQN(i)); \
1048 } \
1049 \
1050 static struct spi_qmspi_data qmspi_xec_data_##i = { \
1051 SPI_CONTEXT_INIT_LOCK(qmspi_xec_data_##i, ctx), \
1052 SPI_CONTEXT_INIT_SYNC(qmspi_xec_data_##i, ctx), \
1053 }; \
1054 static const struct spi_qmspi_config qmspi_xec_config_##i = { \
1055 .regs = (struct qmspi_regs *) DT_INST_REG_ADDR(i), \
1056 .clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i)), \
1057 .clksrc = { .pcr_info = XEC_QMSPI_PCR_INFO(i), }, \
1058 .clock_freq = DT_INST_PROP_OR(i, clock_frequency, MHZ(12)), \
1059 .cs1_freq = DT_INST_PROP_OR(i, cs1_freq, 0), \
1060 .cs_timing = XEC_QMSPI_CS_TIMING(i), \
1061 .taps_adj = XEC_QMSPI_TAPS_ADJ(i), \
1062 .girq = XEC_QMSPI_GIRQ(i), \
1063 .girq_pos = XEC_QMSPI_GIRQ_POS(i), \
1064 .girq_nvic_aggr = XEC_QMSPI_NVIC_AGGR(i), \
1065 .girq_nvic_direct = XEC_QMSPI_NVIC_DIRECT(i), \
1066 .irq_pri = DT_INST_IRQ(i, priority), \
1067 .chip_sel = DT_INST_PROP_OR(i, chip_select, 0), \
1068 .width = DT_INST_PROP_OR(0, lines, 1), \
1069 .irq_config_func = qmspi_xec_irq_config_func_##i, \
1070 .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i), \
1071 }; \
1072 PM_DEVICE_DT_INST_DEFINE(i, qmspi_xec_pm_action); \
1073 DEVICE_DT_INST_DEFINE(i, &qmspi_xec_init, \
1074 PM_DEVICE_DT_INST_GET(i), \
1075 &qmspi_xec_data_##i, &qmspi_xec_config_##i, \
1076 POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
1077 &spi_qmspi_xec_driver_api);
1078
1079 DT_INST_FOREACH_STATUS_OKAY(QMSPI_XEC_DEVICE)
1080