1 /*
2  * Copyright (c) 2021 Microchip Technology Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT microchip_xec_qmspi_ldma
8 
9 #include <errno.h>
10 #include <soc.h>
11 
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
15 #include <zephyr/drivers/gpio.h>
16 #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #include <zephyr/drivers/spi.h>
19 #include <zephyr/drivers/spi/rtio.h>
20 #include <zephyr/dt-bindings/clock/mchp_xec_pcr.h>
21 #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
22 #include <zephyr/irq.h>
23 #include <zephyr/pm/device.h>
24 #include <zephyr/sys/sys_io.h>
25 #include <zephyr/sys/util.h>
26 #include <zephyr/logging/log.h>
27 LOG_MODULE_REGISTER(spi_xec, CONFIG_SPI_LOG_LEVEL);
28 
29 #include "spi_context.h"
30 
31 /* #define MCHP_XEC_QMSPI_DEBUG 1 */
32 
33 /* MEC172x QMSPI controller SPI Mode 3 signalling has an anomaly where
34  * received data is shifted off the input line(s) improperly. Received
35  * data bytes will be left shifted by 1. Work-around for SPI Mode 3 is
36  * to sample input line(s) on same edge as output data is ready.
37  */
38 #define XEC_QMSPI_SPI_MODE_3_ANOMALY 1
39 
40 /* common clock control device node for all Microchip XEC chips */
41 #define MCHP_XEC_CLOCK_CONTROL_NODE	DT_NODELABEL(pcr)
42 
43 /* spin loops waiting for HW to clear soft reset bit */
44 #define XEC_QMSPI_SRST_LOOPS		16
45 
46 /* microseconds for busy wait and total wait interval */
47 #define XEC_QMSPI_WAIT_INTERVAL		8
48 #define XEC_QMSPI_WAIT_COUNT		64
49 
50 /* QSPI transfer and DMA done */
51 #define XEC_QSPI_HW_XFR_DMA_DONE	(MCHP_QMSPI_STS_DONE | MCHP_QMSPI_STS_DMA_DONE)
52 
53 /* QSPI hardware error status
54  * Misprogrammed control or descriptors (software error)
55  * Overflow TX FIFO
56  * Underflow RX FIFO
57  */
58 #define XEC_QSPI_HW_ERRORS		(MCHP_QMSPI_STS_PROG_ERR |	\
59 					 MCHP_QMSPI_STS_TXB_ERR |	\
60 					 MCHP_QMSPI_STS_RXB_ERR)
61 
62 #define XEC_QSPI_HW_ERRORS_LDMA		(MCHP_QMSPI_STS_LDMA_RX_ERR |	\
63 					 MCHP_QMSPI_STS_LDMA_TX_ERR)
64 
65 #define XEC_QSPI_HW_ERRORS_ALL		(XEC_QSPI_HW_ERRORS |		\
66 					 XEC_QSPI_HW_ERRORS_LDMA)
67 
68 #define XEC_QSPI_TIMEOUT_US		(100 * 1000) /* 100 ms */
69 
70 /* Device constant configuration parameters */
71 struct spi_qmspi_config {
72 	struct qmspi_regs *regs;
73 	const struct device *clk_dev;
74 	struct mchp_xec_pcr_clk_ctrl clksrc;
75 	uint32_t clock_freq;
76 	uint32_t cs1_freq;
77 	uint32_t cs_timing;
78 	uint16_t taps_adj;
79 	uint8_t girq;
80 	uint8_t girq_pos;
81 	uint8_t girq_nvic_aggr;
82 	uint8_t girq_nvic_direct;
83 	uint8_t irq_pri;
84 	uint8_t chip_sel;
85 	uint8_t width;	/* 0(half) 1(single), 2(dual), 4(quad) */
86 	uint8_t unused[1];
87 	const struct pinctrl_dev_config *pcfg;
88 	void (*irq_config_func)(void);
89 };
90 
91 #define XEC_QMSPI_XFR_FLAG_TX		BIT(0)
92 #define XEC_QMSPI_XFR_FLAG_RX		BIT(1)
93 
94 /* Device run time data */
95 struct spi_qmspi_data {
96 	struct spi_context ctx;
97 	uint32_t base_freq_hz;
98 	uint32_t spi_freq_hz;
99 	uint32_t qstatus;
100 	uint8_t np; /* number of data pins: 1, 2, or 4 */
101 #ifdef CONFIG_SPI_ASYNC
102 	spi_callback_t cb;
103 	void *userdata;
104 	size_t xfr_len;
105 #endif
106 	uint32_t tempbuf[2];
107 #ifdef MCHP_XEC_QMSPI_DEBUG
108 	uint32_t bufcnt_status;
109 	uint32_t rx_ldma_ctrl0;
110 	uint32_t tx_ldma_ctrl0;
111 	uint32_t qunits;
112 	uint32_t qxfru;
113 	uint32_t xfrlen;
114 
115 #endif
116 };
117 
xec_qmspi_spin_yield(int * counter,int max_count)118 static int xec_qmspi_spin_yield(int *counter, int max_count)
119 {
120 	*counter = *counter + 1;
121 
122 	if (*counter > max_count) {
123 		return -ETIMEDOUT;
124 	}
125 
126 	k_busy_wait(XEC_QMSPI_WAIT_INTERVAL);
127 
128 	return 0;
129 }
130 
131 /*
132  * reset QMSPI controller with save/restore of timing registers.
133  * Some QMSPI timing register may be modified by the Boot-ROM OTP
134  * values.
135  */
/* Soft-reset the QMSPI controller while preserving registers that the
 * Boot-ROM may have programmed from OTP: timing taps, CS1 alternate mode,
 * and chip-select timing. The Mode register is restored with ACTIVATE
 * cleared so the controller remains disabled after the reset.
 */
static void qmspi_reset(struct qmspi_regs *regs)
{
	uint32_t taps[3];
	uint32_t malt1;
	uint32_t cstm;
	uint32_t mode;
	uint32_t cnt = XEC_QMSPI_SRST_LOOPS;

	/* save registers that soft reset would clobber */
	taps[0] = regs->TM_TAPS;
	taps[1] = regs->TM_TAPS_ADJ;
	taps[2] = regs->TM_TAPS_CTRL;
	malt1 = regs->MODE_ALT1;
	cstm = regs->CSTM;
	mode = regs->MODE;
	/* trigger soft reset; spin (bounded) until HW clears the bit */
	regs->MODE = MCHP_QMSPI_M_SRST;
	while (regs->MODE & MCHP_QMSPI_M_SRST) {
		if (cnt == 0) {
			break;
		}
		cnt--;
	}
	/* restore saved state, leaving the controller de-activated */
	regs->MODE = 0;
	regs->MODE = mode & ~MCHP_QMSPI_M_ACTIVATE;
	regs->CSTM = cstm;
	regs->MODE_ALT1 = malt1;
	regs->TM_TAPS = taps[0];
	regs->TM_TAPS_ADJ = taps[1];
	regs->TM_TAPS_CTRL = taps[2];
}
165 
qmspi_encoded_fdiv(const struct device * dev,uint32_t freq_hz)166 static uint32_t qmspi_encoded_fdiv(const struct device *dev, uint32_t freq_hz)
167 {
168 	struct spi_qmspi_data *qdata = dev->data;
169 
170 	if (freq_hz == 0u) {
171 		return 0u; /* maximum frequency divider */
172 	}
173 
174 	return (qdata->base_freq_hz / freq_hz);
175 }
176 
177 /* Program QMSPI frequency divider field in the mode register.
178  * MEC172x QMSPI input clock source is the Fast Peripheral domain whose
179  * clock is controlled by the PCR turbo clock. 96 MHz if turbo mode
180  * enabled else 48 MHz. Query the clock control driver to get clock
181  * rate of fast peripheral domain. MEC172x QMSPI clock divider has
182  * been expanded to a 16-bit field encoded as:
183  * 0 = divide by 0x10000
184  * 1 to 0xffff = divide by this value.
185  */
qmspi_set_frequency(struct spi_qmspi_data * qdata,struct qmspi_regs * regs,uint32_t freq_hz)186 static int qmspi_set_frequency(struct spi_qmspi_data *qdata, struct qmspi_regs *regs,
187 			       uint32_t freq_hz)
188 {
189 	uint32_t clk = MCHP_QMSPI_INPUT_CLOCK_FREQ_HZ;
190 	uint32_t fdiv = 0u; /* maximum divider */
191 
192 	if (qdata->base_freq_hz) {
193 		clk = qdata->base_freq_hz;
194 	}
195 
196 	if (freq_hz) {
197 		fdiv = 1u;
198 		if (freq_hz < clk) {
199 			fdiv = clk / freq_hz;
200 		}
201 	}
202 
203 	regs->MODE = ((regs->MODE & ~(MCHP_QMSPI_M_FDIV_MASK)) |
204 		((fdiv << MCHP_QMSPI_M_FDIV_POS) & MCHP_QMSPI_M_FDIV_MASK));
205 
206 	if (!fdiv) {
207 		fdiv = 0x10000u;
208 	}
209 
210 	qdata->spi_freq_hz = clk / fdiv;
211 
212 	return 0;
213 }
214 
215 /*
216  * SPI signalling mode: CPOL and CPHA
217  * CPOL = 0 is clock idles low, 1 is clock idle high
218  * CPHA = 0 Transmitter changes data on trailing of preceding clock cycle.
219  *          Receiver samples data on leading edge of clock cycle.
220  *        1 Transmitter changes data on leading edge of current clock cycle.
221  *          Receiver samples data on the trailing edge of clock cycle.
222  * SPI Mode nomenclature:
223  * Mode CPOL CPHA
224  *  0     0    0
225  *  1     0    1
226  *  2     1    0
227  *  3     1    1
228  * QMSPI has three controls, CPOL, CPHA for output and CPHA for input.
229  * SPI frequency < 48MHz
230  *	Mode 0: CPOL=0 CHPA=0 (CHPA_MISO=0 and CHPA_MOSI=0)
231  *	Mode 3: CPOL=1 CHPA=1 (CHPA_MISO=1 and CHPA_MOSI=1)
232  * Data sheet recommends when QMSPI set at max. SPI frequency (48MHz).
233  * SPI frequency == 48MHz sample and change data on same edge.
234  *  Mode 0: CPOL=0 CHPA=0 (CHPA_MISO=1 and CHPA_MOSI=0)
235  *  Mode 3: CPOL=1 CHPA=1 (CHPA_MISO=0 and CHPA_MOSI=1)
236  *
237  * There is an anomaly in MEC172x for SPI signalling mode 3. We must
238  * set CHPA_MISO=0 for SPI Mode 3 at all frequencies.
239  */
240 
/* QMSPI Mode register signalling-field values (CPOL/CPHA_MOSI/CPHA_MISO
 * combination) indexed by SPI mode 0-3, for frequencies below 48MHz.
 * The mode 3 entry applies the MEC172x anomaly work-around when enabled.
 */
const uint8_t smode_tbl[4] = {
	0x00u, 0x06u, 0x01u,
#ifdef XEC_QMSPI_SPI_MODE_3_ANOMALY
	0x03u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=0 */
#else
	0x07u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=1 */
#endif
};

/* Signalling-field values indexed by SPI mode 0-3 for 48MHz operation:
 * sample and change data on the same edge per the data sheet
 * recommendation in the comment block above.
 */
const uint8_t smode48_tbl[4] = {
	0x04u, 0x02u, 0x05u, 0x03u
};
253 
qmspi_set_signalling_mode(struct spi_qmspi_data * qdata,struct qmspi_regs * regs,uint32_t smode)254 static void qmspi_set_signalling_mode(struct spi_qmspi_data *qdata,
255 				      struct qmspi_regs *regs, uint32_t smode)
256 {
257 	const uint8_t *ptbl;
258 	uint32_t m;
259 
260 	ptbl = smode_tbl;
261 	if (qdata->spi_freq_hz >= MHZ(48)) {
262 		ptbl = smode48_tbl;
263 	}
264 
265 	m = (uint32_t)ptbl[smode & 0x03];
266 	regs->MODE = (regs->MODE & ~(MCHP_QMSPI_M_SIG_MASK))
267 		     | (m << MCHP_QMSPI_M_SIG_POS);
268 }
269 
270 #ifdef CONFIG_SPI_EXTENDED_MODES
271 /*
272  * QMSPI HW support single, dual, and quad.
273  * Return QMSPI Control/Descriptor register encoded value.
274  */
encode_lines(const struct spi_config * config)275 static uint32_t encode_lines(const struct spi_config *config)
276 {
277 	uint32_t qlines;
278 
279 	switch (config->operation & SPI_LINES_MASK) {
280 	case SPI_LINES_SINGLE:
281 		qlines = MCHP_QMSPI_C_IFM_1X;
282 		break;
283 #if DT_INST_PROP(0, lines) > 1
284 	case SPI_LINES_DUAL:
285 		qlines = MCHP_QMSPI_C_IFM_2X;
286 		break;
287 #endif
288 #if DT_INST_PROP(0, lines) > 2
289 	case SPI_LINES_QUAD:
290 		qlines = MCHP_QMSPI_C_IFM_4X;
291 		break;
292 #endif
293 	default:
294 		qlines = 0xffu;
295 	}
296 
297 	return qlines;
298 }
299 
npins_from_spi_config(const struct spi_config * config)300 static uint8_t npins_from_spi_config(const struct spi_config *config)
301 {
302 	switch (config->operation & SPI_LINES_MASK) {
303 	case SPI_LINES_DUAL:
304 		return 2u;
305 	case SPI_LINES_QUAD:
306 		return 4u;
307 	default:
308 		return 1u;
309 	}
310 }
311 #endif /* CONFIG_SPI_EXTENDED_MODES */
312 
spi_feature_support(const struct spi_config * config)313 static int spi_feature_support(const struct spi_config *config)
314 {
315 	if (config->operation & (SPI_TRANSFER_LSB | SPI_OP_MODE_SLAVE | SPI_MODE_LOOP)) {
316 		LOG_ERR("Driver does not support LSB first, slave, or loop back");
317 		return -ENOTSUP;
318 	}
319 
320 	if (config->operation & SPI_CS_ACTIVE_HIGH) {
321 		LOG_ERR("CS active high not supported");
322 		return -ENOTSUP;
323 	}
324 
325 	if (config->operation & SPI_LOCK_ON) {
326 		LOG_ERR("Lock On not supported");
327 		return -ENOTSUP;
328 	}
329 
330 	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
331 		LOG_ERR("Word size != 8 not supported");
332 		return -ENOTSUP;
333 	}
334 
335 	return 0;
336 }
337 
338 /* Configure QMSPI.
339  * NOTE: QMSPI Shared SPI port has two chip selects.
340  * Private SPI and internal SPI ports support one chip select.
341  * Hardware supports dual and quad I/O. Dual and quad are allowed
342  * if SPI extended mode is enabled at build time. User must
343  * provide pin configuration via DTS.
344  */
/* Apply a new spi_config to the controller: frequency divider, lines
 * mode, signalling (CPOL/CPHA), chip select, CS timing/taps, and the
 * optional CS1 alternate frequency. A no-op if the context already
 * holds this exact configuration. Activates the controller on success.
 * Returns 0 on success, -EINVAL for a NULL config, or -ENOTSUP for an
 * unsupported feature/lines mode.
 */
static int qmspi_configure(const struct device *dev,
			   const struct spi_config *config)
{
	const struct spi_qmspi_config *cfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct qmspi_regs *regs = cfg->regs;
	uint32_t smode;
	int ret;

	if (!config) {
		return -EINVAL;
	}

	/* same pointer as last applied config: nothing to do */
	if (spi_context_configured(&qdata->ctx, config)) {
		return 0;
	}

	/* program divider first: signalling table choice below depends on
	 * the resulting spi_freq_hz
	 */
	qmspi_set_frequency(qdata, regs, config->frequency);

	/* check new configuration */
	ret = spi_feature_support(config);
	if (ret) {
		return ret;
	}

#ifdef CONFIG_SPI_EXTENDED_MODES
	smode = encode_lines(config);
	if (smode == 0xff) {
		LOG_ERR("Requested lines mode not supported");
		return -ENOTSUP;
	}
	qdata->np = npins_from_spi_config(config);
#else
	smode = MCHP_QMSPI_C_IFM_1X;
	qdata->np = 1u;
#endif
	regs->CTRL = smode;

	/* collect CPHA (bit 0) and CPOL (bit 1) into SPI mode 0-3 */
	smode = 0;
	if ((config->operation & SPI_MODE_CPHA) != 0U) {
		smode |= BIT(0);
	}

	if ((config->operation & SPI_MODE_CPOL) != 0U) {
		smode |= BIT(1);
	}

	qmspi_set_signalling_mode(qdata, regs, smode);

	/* chip select */
	smode = regs->MODE & ~(MCHP_QMSPI_M_CS_MASK);
	if (cfg->chip_sel == 0) {
		smode |= MCHP_QMSPI_M_CS0;
	} else {
		smode |= MCHP_QMSPI_M_CS1;
	}
	regs->MODE = smode;

	/* chip select timing and TAPS adjust */
	regs->CSTM = cfg->cs_timing;
	regs->TM_TAPS_ADJ = cfg->taps_adj;

	/* CS1 alternate mode (frequency) */
	regs->MODE_ALT1 = 0;
	if (cfg->cs1_freq) {
		uint32_t fdiv = qmspi_encoded_fdiv(dev, cfg->cs1_freq);

		regs->MODE_ALT1 = (fdiv << MCHP_QMSPI_MA1_CS1_CDIV_POS) &
				  MCHP_QMSPI_MA1_CS1_CDIV_MSK;
		regs->MODE_ALT1 |= MCHP_QMSPI_MA1_CS1_CDIV_EN;
	}

	/* remember applied config so repeat calls short-circuit above */
	qdata->ctx.config = config;

	regs->MODE |= MCHP_QMSPI_M_ACTIVATE;

	return 0;
}
423 
encode_npins(uint8_t npins)424 static uint32_t encode_npins(uint8_t npins)
425 {
426 	if (npins == 4) {
427 		return MCHP_QMSPI_C_IFM_4X;
428 	} else if (npins == 2) {
429 		return MCHP_QMSPI_C_IFM_2X;
430 	} else {
431 		return MCHP_QMSPI_C_IFM_1X;
432 	}
433 }
434 
/* Common controller transfer initialization using Local-DMA.
 * Full-duplex: controller configured to transmit and receive simultaneously.
 * Half-duplex(dual/quad): User may only specify TX or RX buffer sets.
 * Passing both buffer sets is reported as an error.
 */
/* Quiesce the controller before a transfer: mask interrupts, clear FIFOs
 * and both Local-DMA descriptor bitmaps, disable LDMA in Mode, clear all
 * status, and set the interface mode for the configured pin count.
 * In dual/quad (half-duplex) operation, supplying both TX and RX buffer
 * sets is rejected with -EPROTONOSUPPORT. Returns 0 on success.
 */
static inline int qmspi_xfr_cm_init(const struct device *dev,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	const struct spi_qmspi_config *devcfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct qmspi_regs *regs = devcfg->regs;

	regs->IEN = 0;
	regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
	regs->LDMA_RX_DESCR_BM = 0;
	regs->LDMA_TX_DESCR_BM = 0;
	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_TX_EN | MCHP_QMSPI_M_LDMA_RX_EN);
	regs->STS = 0xffffffffu; /* status bits are R/W1C */
	regs->CTRL = encode_npins(qdata->np);

	qdata->qstatus = 0;

#ifdef CONFIG_SPI_EXTENDED_MODES
	/* dual/quad lanes are half-duplex: only one direction per transfer */
	if (qdata->np != 1) {
		if (tx_bufs && rx_bufs) {
			LOG_ERR("Cannot specify both TX and RX buffers in half-duplex(dual/quad)");
			return -EPROTONOSUPPORT;
		}
	}
#endif

	return 0;
}
469 
470 /* QMSPI Local-DMA transfer configuration:
471  * Support full and half(dual/quad) duplex transfers.
472  * Requires caller to have checked that only one direction was setup
473  * in the SPI context: TX or RX not both. (refer to qmspi_xfr_cm_init)
474  * Supports spi_buf's where data pointer is NULL and length non-zero.
475  * These buffers are used as TX tri-state I/O clock only generation or
476  * RX data discard for certain SPI command protocols using dual/quad I/O.
477  * 1. Get largest contiguous data size from SPI context.
478  * 2. If the SPI TX context has a non-zero length configure Local-DMA TX
479  *    channel 1 for contiguous data size. If TX context has valid buffer
480  *    configure channel to use context buffer with address increment.
481  *    If the TX buffer pointer is NULL interpret byte length as the number
482  *    of clocks to generate with output line(s) tri-stated. NOTE: The controller
483  *    must be configured with TX disabled to not drive output line(s) during
484  *    clock generation. Also, no data should be written to TX FIFO. The unit
485  *    size can be set to bits. The number of units to transfer must be computed
486  *    based upon the number of output pins in the IOM field: full-duplex is one
487  *    bit per clock, dual is 2 bits per clock, and quad is 4 bits per clock.
488  *    For example, if I/O lines is 4 (quad) meaning 4 bits per clock and the
489  *    user wants 7 clocks then the number of bit units is 4 * 7 = 28.
490  * 3. If instead, the SPI RX context has a non-zero length configure Local-DMA
491  *    RX channel 1 for the contiguous data size. If RX context has a valid
492  *    buffer configure channel to use buffer with address increment else
493  *    configure channel for driver data temporary buffer without address
494  *    increment.
495  * 4. Update QMSPI Control register.
496  */
qmspi_ldma_encode_unit_size(uint32_t maddr,size_t len)497 static uint32_t qmspi_ldma_encode_unit_size(uint32_t maddr, size_t len)
498 {
499 	uint8_t temp = (maddr | (uint32_t)len) & 0x3u;
500 
501 	if (temp == 0) {
502 		return MCHP_QMSPI_LDC_ASZ_4;
503 	} else if (temp == 2) {
504 		return MCHP_QMSPI_LDC_ASZ_2;
505 	} else {
506 		return MCHP_QMSPI_LDC_ASZ_1;
507 	}
508 }
509 
/* Pick the largest QMSPI transfer unit (16, 4, or 1 bytes) that evenly
 * divides the transfer length.
 */
static uint32_t qmspi_unit_size(size_t xfrlen)
{
	if (!(xfrlen & 0xfu)) {
		return 16u;
	}

	if (!(xfrlen & 0x3u)) {
		return 4u;
	}

	return 1u;
}
520 
qmspi_encode_unit_size(uint32_t units_in_bytes)521 static uint32_t qmspi_encode_unit_size(uint32_t units_in_bytes)
522 {
523 	if (units_in_bytes == 16u) {
524 		return MCHP_QMSPI_C_XFR_UNITS_16;
525 	} else if (units_in_bytes == 4u) {
526 		return MCHP_QMSPI_C_XFR_UNITS_4;
527 	} else {
528 		return MCHP_QMSPI_C_XFR_UNITS_1;
529 	}
530 }
531 
/* Configure the QMSPI Control register and Local-DMA TX/RX channel 0 for
 * the largest contiguous chunk in the SPI context. Returns the number of
 * bytes that this transfer will move (possibly less than the chunk when
 * clamped by the 15-bit units-count field), or 0 when nothing remains.
 */
static size_t q_ldma_cfg(const struct device *dev)
{
	const struct spi_qmspi_config *devcfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct spi_context *ctx = &qdata->ctx;
	struct qmspi_regs *regs = devcfg->regs;

	size_t ctx_xfr_len = spi_context_max_continuous_chunk(ctx);
	uint32_t ctrl, ldctrl, mstart, qunits, qxfru, xfrlen;

	/* clear FIFOs and fully reset both Local-DMA channel 0 register sets */
	regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
	regs->LDRX[0].CTRL = 0;
	regs->LDRX[0].MSTART = 0;
	regs->LDRX[0].LEN = 0;
	regs->LDTX[0].CTRL = 0;
	regs->LDTX[0].MSTART = 0;
	regs->LDTX[0].LEN = 0;

	if (ctx_xfr_len == 0) {
		return 0;
	}

	/* largest unit (16/4/1 bytes) that divides the chunk length */
	qunits = qmspi_unit_size(ctx_xfr_len);
	ctrl = qmspi_encode_unit_size(qunits);
	qxfru = ctx_xfr_len / qunits;
	if (qxfru > 0x7fffu) { /* HW units-count field is 15 bits wide */
		qxfru = 0x7fffu;
	}
	ctrl |= (qxfru << MCHP_QMSPI_C_XFR_NUNITS_POS);
	xfrlen = qxfru * qunits; /* actual bytes this pass after clamping */

#ifdef MCHP_XEC_QMSPI_DEBUG
	qdata->qunits = qunits;
	qdata->qxfru = qxfru;
	qdata->xfrlen = xfrlen;
#endif
	if (spi_context_tx_buf_on(ctx)) {
		/* TX LDMA channel 0: incrementing source from context buffer */
		mstart = (uint32_t)ctx->tx_buf;
		ctrl |= MCHP_QMSPI_C_TX_DATA | MCHP_QMSPI_C_TX_LDMA_CH0;
		ldctrl = qmspi_ldma_encode_unit_size(mstart, xfrlen);
		ldctrl |= MCHP_QMSPI_LDC_INCR_EN | MCHP_QMSPI_LDC_EN;
		regs->MODE |= MCHP_QMSPI_M_LDMA_TX_EN;
		regs->LDTX[0].LEN = xfrlen;
		regs->LDTX[0].MSTART = mstart;
		regs->LDTX[0].CTRL = ldctrl;
	}

	/* NOTE(review): a NULL RX buffer is skipped here (RX left disabled),
	 * not redirected to qdata->tempbuf as the description above suggests
	 * — confirm intended discard behavior for dual/quad protocols.
	 */
	if (spi_context_rx_buf_on(ctx)) {
		/* RX LDMA channel 0: incrementing destination in context buffer */
		mstart = (uint32_t)ctx->rx_buf;
		ctrl |= MCHP_QMSPI_C_RX_LDMA_CH0 | MCHP_QMSPI_C_RX_EN;
		ldctrl = MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_INCR_EN;
		ldctrl |= qmspi_ldma_encode_unit_size(mstart, xfrlen);
		regs->MODE |= MCHP_QMSPI_M_LDMA_RX_EN;
		regs->LDRX[0].LEN = xfrlen;
		regs->LDRX[0].MSTART = mstart;
		regs->LDRX[0].CTRL = ldctrl;
	}

	/* keep interface-mode bits [1:0]; replace all other control fields */
	regs->CTRL = (regs->CTRL & 0x3u) | ctrl;

	return xfrlen;
}
595 
596 /* Start and wait for QMSPI synchronous transfer(s) to complete.
597  * Initialize QMSPI controller for Local-DMA operation.
598  * Iterate over SPI context with non-zero TX or RX data lengths.
599  *   1. Configure QMSPI Control register and Local-DMA channel(s)
600  *   2. Clear QMSPI status
601  *   3. Start QMSPI transfer
602  *   4. Poll QMSPI status for transfer done and DMA done with timeout.
603  *   5. Hardware anomaly work-around: Poll with timeout QMSPI Local-DMA
604  *      TX and RX channels until hardware clears both channel enables.
605  *      This indicates hardware is really done with transfer to/from memory.
606  *   6. Update SPI context with amount of data transmitted and received.
607  * If SPI configuration hold chip select on flag is not set then instruct
608  * QMSPI to de-assert chip select.
609  * Set SPI context as complete
610  */
/* Run the whole SPI context synchronously: per contiguous chunk, program
 * Local-DMA, start the transfer, and poll status for completion with a
 * microsecond-granularity timeout. De-asserts chip select at the end
 * unless SPI_HOLD_ON_CS is set, then marks the context complete.
 * Returns 0 on success, -ETIMEDOUT if a chunk never finishes, or the
 * error from transfer initialization.
 */
static int qmspi_xfr_sync(const struct device *dev,
			  const struct spi_config *spi_cfg,
			  const struct spi_buf_set *tx_bufs,
			  const struct spi_buf_set *rx_bufs)
{
	const struct spi_qmspi_config *devcfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct spi_context *ctx = &qdata->ctx;
	struct qmspi_regs *regs = devcfg->regs;
	size_t xfr_len;

	int ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);

	if (ret) {
		return ret;
	}

	while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) {
		xfr_len = q_ldma_cfg(dev);
		regs->STS = 0xffffffffu; /* clear R/W1C status before start */
		regs->EXE = MCHP_QMSPI_EXE_START;

#ifdef MCHP_XEC_QMSPI_DEBUG
		/* debug build: unbounded poll, then capture HW state */
		uint32_t temp = regs->STS;

		while (!(temp & MCHP_QMSPI_STS_DONE)) {
			temp = regs->STS;
		}
		qdata->qstatus = temp;
		qdata->bufcnt_status = regs->BCNT_STS;
		qdata->rx_ldma_ctrl0 = regs->LDRX[0].CTRL;
		qdata->tx_ldma_ctrl0 = regs->LDTX[0].CTRL;
#else
		uint32_t wcnt = 0;

		/* NOTE(review): only STS_DONE is polled here; HW error bits
		 * in qstatus are not checked — confirm errors also set DONE.
		 */
		qdata->qstatus = regs->STS;
		while (!(qdata->qstatus & MCHP_QMSPI_STS_DONE)) {
			k_busy_wait(1u);
			if (++wcnt > XEC_QSPI_TIMEOUT_US) {
				regs->EXE = MCHP_QMSPI_EXE_STOP;
				return -ETIMEDOUT;
			}
			qdata->qstatus = regs->STS;
		}
#endif
		/* advance context by the bytes actually programmed */
		spi_context_update_tx(ctx, 1, xfr_len);
		spi_context_update_rx(ctx, 1, xfr_len);
	}

	if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) {
		regs->EXE = MCHP_QMSPI_EXE_STOP;
	}

	spi_context_complete(ctx, dev, 0);

	return 0;
}
668 
669 #ifdef CONFIG_SPI_ASYNC
670 /* Configure QMSPI such that QMSPI transfer FSM and LDMA FSM are synchronized.
671  * Transfer length must be programmed into control/descriptor register(s) and
672  * LDMA register(s). LDMA override length bit must NOT be set.
673  */
/* Start the first chunk of an asynchronous transfer: initialize the
 * controller, program Local-DMA for the first contiguous chunk (length
 * recorded in qdata->xfr_len for the ISR), kick the transfer, and enable
 * done/error interrupts. Returns 0 (including when there is nothing to
 * transfer) or the error from transfer initialization.
 */
static int qmspi_xfr_start_async(const struct device *dev, const struct spi_buf_set *tx_bufs,
				 const struct spi_buf_set *rx_bufs)
{
	const struct spi_qmspi_config *devcfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct qmspi_regs *regs = devcfg->regs;
	int ret;

	ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);
	if (ret) {
		return ret;
	}

	qdata->xfr_len = q_ldma_cfg(dev);
	if (!qdata->xfr_len) {
		return 0; /* nothing to do */
	}

	regs->STS = 0xffffffffu; /* clear R/W1C status before start */
	regs->EXE = MCHP_QMSPI_EXE_START;
	regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
		    | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;

	return 0;
}
699 
700 /* Wrapper to start asynchronous (interrupts enabled) SPI transaction */
qmspi_xfr_async(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)701 static int qmspi_xfr_async(const struct device *dev,
702 			   const struct spi_config *config,
703 			   const struct spi_buf_set *tx_bufs,
704 			   const struct spi_buf_set *rx_bufs)
705 {
706 	struct spi_qmspi_data *qdata = dev->data;
707 	int err = 0;
708 
709 	qdata->qstatus = 0;
710 	qdata->xfr_len = 0;
711 
712 	err = qmspi_xfr_start_async(dev, tx_bufs, rx_bufs);
713 
714 	return err;
715 }
716 #endif /* CONFIG_SPI_ASYNC */
717 
718 /* Start (a)synchronous transaction using QMSPI Local-DMA */
/* Common transceive path for the sync and async driver API entries.
 * Locks the SPI context, applies the configuration, asserts chip select,
 * and runs the transfer. Synchronous calls block on context completion
 * and release CS (unless SPI_HOLD_ON_CS) before unlocking; asynchronous
 * calls return immediately after starting the transfer and leave
 * completion/callback invocation to the ISR.
 * Returns 0 on success or a negative errno from configuration, transfer
 * start, or completion wait.
 */
static int qmspi_transceive(const struct device *dev,
			    const struct spi_config *config,
			    const struct spi_buf_set *tx_bufs,
			    const struct spi_buf_set *rx_bufs,
			    bool asynchronous,
			    spi_callback_t cb,
			    void *user_data)
{
	struct spi_qmspi_data *qdata = dev->data;
	struct spi_context *ctx = &qdata->ctx;
	int err = 0;

	if (!config) {
		return -EINVAL;
	}

	/* nothing to transfer in either direction */
	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

	spi_context_lock(&qdata->ctx, asynchronous, cb, user_data, config);

	err = qmspi_configure(dev, config);
	if (err != 0) {
		spi_context_release(ctx, err);
		return err;
	}

	spi_context_cs_control(ctx, true);
	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

#ifdef CONFIG_SPI_ASYNC
	if (asynchronous) {
		/* stash callback for the ISR completion path */
		qdata->cb = cb;
		qdata->userdata = user_data;
		err = qmspi_xfr_async(dev, config, tx_bufs, rx_bufs);
	} else {
		err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
	}
#else
	err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
#endif
	if (err) { /* de-assert CS# and give semaphore */
		spi_context_unlock_unconditionally(ctx);
		return err;
	}

	/* async: context remains locked until the ISR completes it */
	if (asynchronous) {
		return err;
	}

	err = spi_context_wait_for_completion(ctx);
	if (!(config->operation & SPI_HOLD_ON_CS)) {
		spi_context_cs_control(ctx, false);
	}
	spi_context_release(ctx, err);

	return err;
}
778 
/* Blocking transceive entry for the SPI driver API (no callback). */
static int qmspi_transceive_sync(const struct device *dev,
				 const struct spi_config *config,
				 const struct spi_buf_set *tx_bufs,
				 const struct spi_buf_set *rx_bufs)
{
	return qmspi_transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}
786 
787 #ifdef CONFIG_SPI_ASYNC
788 
/* Non-blocking transceive entry for the SPI driver API; cb is invoked
 * from the ISR when the transfer completes or fails.
 */
static int qmspi_transceive_async(const struct device *dev,
				  const struct spi_config *config,
				  const struct spi_buf_set *tx_bufs,
				  const struct spi_buf_set *rx_bufs,
				  spi_callback_t cb,
				  void *userdata)
{
	return qmspi_transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
798 #endif /* CONFIG_SPI_ASYNC */
799 
qmspi_release(const struct device * dev,const struct spi_config * config)800 static int qmspi_release(const struct device *dev,
801 			 const struct spi_config *config)
802 {
803 	struct spi_qmspi_data *data = dev->data;
804 	const struct spi_qmspi_config *cfg = dev->config;
805 	struct qmspi_regs *regs = cfg->regs;
806 	int ret = 0;
807 	int counter = 0;
808 
809 	if (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
810 		/* Force CS# to de-assert on next unit boundary */
811 		regs->EXE = MCHP_QMSPI_EXE_STOP;
812 		while (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
813 			ret = xec_qmspi_spin_yield(&counter, XEC_QMSPI_WAIT_COUNT);
814 			if (ret != 0) {
815 				break;
816 			}
817 		}
818 	}
819 
820 	spi_context_unlock_unconditionally(&data->ctx);
821 
822 	return ret;
823 }
824 
825 /* QMSPI interrupt handler called by Zephyr ISR
826  * All transfers use QMSPI Local-DMA specified by the Control register.
827  * QMSPI descriptor mode not used.
828  * Full-duplex always uses LDMA TX channel 0 and RX channel 0
829  * Half-duplex(dual/quad) use one of TX channel 0 or RX channel 0
830  */
/* QMSPI interrupt handler. Always masks controller interrupts, captures
 * and clears status, and clears the aggregated GIRQ source.
 * With CONFIG_SPI_ASYNC:
 *  - HW error: stop, de-assert CS, complete the context with -EIO and
 *    invoke the user callback; BIT(7) is OR'd into the saved status as a
 *    driver-defined error marker.
 *  - Otherwise: credit the finished chunk to the context and start the
 *    next Local-DMA chunk; when none remain, optionally release CS
 *    (unless SPI_HOLD_ON_CS) and complete the context with status 0.
 */
void qmspi_xec_isr(const struct device *dev)
{
	const struct spi_qmspi_config *cfg = dev->config;
	struct spi_qmspi_data *data = dev->data;
	struct qmspi_regs *regs = cfg->regs;
	uint32_t qstatus = regs->STS;
#ifdef CONFIG_SPI_ASYNC
	struct spi_context *ctx = &data->ctx;
	int xstatus = 0; /* stays 0 on the success path below */
#endif

	regs->IEN = 0;
	data->qstatus = qstatus;
	regs->STS = MCHP_QMSPI_STS_RW1C_MASK;
	mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);

#ifdef CONFIG_SPI_ASYNC
	if (qstatus & XEC_QSPI_HW_ERRORS_ALL) {
		xstatus = -EIO;
		data->qstatus |= BIT(7); /* driver-defined error marker */
		regs->EXE = MCHP_QMSPI_EXE_STOP;
		spi_context_cs_control(ctx, false);
		spi_context_complete(ctx, dev, xstatus);
		if (data->cb) {
			data->cb(dev, xstatus, data->userdata);
		}
		return;
	}

	/* Clear Local-DMA enables in Mode and Control registers */
	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
	regs->CTRL &= MCHP_QMSPI_C_IFM_MASK;

	/* credit the chunk programmed by the previous start */
	spi_context_update_tx(ctx, 1, data->xfr_len);
	spi_context_update_rx(ctx, 1, data->xfr_len);

	/* more data: program and start the next chunk, re-arm interrupts */
	data->xfr_len = q_ldma_cfg(dev);
	if (data->xfr_len) {
		regs->STS = 0xffffffffu;
		regs->EXE = MCHP_QMSPI_EXE_START;
		regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
			    | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;
		return;
	}

	/* ctx->owner is the spi_config that locked the context */
	if (!(ctx->owner->operation & SPI_HOLD_ON_CS)) {
		regs->EXE = MCHP_QMSPI_EXE_STOP;
		spi_context_cs_control(&data->ctx, false);
	}

	spi_context_complete(&data->ctx, dev, xstatus);

	if (data->cb) {
		data->cb(dev, xstatus, data->userdata);
	}
#endif /* CONFIG_SPI_ASYNC */
}
888 
889 #ifdef CONFIG_PM_DEVICE
/* If the application wants the QMSPI pins to be disabled in suspend it must
 * define pinctrl-1 values for each pin in the app/project DT overlay.
 */
qmspi_xec_pm_action(const struct device * dev,enum pm_device_action action)893 static int qmspi_xec_pm_action(const struct device *dev, enum pm_device_action action)
894 {
895 	const struct spi_qmspi_config *devcfg = dev->config;
896 	int ret;
897 
898 	switch (action) {
899 	case PM_DEVICE_ACTION_RESUME:
900 		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
901 		break;
902 	case PM_DEVICE_ACTION_SUSPEND:
903 		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
904 		if (ret == -ENOENT) { /* pinctrl-1 does not exist */
905 			ret = 0;
906 		}
907 		break;
908 	default:
909 		ret = -ENOTSUP;
910 	}
911 
912 	return ret;
913 }
914 #endif /* CONFIG_PM_DEVICE */
915 
916 /*
917  * Called for each QMSPI controller instance
918  * Initialize QMSPI controller.
919  * Disable sleep control.
920  * Disable and clear interrupt status.
921  * Initialize SPI context.
922  * QMSPI will be fully configured and enabled when the transceive API
923  * is called.
924  */
/* Per-instance init: enable and query the QMSPI input clock, soft-reset
 * the controller, apply default pinctrl, program a default SPI mode-0
 * single-lane configuration, and (when async is enabled) connect and
 * enable the interrupt. The controller is re-configured on each
 * transceive call that passes a new spi_config.
 * Returns 0 on success or a negative errno from clock/pinctrl/config.
 */
static int qmspi_xec_init(const struct device *dev)
{
	const struct spi_qmspi_config *cfg = dev->config;
	struct spi_qmspi_data *qdata = dev->data;
	struct qmspi_regs *regs = cfg->regs;
	clock_control_subsys_t clkss = (clock_control_subsys_t)MCHP_XEC_PCR_CLK_PERIPH_FAST;
	int ret = 0;

	/* base_freq_hz stays 0 until clock_control_get_rate fills it below */
	qdata->base_freq_hz = 0u;
	qdata->qstatus = 0;
	qdata->np = cfg->width;
#ifdef CONFIG_SPI_ASYNC
	qdata->xfr_len = 0;
#endif

	if (!cfg->clk_dev) {
		LOG_ERR("XEC QMSPI-LDMA clock device not configured");
		return -EINVAL;
	}

	ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&cfg->clksrc);
	if (ret < 0) {
		LOG_ERR("XEC QMSPI-LDMA enable clock source error %d", ret);
		return ret;
	}

	/* QMSPI is clocked from the fast peripheral domain */
	ret = clock_control_get_rate(cfg->clk_dev, clkss, &qdata->base_freq_hz);
	if (ret) {
		LOG_ERR("XEC QMSPI-LDMA clock get rate error %d", ret);
		return ret;
	}

	/* controller in known state before enabling pins */
	qmspi_reset(regs);
	mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret != 0) {
		LOG_ERR("XEC QMSPI-LDMA pinctrl setup failed (%d)", ret);
		return ret;
	}

	/* default SPI Mode 0 signalling */
	const struct spi_config spi_cfg = {
		.frequency = cfg->clock_freq,
		.operation = SPI_LINES_SINGLE | SPI_WORD_SET(8),
	};

	ret = qmspi_configure(dev, &spi_cfg);
	if (ret) {
		LOG_ERR("XEC QMSPI-LDMA init configure failed (%d)", ret);
		return ret;
	}

#ifdef CONFIG_SPI_ASYNC
	/* hook NVIC handler and enable the aggregated GIRQ source */
	cfg->irq_config_func();
	mchp_xec_ecia_enable(cfg->girq, cfg->girq_pos);
#endif

	/* make the context usable for the first transceive call */
	spi_context_unlock_unconditionally(&qdata->ctx);

	return 0;
}
988 
/* SPI driver API vtable shared by all QMSPI instances. Async transceive
 * and RTIO submission entries are compiled in only when the corresponding
 * Kconfig options are enabled.
 */
static DEVICE_API(spi, spi_qmspi_xec_driver_api) = {
	.transceive = qmspi_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = qmspi_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = qmspi_release,
};
999 
/* Pack four 4-bit chip-select timing fields into a single 32-bit
 * register value at bit offsets 0, 8, 16 and 24 respectively.
 * Each argument is masked to its 4-bit field width.
 */
#define XEC_QMSPI_CS_TIMING_VAL(a, b, c, d)			\
	((((d) & 0xFu) << 24) | (((c) & 0xFu) << 16)		\
	 | (((b) & 0xFu) << 8) | ((a) & 0xFu))

/* Pack two 8-bit tap adjust fields into bits [7:0] and [15:8] */
#define XEC_QMSPI_TAPS_ADJ_VAL(a, b)				\
	((((b) & 0xffu) << 8) | ((a) & 0xffu))
1006 
/* Chip-select timing register value from optional DT properties
 * (dcsckon, dckcsoff, dldh, dcsda) with fallback defaults.
 */
#define XEC_QMSPI_CS_TIMING(i) XEC_QMSPI_CS_TIMING_VAL(			\
				DT_INST_PROP_OR(i, dcsckon, 6),		\
				DT_INST_PROP_OR(i, dckcsoff, 4),	\
				DT_INST_PROP_OR(i, dldh, 6),		\
				DT_INST_PROP_OR(i, dcsda, 6))

/* Clock/control tap adjust value from optional DT properties; both
 * default to 0 (no adjustment).
 */
#define XEC_QMSPI_TAPS_ADJ(i) XEC_QMSPI_TAPS_ADJ_VAL(			\
				DT_INST_PROP_OR(i, tctradj, 0),		\
				DT_INST_PROP_OR(i, tsckadj, 0))

/* Decode the instance's first "girqs" DT cell into GIRQ bank number,
 * bit position within the bank, and the aggregated/direct NVIC inputs.
 */
#define XEC_QMSPI_GIRQ(i)						\
	MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(i, girqs, 0))

#define XEC_QMSPI_GIRQ_POS(i)						\
	MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(i, girqs, 0))

#define XEC_QMSPI_NVIC_AGGR(i)						\
	MCHP_XEC_ECIA_NVIC_AGGR(DT_INST_PROP_BY_IDX(i, girqs, 0))

#define XEC_QMSPI_NVIC_DIRECT(i)					\
	MCHP_XEC_ECIA_NVIC_DIRECT(DT_INST_PROP_BY_IDX(i, girqs, 0))

/* Encode the PCR sleep/clock register index, bit position and clock
 * domain from the instance's clocks DT cells.
 */
#define XEC_QMSPI_PCR_INFO(i)						\
	MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(i, regidx),		\
				DT_INST_CLOCKS_CELL(i, bitpos),		\
				DT_INST_CLOCKS_CELL(i, domain))
1033 
1034 /*
1035  * The instance number, i is not related to block ID's rather the
1036  * order the DT tools process all DT files in a build.
1037  */
1038 #define QMSPI_XEC_DEVICE(i)						\
1039 									\
1040 	PINCTRL_DT_INST_DEFINE(i);					\
1041 									\
1042 	static void qmspi_xec_irq_config_func_##i(void)			\
1043 	{								\
1044 		IRQ_CONNECT(DT_INST_IRQN(i),				\
1045 			    DT_INST_IRQ(i, priority),			\
1046 			    qmspi_xec_isr,				\
1047 			    DEVICE_DT_INST_GET(i), 0);			\
1048 		irq_enable(DT_INST_IRQN(i));				\
1049 	}								\
1050 									\
1051 	static struct spi_qmspi_data qmspi_xec_data_##i = {		\
1052 		SPI_CONTEXT_INIT_LOCK(qmspi_xec_data_##i, ctx),		\
1053 		SPI_CONTEXT_INIT_SYNC(qmspi_xec_data_##i, ctx),		\
1054 	};								\
1055 	static const struct spi_qmspi_config qmspi_xec_config_##i = {	\
1056 		.regs = (struct qmspi_regs *) DT_INST_REG_ADDR(i),	\
1057 		.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i)),	\
1058 		.clksrc = { .pcr_info = XEC_QMSPI_PCR_INFO(i), },	\
1059 		.clock_freq = DT_INST_PROP_OR(i, clock_frequency, MHZ(12)), \
1060 		.cs1_freq = DT_INST_PROP_OR(i, cs1_freq, 0),		\
1061 		.cs_timing = XEC_QMSPI_CS_TIMING(i),			\
1062 		.taps_adj = XEC_QMSPI_TAPS_ADJ(i),			\
1063 		.girq = XEC_QMSPI_GIRQ(i),				\
1064 		.girq_pos = XEC_QMSPI_GIRQ_POS(i),			\
1065 		.girq_nvic_aggr = XEC_QMSPI_NVIC_AGGR(i),		\
1066 		.girq_nvic_direct = XEC_QMSPI_NVIC_DIRECT(i),		\
1067 		.irq_pri = DT_INST_IRQ(i, priority),			\
1068 		.chip_sel = DT_INST_PROP_OR(i, chip_select, 0),		\
1069 		.width = DT_INST_PROP_OR(0, lines, 1),			\
1070 		.irq_config_func = qmspi_xec_irq_config_func_##i,	\
1071 		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i),		\
1072 	};								\
1073 	PM_DEVICE_DT_INST_DEFINE(i, qmspi_xec_pm_action);		\
1074 	SPI_DEVICE_DT_INST_DEFINE(i, qmspi_xec_init,			\
1075 		PM_DEVICE_DT_INST_GET(i),				\
1076 		&qmspi_xec_data_##i, &qmspi_xec_config_##i,		\
1077 		POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,			\
1078 		&spi_qmspi_xec_driver_api);
1079 
1080 DT_INST_FOREACH_STATUS_OKAY(QMSPI_XEC_DEVICE)
1081