1 /*
2  * Copyright (c) 2021 Microchip Technology Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT microchip_xec_qmspi_ldma
8 
9 #include <errno.h>
10 #include <soc.h>
11 
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/clock_control.h>
14 #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
15 #include <zephyr/drivers/gpio.h>
16 #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #include <zephyr/drivers/spi.h>
19 #include <zephyr/dt-bindings/clock/mchp_xec_pcr.h>
20 #include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
21 #include <zephyr/irq.h>
22 #include <zephyr/pm/device.h>
23 #include <zephyr/sys/sys_io.h>
24 #include <zephyr/sys/util.h>
25 #include <zephyr/logging/log.h>
26 LOG_MODULE_REGISTER(spi_xec, CONFIG_SPI_LOG_LEVEL);
27 
28 #include "spi_context.h"
29 
30 /* #define MCHP_XEC_QMSPI_DEBUG 1 */
31 
32 /* MEC172x QMSPI controller SPI Mode 3 signalling has an anomaly where
33  * received data is shifted off the input line(s) improperly. Received
34  * data bytes will be left shifted by 1. The work-around for SPI Mode 3 is
35  * to sample the input line(s) on the same edge on which output data is ready.
36  */
37 #define XEC_QMSPI_SPI_MODE_3_ANOMALY 1
38 
39 /* common clock control device node for all Microchip XEC chips */
40 #define MCHP_XEC_CLOCK_CONTROL_NODE	DT_NODELABEL(pcr)
41 
42 /* spin loops waiting for HW to clear soft reset bit */
43 #define XEC_QMSPI_SRST_LOOPS		16
44 
45 /* microseconds for busy wait and total wait interval */
46 #define XEC_QMSPI_WAIT_INTERVAL		8
47 #define XEC_QMSPI_WAIT_COUNT		64
48 
49 /* QSPI transfer and DMA done */
50 #define XEC_QSPI_HW_XFR_DMA_DONE	(MCHP_QMSPI_STS_DONE | MCHP_QMSPI_STS_DMA_DONE)
51 
52 /* QSPI hardware error status
53  * Misprogrammed control or descriptors (software error)
54  * Overflow TX FIFO
55  * Underflow RX FIFO
56  */
57 #define XEC_QSPI_HW_ERRORS		(MCHP_QMSPI_STS_PROG_ERR |	\
58 					 MCHP_QMSPI_STS_TXB_ERR |	\
59 					 MCHP_QMSPI_STS_RXB_ERR)
60 
61 #define XEC_QSPI_HW_ERRORS_LDMA		(MCHP_QMSPI_STS_LDMA_RX_ERR |	\
62 					 MCHP_QMSPI_STS_LDMA_TX_ERR)
63 
64 #define XEC_QSPI_HW_ERRORS_ALL		(XEC_QSPI_HW_ERRORS |		\
65 					 XEC_QSPI_HW_ERRORS_LDMA)
66 
67 #define XEC_QSPI_TIMEOUT_US		(100 * 1000) /* 100 ms */
68 
69 /* Device constant configuration parameters */
70 struct spi_qmspi_config {
71 	struct qmspi_regs *regs;
72 	const struct device *clk_dev;
73 	struct mchp_xec_pcr_clk_ctrl clksrc;
74 	uint32_t clock_freq;
75 	uint32_t cs1_freq;
76 	uint32_t cs_timing;
77 	uint16_t taps_adj;
78 	uint8_t girq;
79 	uint8_t girq_pos;
80 	uint8_t girq_nvic_aggr;
81 	uint8_t girq_nvic_direct;
82 	uint8_t irq_pri;
83 	uint8_t chip_sel;
84 	uint8_t width;	/* 0 (half), 1 (single), 2 (dual), 4 (quad) */
85 	uint8_t unused[1];
86 	const struct pinctrl_dev_config *pcfg;
87 	void (*irq_config_func)(void);
88 };
89 
90 #define XEC_QMSPI_XFR_FLAG_TX		BIT(0)
91 #define XEC_QMSPI_XFR_FLAG_RX		BIT(1)
92 
93 /* Device run time data */
94 struct spi_qmspi_data {
95 	struct spi_context ctx;
96 	uint32_t base_freq_hz;
97 	uint32_t spi_freq_hz;
98 	uint32_t qstatus;
99 	uint8_t np; /* number of data pins: 1, 2, or 4 */
100 #ifdef CONFIG_SPI_ASYNC
101 	spi_callback_t cb;
102 	void *userdata;
103 	size_t xfr_len;
104 #endif
105 	uint32_t tempbuf[2];
106 #ifdef MCHP_XEC_QMSPI_DEBUG
107 	uint32_t bufcnt_status;
108 	uint32_t rx_ldma_ctrl0;
109 	uint32_t tx_ldma_ctrl0;
110 	uint32_t qunits;
111 	uint32_t qxfru;
112 	uint32_t xfrlen;
113 
114 #endif
115 };
116 
117 static int xec_qmspi_spin_yield(int *counter, int max_count)
118 {
119 	*counter = *counter + 1;
120 
121 	if (*counter > max_count) {
122 		return -ETIMEDOUT;
123 	}
124 
125 	k_busy_wait(XEC_QMSPI_WAIT_INTERVAL);
126 
127 	return 0;
128 }
129 
130 /*
131  * Reset the QMSPI controller with save/restore of timing registers.
132  * Some QMSPI timing registers may be modified by the Boot-ROM OTP
133  * values.
134  */
135 static void qmspi_reset(struct qmspi_regs *regs)
136 {
137 	uint32_t taps[3];
138 	uint32_t malt1;
139 	uint32_t cstm;
140 	uint32_t mode;
141 	uint32_t cnt = XEC_QMSPI_SRST_LOOPS;
142 
143 	taps[0] = regs->TM_TAPS;
144 	taps[1] = regs->TM_TAPS_ADJ;
145 	taps[2] = regs->TM_TAPS_CTRL;
146 	malt1 = regs->MODE_ALT1;
147 	cstm = regs->CSTM;
148 	mode = regs->MODE;
149 	regs->MODE = MCHP_QMSPI_M_SRST;
150 	while (regs->MODE & MCHP_QMSPI_M_SRST) {
151 		if (cnt == 0) {
152 			break;
153 		}
154 		cnt--;
155 	}
156 	regs->MODE = 0;
157 	regs->MODE = mode & ~MCHP_QMSPI_M_ACTIVATE;
158 	regs->CSTM = cstm;
159 	regs->MODE_ALT1 = malt1;
160 	regs->TM_TAPS = taps[0];
161 	regs->TM_TAPS_ADJ = taps[1];
162 	regs->TM_TAPS_CTRL = taps[2];
163 }
164 
165 static uint32_t qmspi_encoded_fdiv(const struct device *dev, uint32_t freq_hz)
166 {
167 	struct spi_qmspi_data *qdata = dev->data;
168 
169 	if (freq_hz == 0u) {
170 		return 0u; /* maximum frequency divider */
171 	}
172 
173 	return (qdata->base_freq_hz / freq_hz);
174 }
175 
176 /* Program QMSPI frequency divider field in the mode register.
177  * MEC172x QMSPI input clock source is the Fast Peripheral domain whose
178  * clock is controlled by the PCR turbo clock. 96 MHz if turbo mode
179  * enabled else 48 MHz. Query the clock control driver to get clock
180  * rate of fast peripheral domain. MEC172x QMSPI clock divider has
181  * been expanded to a 16-bit field encoded as:
182  * 0 = divide by 0x10000
183  * 1 to 0xffff = divide by this value.
184  */
185 static int qmspi_set_frequency(struct spi_qmspi_data *qdata, struct qmspi_regs *regs,
186 			       uint32_t freq_hz)
187 {
188 	uint32_t clk = MCHP_QMSPI_INPUT_CLOCK_FREQ_HZ;
189 	uint32_t fdiv = 0u; /* maximum divider */
190 
191 	if (qdata->base_freq_hz) {
192 		clk = qdata->base_freq_hz;
193 	}
194 
195 	if (freq_hz) {
196 		fdiv = 1u;
197 		if (freq_hz < clk) {
198 			fdiv = clk / freq_hz;
199 		}
200 	}
201 
202 	regs->MODE = ((regs->MODE & ~(MCHP_QMSPI_M_FDIV_MASK)) |
203 		((fdiv << MCHP_QMSPI_M_FDIV_POS) & MCHP_QMSPI_M_FDIV_MASK));
204 
205 	if (!fdiv) {
206 		fdiv = 0x10000u;
207 	}
208 
209 	qdata->spi_freq_hz = clk / fdiv;
210 
211 	return 0;
212 }
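
/* Worked example of the divider encoding above (illustrative, assuming the
 * fast peripheral clock is 96 MHz): a request for freq_hz = 10 MHz yields
 * fdiv = 96000000 / 10000000 = 9, so the SPI clock becomes 96 MHz / 9, about
 * 10.67 MHz; the integer division can land slightly above the requested rate.
 * A request of 12 MHz divides exactly: fdiv = 8, SPI clock = 12 MHz. A
 * request of 0 leaves the encoded field at 0, which hardware treats as a
 * divide by 0x10000, i.e. roughly 1.46 kHz, the slowest supported clock.
 */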
213 
214 /*
215  * SPI signalling mode: CPOL and CPHA
216  * CPOL = 0 is clock idles low, 1 is clock idle high
217  * CPHA = 0 Transmitter changes data on the trailing edge of the preceding clock cycle.
218  *          Receiver samples data on the leading edge of the clock cycle.
219  *        1 Transmitter changes data on the leading edge of the current clock cycle.
220  *          Receiver samples data on the trailing edge of the clock cycle.
221  * SPI Mode nomenclature:
222  * Mode CPOL CPHA
223  *  0     0    0
224  *  1     0    1
225  *  2     1    0
226  *  3     1    1
227  * QMSPI has three controls, CPOL, CPHA for output and CPHA for input.
228  * SPI frequency < 48MHz
229  *	Mode 0: CPOL=0 CPHA=0 (CPHA_MISO=0 and CPHA_MOSI=0)
230  *	Mode 3: CPOL=1 CPHA=1 (CPHA_MISO=1 and CPHA_MOSI=1)
231  * The data sheet recommends that when QMSPI is set to its maximum SPI
232  * frequency (48 MHz), data be sampled and changed on the same edge:
233  *  Mode 0: CPOL=0 CPHA=0 (CPHA_MISO=1 and CPHA_MOSI=0)
234  *  Mode 3: CPOL=1 CPHA=1 (CPHA_MISO=0 and CPHA_MOSI=1)
235  *
236  * There is an anomaly in MEC172x for SPI signalling mode 3. We must
237  * set CPHA_MISO=0 for SPI Mode 3 at all frequencies.
238  */
239 
240 const uint8_t smode_tbl[4] = {
241 	0x00u, 0x06u, 0x01u,
242 #ifdef XEC_QMSPI_SPI_MODE_3_ANOMALY
243 	0x03u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=0 */
244 #else
245 	0x07u, /* CPOL=1, CPHA_MOSI=1, CPHA_MISO=1 */
246 #endif
247 };
248 
249 const uint8_t smode48_tbl[4] = {
250 	0x04u, 0x02u, 0x05u, 0x03u
251 };
252 
253 static void qmspi_set_signalling_mode(struct spi_qmspi_data *qdata,
254 				      struct qmspi_regs *regs, uint32_t smode)
255 {
256 	const uint8_t *ptbl;
257 	uint32_t m;
258 
259 	ptbl = smode_tbl;
260 	if (qdata->spi_freq_hz >= MHZ(48)) {
261 		ptbl = smode48_tbl;
262 	}
263 
264 	m = (uint32_t)ptbl[smode & 0x03];
265 	regs->MODE = (regs->MODE & ~(MCHP_QMSPI_M_SIG_MASK))
266 		     | (m << MCHP_QMSPI_M_SIG_POS);
267 }
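
/* Illustrative mapping through the tables above: an application requesting
 * SPI_MODE_CPOL | SPI_MODE_CPHA (SPI Mode 3) produces smode = 3 in
 * qmspi_configure(). Below 48 MHz this selects smode_tbl[3] = 0x03 (the
 * anomaly work-around with CPHA_MISO = 0); at 48 MHz and above it selects
 * smode48_tbl[3] = 0x03. The selected value is then shifted into the MODE
 * register signalling field, e.g.:
 *
 *   regs->MODE = (regs->MODE & ~MCHP_QMSPI_M_SIG_MASK)
 *                | (0x03u << MCHP_QMSPI_M_SIG_POS);
 */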
268 
269 #ifdef CONFIG_SPI_EXTENDED_MODES
270 /*
271  * QMSPI HW supports single, dual, and quad I/O.
272  * Return the QMSPI Control/Descriptor register encoded value.
273  */
274 static uint32_t encode_lines(const struct spi_config *config)
275 {
276 	uint32_t qlines;
277 
278 	switch (config->operation & SPI_LINES_MASK) {
279 	case SPI_LINES_SINGLE:
280 		qlines = MCHP_QMSPI_C_IFM_1X;
281 		break;
282 #if DT_INST_PROP(0, lines) > 1
283 	case SPI_LINES_DUAL:
284 		qlines = MCHP_QMSPI_C_IFM_2X;
285 		break;
286 #endif
287 #if DT_INST_PROP(0, lines) > 2
288 	case SPI_LINES_QUAD:
289 		qlines = MCHP_QMSPI_C_IFM_4X;
290 		break;
291 #endif
292 	default:
293 		qlines = 0xffu;
294 	}
295 
296 	return qlines;
297 }
298 
299 static uint8_t npins_from_spi_config(const struct spi_config *config)
300 {
301 	switch (config->operation & SPI_LINES_MASK) {
302 	case SPI_LINES_DUAL:
303 		return 2u;
304 	case SPI_LINES_QUAD:
305 		return 4u;
306 	default:
307 		return 1u;
308 	}
309 }
310 #endif /* CONFIG_SPI_EXTENDED_MODES */
311 
312 static int spi_feature_support(const struct spi_config *config)
313 {
314 	if (config->operation & (SPI_TRANSFER_LSB | SPI_OP_MODE_SLAVE | SPI_MODE_LOOP)) {
315 		LOG_ERR("Driver does not support LSB first, slave, or loop back");
316 		return -ENOTSUP;
317 	}
318 
319 	if (config->operation & SPI_CS_ACTIVE_HIGH) {
320 		LOG_ERR("CS active high not supported");
321 		return -ENOTSUP;
322 	}
323 
324 	if (config->operation & SPI_LOCK_ON) {
325 		LOG_ERR("Lock On not supported");
326 		return -ENOTSUP;
327 	}
328 
329 	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
330 		LOG_ERR("Word size != 8 not supported");
331 		return -ENOTSUP;
332 	}
333 
334 	return 0;
335 }
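
/* Sketch of a spi_config this driver accepts, given the checks above (8-bit
 * words, controller mode, CS active low, no loopback or lock-on):
 *
 *   const struct spi_config cfg = {
 *       .frequency = MHZ(12),
 *       .operation = SPI_WORD_SET(8) | SPI_OP_MODE_MASTER | SPI_LINES_SINGLE,
 *   };
 */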
336 
337 /* Configure QMSPI.
338  * NOTE: QMSPI Shared SPI port has two chip selects.
339  * Private SPI and internal SPI ports support one chip select.
340  * Hardware supports dual and quad I/O. Dual and quad are allowed
341  * if SPI extended mode is enabled at build time. User must
342  * provide pin configuration via DTS.
343  */
344 static int qmspi_configure(const struct device *dev,
345 			   const struct spi_config *config)
346 {
347 	const struct spi_qmspi_config *cfg = dev->config;
348 	struct spi_qmspi_data *qdata = dev->data;
349 	struct qmspi_regs *regs = cfg->regs;
350 	uint32_t smode;
351 	int ret;
352 
353 	if (!config) {
354 		return -EINVAL;
355 	}
356 
357 	if (spi_context_configured(&qdata->ctx, config)) {
358 		return 0;
359 	}
360 
361 	qmspi_set_frequency(qdata, regs, config->frequency);
362 
363 	/* check new configuration */
364 	ret = spi_feature_support(config);
365 	if (ret) {
366 		return ret;
367 	}
368 
369 #ifdef CONFIG_SPI_EXTENDED_MODES
370 	smode = encode_lines(config);
371 	if (smode == 0xff) {
372 		LOG_ERR("Requested lines mode not supported");
373 		return -ENOTSUP;
374 	}
375 	qdata->np = npins_from_spi_config(config);
376 #else
377 	smode = MCHP_QMSPI_C_IFM_1X;
378 	qdata->np = 1u;
379 #endif
380 	regs->CTRL = smode;
381 
382 	smode = 0;
383 	if ((config->operation & SPI_MODE_CPHA) != 0U) {
384 		smode |= BIT(0);
385 	}
386 
387 	if ((config->operation & SPI_MODE_CPOL) != 0U) {
388 		smode |= BIT(1);
389 	}
390 
391 	qmspi_set_signalling_mode(qdata, regs, smode);
392 
393 	/* chip select */
394 	smode = regs->MODE & ~(MCHP_QMSPI_M_CS_MASK);
395 	if (cfg->chip_sel == 0) {
396 		smode |= MCHP_QMSPI_M_CS0;
397 	} else {
398 		smode |= MCHP_QMSPI_M_CS1;
399 	}
400 	regs->MODE = smode;
401 
402 	/* chip select timing and TAPS adjust */
403 	regs->CSTM = cfg->cs_timing;
404 	regs->TM_TAPS_ADJ = cfg->taps_adj;
405 
406 	/* CS1 alternate mode (frequency) */
407 	regs->MODE_ALT1 = 0;
408 	if (cfg->cs1_freq) {
409 		uint32_t fdiv = qmspi_encoded_fdiv(dev, cfg->cs1_freq);
410 
411 		regs->MODE_ALT1 = (fdiv << MCHP_QMSPI_MA1_CS1_CDIV_POS) &
412 				  MCHP_QMSPI_MA1_CS1_CDIV_MSK;
413 		regs->MODE_ALT1 |= MCHP_QMSPI_MA1_CS1_CDIV_EN;
414 	}
415 
416 	qdata->ctx.config = config;
417 
418 	regs->MODE |= MCHP_QMSPI_M_ACTIVATE;
419 
420 	return 0;
421 }
422 
423 static uint32_t encode_npins(uint8_t npins)
424 {
425 	if (npins == 4) {
426 		return MCHP_QMSPI_C_IFM_4X;
427 	} else if (npins == 2) {
428 		return MCHP_QMSPI_C_IFM_2X;
429 	} else {
430 		return MCHP_QMSPI_C_IFM_1X;
431 	}
432 }
433 
434 /* Common controller transfer initialization using Local-DMA.
435  * Full-duplex: the controller is configured to transmit and receive simultaneously.
436  * Half-duplex (dual/quad): the user may specify only a TX or an RX buffer set.
437  * Passing both buffer sets is reported as an error.
438  */
439 static inline int qmspi_xfr_cm_init(const struct device *dev,
440 				    const struct spi_buf_set *tx_bufs,
441 				    const struct spi_buf_set *rx_bufs)
442 {
443 	const struct spi_qmspi_config *devcfg = dev->config;
444 	struct spi_qmspi_data *qdata = dev->data;
445 	struct qmspi_regs *regs = devcfg->regs;
446 
447 	regs->IEN = 0;
448 	regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
449 	regs->LDMA_RX_DESCR_BM = 0;
450 	regs->LDMA_TX_DESCR_BM = 0;
451 	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_TX_EN | MCHP_QMSPI_M_LDMA_RX_EN);
452 	regs->STS = 0xffffffffu;
453 	regs->CTRL = encode_npins(qdata->np);
454 
455 	qdata->qstatus = 0;
456 
457 #ifdef CONFIG_SPI_EXTENDED_MODES
458 	if (qdata->np != 1) {
459 		if (tx_bufs && rx_bufs) {
460 			LOG_ERR("Cannot specify both TX and RX buffers in half-duplex(dual/quad)");
461 			return -EPROTONOSUPPORT;
462 		}
463 	}
464 #endif
465 
466 	return 0;
467 }
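
/* Illustration of the dual/quad restriction enforced above: with
 * SPI_LINES_DUAL or SPI_LINES_QUAD an application passes either a TX or an
 * RX buffer set per call, never both. Hypothetical sketch (spi_dev, cfg,
 * tx_set, and rx_data are application-side names):
 *
 *   struct spi_buf rxb = { .buf = rx_data, .len = sizeof(rx_data) };
 *   struct spi_buf_set rx_set = { .buffers = &rxb, .count = 1 };
 *
 *   cfg.operation = SPI_WORD_SET(8) | SPI_LINES_DUAL;
 *   spi_read(spi_dev, &cfg, &rx_set);                 // RX only: accepted
 *   spi_transceive(spi_dev, &cfg, &tx_set, &rx_set);  // both: -EPROTONOSUPPORT
 */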
468 
469 /* QMSPI Local-DMA transfer configuration:
470  * Support full and half(dual/quad) duplex transfers.
471  * Requires caller to have checked that only one direction was setup
472  * in the SPI context: TX or RX not both. (refer to qmspi_xfr_cm_init)
473  * Supports spi_buf's where data pointer is NULL and length non-zero.
474  * These buffers are used for TX tri-state, clock-only generation or for
475  * RX data discard in certain SPI command protocols using dual/quad I/O.
476  * 1. Get largest contiguous data size from SPI context.
477  * 2. If the SPI TX context has a non-zero length configure Local-DMA TX
478  *    channel 0 for the contiguous data size. If the TX context has a valid buffer
479  *    configure channel to use context buffer with address increment.
480  *    If the TX buffer pointer is NULL interpret byte length as the number
481  *    of clocks to generate with output line(s) tri-stated. NOTE: The controller
482  *    must be configured with TX disabled to not drive output line(s) during
483  *    clock generation. Also, no data should be written to TX FIFO. The unit
484  *    size can be set to bits. The number of units to transfer must be computed
485  *    based upon the number of output pins in the IOM field: full-duplex is one
486  *    bit per clock, dual is 2 bits per clock, and quad is 4 bits per clock.
487  *    For example, if I/O lines is 4 (quad) meaning 4 bits per clock and the
488  *    user wants 7 clocks then the number of bit units is 4 * 7 = 28.
489  * 3. If instead, the SPI RX context has a non-zero length configure Local-DMA
490  *    RX channel 0 for the contiguous data size. If the RX context has a valid
491  *    buffer configure channel to use buffer with address increment else
492  *    configure channel for driver data temporary buffer without address
493  *    increment.
494  * 4. Update QMSPI Control register.
495  */
496 static uint32_t qmspi_ldma_encode_unit_size(uint32_t maddr, size_t len)
497 {
498 	uint8_t temp = (maddr | (uint32_t)len) & 0x3u;
499 
500 	if (temp == 0) {
501 		return MCHP_QMSPI_LDC_ASZ_4;
502 	} else if (temp == 2) {
503 		return MCHP_QMSPI_LDC_ASZ_2;
504 	} else {
505 		return MCHP_QMSPI_LDC_ASZ_1;
506 	}
507 }
508 
509 static uint32_t qmspi_unit_size(size_t xfrlen)
510 {
511 	if ((xfrlen & 0xfu) == 0u) {
512 		return 16u;
513 	} else if ((xfrlen & 0x3u) == 0u) {
514 		return 4u;
515 	} else {
516 		return 1u;
517 	}
518 }
519 
520 static uint32_t qmspi_encode_unit_size(uint32_t units_in_bytes)
521 {
522 	if (units_in_bytes == 16u) {
523 		return MCHP_QMSPI_C_XFR_UNITS_16;
524 	} else if (units_in_bytes == 4u) {
525 		return MCHP_QMSPI_C_XFR_UNITS_4;
526 	} else {
527 		return MCHP_QMSPI_C_XFR_UNITS_1;
528 	}
529 }
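
/* Worked examples of the unit selection above (illustrative): a 256 byte
 * chunk is a multiple of 16, so the transfer uses 16-byte units with
 * 256 / 16 = 16 units. A 300 byte chunk is a multiple of 4 but not 16
 * (300 & 0xf != 0, 300 & 0x3 == 0), so it uses 4-byte units with 75 units.
 * A 7 byte chunk falls back to 1-byte units. The LDMA access size is chosen
 * separately from the combined alignment of the buffer address and length:
 * both 4-byte aligned selects 4-byte accesses, 2-byte alignment selects
 * 2-byte accesses, anything else selects byte accesses.
 */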
530 
531 static size_t q_ldma_cfg(const struct device *dev)
532 {
533 	const struct spi_qmspi_config *devcfg = dev->config;
534 	struct spi_qmspi_data *qdata = dev->data;
535 	struct spi_context *ctx = &qdata->ctx;
536 	struct qmspi_regs *regs = devcfg->regs;
537 
538 	size_t ctx_xfr_len = spi_context_max_continuous_chunk(ctx);
539 	uint32_t ctrl, ldctrl, mstart, qunits, qxfru, xfrlen;
540 
541 	regs->EXE = MCHP_QMSPI_EXE_CLR_FIFOS;
542 	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
543 	regs->LDRX[0].CTRL = 0;
544 	regs->LDRX[0].MSTART = 0;
545 	regs->LDRX[0].LEN = 0;
546 	regs->LDTX[0].CTRL = 0;
547 	regs->LDTX[0].MSTART = 0;
548 	regs->LDTX[0].LEN = 0;
549 
550 	if (ctx_xfr_len == 0) {
551 		return 0;
552 	}
553 
554 	qunits = qmspi_unit_size(ctx_xfr_len);
555 	ctrl = qmspi_encode_unit_size(qunits);
556 	qxfru = ctx_xfr_len / qunits;
557 	if (qxfru > 0x7fffu) {
558 		qxfru = 0x7fffu;
559 	}
560 	ctrl |= (qxfru << MCHP_QMSPI_C_XFR_NUNITS_POS);
561 	xfrlen = qxfru * qunits;
562 
563 #ifdef MCHP_XEC_QMSPI_DEBUG
564 	qdata->qunits = qunits;
565 	qdata->qxfru = qxfru;
566 	qdata->xfrlen = xfrlen;
567 #endif
568 	if (spi_context_tx_buf_on(ctx)) {
569 		mstart = (uint32_t)ctx->tx_buf;
570 		ctrl |= MCHP_QMSPI_C_TX_DATA | MCHP_QMSPI_C_TX_LDMA_CH0;
571 		ldctrl = qmspi_ldma_encode_unit_size(mstart, xfrlen);
572 		ldctrl |= MCHP_QMSPI_LDC_INCR_EN | MCHP_QMSPI_LDC_EN;
573 		regs->MODE |= MCHP_QMSPI_M_LDMA_TX_EN;
574 		regs->LDTX[0].LEN = xfrlen;
575 		regs->LDTX[0].MSTART = mstart;
576 		regs->LDTX[0].CTRL = ldctrl;
577 	}
578 
579 	if (spi_context_rx_buf_on(ctx)) {
580 		mstart = (uint32_t)ctx->rx_buf;
581 		ctrl |= MCHP_QMSPI_C_RX_LDMA_CH0 | MCHP_QMSPI_C_RX_EN;
582 		ldctrl = MCHP_QMSPI_LDC_EN | MCHP_QMSPI_LDC_INCR_EN;
583 		ldctrl |= qmspi_ldma_encode_unit_size(mstart, xfrlen);
584 		regs->MODE |= MCHP_QMSPI_M_LDMA_RX_EN;
585 		regs->LDRX[0].LEN = xfrlen;
586 		regs->LDRX[0].MSTART = mstart;
587 		regs->LDRX[0].CTRL = ldctrl;
588 	}
589 
590 	regs->CTRL = (regs->CTRL & 0x3u) | ctrl;
591 
592 	return xfrlen;
593 }
594 
595 /* Start and wait for QMSPI synchronous transfer(s) to complete.
596  * Initialize QMSPI controller for Local-DMA operation.
597  * Iterate over SPI context with non-zero TX or RX data lengths.
598  *   1. Configure QMSPI Control register and Local-DMA channel(s)
599  *   2. Clear QMSPI status
600  *   3. Start QMSPI transfer
601  *   4. Poll QMSPI status for transfer done and DMA done with timeout.
602  *   5. Hardware anomaly work-around: Poll with timeout QMSPI Local-DMA
603  *      TX and RX channels until hardware clears both channel enables.
604  *      This indicates hardware is really done with transfer to/from memory.
605  *   6. Update SPI context with amount of data transmitted and received.
606  * If the SPI configuration's hold chip select flag (SPI_HOLD_ON_CS) is not
607  * set, instruct QMSPI to de-assert chip select.
608  * Mark the SPI context as complete.
609  */
610 static int qmspi_xfr_sync(const struct device *dev,
611 			  const struct spi_config *spi_cfg,
612 			  const struct spi_buf_set *tx_bufs,
613 			  const struct spi_buf_set *rx_bufs)
614 {
615 	const struct spi_qmspi_config *devcfg = dev->config;
616 	struct spi_qmspi_data *qdata = dev->data;
617 	struct spi_context *ctx = &qdata->ctx;
618 	struct qmspi_regs *regs = devcfg->regs;
619 	size_t xfr_len;
620 
621 	int ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);
622 
623 	if (ret) {
624 		return ret;
625 	}
626 
627 	while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)) {
628 		xfr_len = q_ldma_cfg(dev);
629 		regs->STS = 0xffffffffu;
630 		regs->EXE = MCHP_QMSPI_EXE_START;
631 
632 #ifdef MCHP_XEC_QMSPI_DEBUG
633 		uint32_t temp = regs->STS;
634 
635 		while (!(temp & MCHP_QMSPI_STS_DONE)) {
636 			temp = regs->STS;
637 		}
638 		qdata->qstatus = temp;
639 		qdata->bufcnt_status = regs->BCNT_STS;
640 		qdata->rx_ldma_ctrl0 = regs->LDRX[0].CTRL;
641 		qdata->tx_ldma_ctrl0 = regs->LDTX[0].CTRL;
642 #else
643 		uint32_t wcnt = 0;
644 
645 		qdata->qstatus = regs->STS;
646 		while (!(qdata->qstatus & MCHP_QMSPI_STS_DONE)) {
647 			k_busy_wait(1u);
648 			if (++wcnt > XEC_QSPI_TIMEOUT_US) {
649 				regs->EXE = MCHP_QMSPI_EXE_STOP;
650 				return -ETIMEDOUT;
651 			}
652 			qdata->qstatus = regs->STS;
653 		}
654 #endif
655 		spi_context_update_tx(ctx, 1, xfr_len);
656 		spi_context_update_rx(ctx, 1, xfr_len);
657 	}
658 
659 	if (!(spi_cfg->operation & SPI_HOLD_ON_CS)) {
660 		regs->EXE = MCHP_QMSPI_EXE_STOP;
661 	}
662 
663 	spi_context_complete(ctx, dev, 0);
664 
665 	return 0;
666 }
667 
668 #ifdef CONFIG_SPI_ASYNC
669 /* Configure QMSPI such that QMSPI transfer FSM and LDMA FSM are synchronized.
670  * Transfer length must be programmed into control/descriptor register(s) and
671  * LDMA register(s). LDMA override length bit must NOT be set.
672  */
673 static int qmspi_xfr_start_async(const struct device *dev, const struct spi_buf_set *tx_bufs,
674 				 const struct spi_buf_set *rx_bufs)
675 {
676 	const struct spi_qmspi_config *devcfg = dev->config;
677 	struct spi_qmspi_data *qdata = dev->data;
678 	struct qmspi_regs *regs = devcfg->regs;
679 	int ret;
680 
681 	ret = qmspi_xfr_cm_init(dev, tx_bufs, rx_bufs);
682 	if (ret) {
683 		return ret;
684 	}
685 
686 	qdata->xfr_len = q_ldma_cfg(dev);
687 	if (!qdata->xfr_len) {
688 		return 0; /* nothing to do */
689 	}
690 
691 	regs->STS = 0xffffffffu;
692 	regs->EXE = MCHP_QMSPI_EXE_START;
693 	regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
694 		    | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;
695 
696 	return 0;
697 }
698 
699 /* Wrapper to start asynchronous (interrupts enabled) SPI transaction */
700 static int qmspi_xfr_async(const struct device *dev,
701 			   const struct spi_config *config,
702 			   const struct spi_buf_set *tx_bufs,
703 			   const struct spi_buf_set *rx_bufs)
704 {
705 	struct spi_qmspi_data *qdata = dev->data;
706 	int err = 0;
707 
708 	qdata->qstatus = 0;
709 	qdata->xfr_len = 0;
710 
711 	err = qmspi_xfr_start_async(dev, tx_bufs, rx_bufs);
712 
713 	return err;
714 }
715 #endif /* CONFIG_SPI_ASYNC */
716 
717 /* Start (a)synchronous transaction using QMSPI Local-DMA */
718 static int qmspi_transceive(const struct device *dev,
719 			    const struct spi_config *config,
720 			    const struct spi_buf_set *tx_bufs,
721 			    const struct spi_buf_set *rx_bufs,
722 			    bool asynchronous,
723 			    spi_callback_t cb,
724 			    void *user_data)
725 {
726 	struct spi_qmspi_data *qdata = dev->data;
727 	struct spi_context *ctx = &qdata->ctx;
728 	int err = 0;
729 
730 	if (!config) {
731 		return -EINVAL;
732 	}
733 
734 	if (!tx_bufs && !rx_bufs) {
735 		return 0;
736 	}
737 
738 	spi_context_lock(&qdata->ctx, asynchronous, cb, user_data, config);
739 
740 	err = qmspi_configure(dev, config);
741 	if (err != 0) {
742 		spi_context_release(ctx, err);
743 		return err;
744 	}
745 
746 	spi_context_cs_control(ctx, true);
747 	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
748 
749 #ifdef CONFIG_SPI_ASYNC
750 	if (asynchronous) {
751 		qdata->cb = cb;
752 		qdata->userdata = user_data;
753 		err = qmspi_xfr_async(dev, config, tx_bufs, rx_bufs);
754 	} else {
755 		err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
756 	}
757 #else
758 	err = qmspi_xfr_sync(dev, config, tx_bufs, rx_bufs);
759 #endif
760 	if (err) { /* de-assert CS# and give semaphore */
761 		spi_context_unlock_unconditionally(ctx);
762 		return err;
763 	}
764 
765 	if (asynchronous) {
766 		return err;
767 	}
768 
769 	err = spi_context_wait_for_completion(ctx);
770 	if (!(config->operation & SPI_HOLD_ON_CS)) {
771 		spi_context_cs_control(ctx, false);
772 	}
773 	spi_context_release(ctx, err);
774 
775 	return err;
776 }
777 
778 static int qmspi_transceive_sync(const struct device *dev,
779 				 const struct spi_config *config,
780 				 const struct spi_buf_set *tx_bufs,
781 				 const struct spi_buf_set *rx_bufs)
782 {
783 	return qmspi_transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
784 }
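
/* Minimal synchronous usage sketch for the .transceive entry above. The
 * device handle, spi_config, and data buffers are hypothetical application
 * objects:
 *
 *   uint8_t cmd[2] = { 0x9fu, 0x00u };
 *   uint8_t id[4];
 *   struct spi_buf txb = { .buf = cmd, .len = sizeof(cmd) };
 *   struct spi_buf rxb = { .buf = id, .len = sizeof(id) };
 *   struct spi_buf_set txs = { .buffers = &txb, .count = 1 };
 *   struct spi_buf_set rxs = { .buffers = &rxb, .count = 1 };
 *
 *   int rc = spi_transceive(qspi_dev, &spi_cfg, &txs, &rxs);
 */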
785 
786 #ifdef CONFIG_SPI_ASYNC
787 
788 static int qmspi_transceive_async(const struct device *dev,
789 				  const struct spi_config *config,
790 				  const struct spi_buf_set *tx_bufs,
791 				  const struct spi_buf_set *rx_bufs,
792 				  spi_callback_t cb,
793 				  void *userdata)
794 {
795 	return qmspi_transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
796 }
797 #endif /* CONFIG_SPI_ASYNC */
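
/* Asynchronous usage sketch (CONFIG_SPI_ASYNC=y). The callback is a
 * hypothetical application function invoked from the QMSPI ISR once the
 * final Local-DMA chunk finishes or a hardware error is reported:
 *
 *   static void xfr_done_cb(const struct device *dev, int result, void *userdata)
 *   {
 *           // result is 0 on success or a negative errno such as -EIO
 *   }
 *
 *   int rc = spi_transceive_cb(qspi_dev, &spi_cfg, &txs, &rxs,
 *                              xfr_done_cb, NULL);
 */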
798 
799 static int qmspi_release(const struct device *dev,
800 			 const struct spi_config *config)
801 {
802 	struct spi_qmspi_data *data = dev->data;
803 	const struct spi_qmspi_config *cfg = dev->config;
804 	struct qmspi_regs *regs = cfg->regs;
805 	int ret = 0;
806 	int counter = 0;
807 
808 	if (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
809 		/* Force CS# to de-assert on next unit boundary */
810 		regs->EXE = MCHP_QMSPI_EXE_STOP;
811 		while (regs->STS & MCHP_QMSPI_STS_ACTIVE_RO) {
812 			ret = xec_qmspi_spin_yield(&counter, XEC_QMSPI_WAIT_COUNT);
813 			if (ret != 0) {
814 				break;
815 			}
816 		}
817 	}
818 
819 	spi_context_unlock_unconditionally(&data->ctx);
820 
821 	return ret;
822 }
823 
824 /* QMSPI interrupt handler registered with the Zephyr interrupt subsystem.
825  * All transfers use QMSPI Local-DMA as specified by the Control register.
826  * QMSPI descriptor mode is not used.
827  * Full-duplex always uses LDMA TX channel 0 and RX channel 0.
828  * Half-duplex (dual/quad) uses either TX channel 0 or RX channel 0.
829  */
830 void qmspi_xec_isr(const struct device *dev)
831 {
832 	const struct spi_qmspi_config *cfg = dev->config;
833 	struct spi_qmspi_data *data = dev->data;
834 	struct qmspi_regs *regs = cfg->regs;
835 	uint32_t qstatus = regs->STS;
836 #ifdef CONFIG_SPI_ASYNC
837 	struct spi_context *ctx = &data->ctx;
838 	int xstatus = 0;
839 #endif
840 
841 	regs->IEN = 0;
842 	data->qstatus = qstatus;
843 	regs->STS = MCHP_QMSPI_STS_RW1C_MASK;
844 	mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
845 
846 #ifdef CONFIG_SPI_ASYNC
847 	if (qstatus & XEC_QSPI_HW_ERRORS_ALL) {
848 		xstatus = -EIO;
849 		data->qstatus |= BIT(7);
850 		regs->EXE = MCHP_QMSPI_EXE_STOP;
851 		spi_context_cs_control(ctx, false);
852 		spi_context_complete(ctx, dev, xstatus);
853 		if (data->cb) {
854 			data->cb(dev, xstatus, data->userdata);
855 		}
856 		return;
857 	}
858 
859 	/* Clear Local-DMA enables in Mode and Control registers */
860 	regs->MODE &= ~(MCHP_QMSPI_M_LDMA_RX_EN | MCHP_QMSPI_M_LDMA_TX_EN);
861 	regs->CTRL &= MCHP_QMSPI_C_IFM_MASK;
862 
863 	spi_context_update_tx(ctx, 1, data->xfr_len);
864 	spi_context_update_rx(ctx, 1, data->xfr_len);
865 
866 	data->xfr_len = q_ldma_cfg(dev);
867 	if (data->xfr_len) {
868 		regs->STS = 0xffffffffu;
869 		regs->EXE = MCHP_QMSPI_EXE_START;
870 		regs->IEN = MCHP_QMSPI_IEN_XFR_DONE | MCHP_QMSPI_IEN_PROG_ERR
871 			    | MCHP_QMSPI_IEN_LDMA_RX_ERR | MCHP_QMSPI_IEN_LDMA_TX_ERR;
872 		return;
873 	}
874 
875 	if (!(ctx->owner->operation & SPI_HOLD_ON_CS)) {
876 		regs->EXE = MCHP_QMSPI_EXE_STOP;
877 		spi_context_cs_control(&data->ctx, false);
878 	}
879 
880 	spi_context_complete(&data->ctx, dev, xstatus);
881 
882 	if (data->cb) {
883 		data->cb(dev, xstatus, data->userdata);
884 	}
885 #endif /* CONFIG_SPI_ASYNC */
886 }
887 
888 #ifdef CONFIG_PM_DEVICE
889 /* If the application wants the QMSPI pins to be disabled in suspend it must
890  * define pinctrl-1 values for each pin in the app/project DT overlay.
891  */
892 static int qmspi_xec_pm_action(const struct device *dev, enum pm_device_action action)
893 {
894 	const struct spi_qmspi_config *devcfg = dev->config;
895 	int ret;
896 
897 	switch (action) {
898 	case PM_DEVICE_ACTION_RESUME:
899 		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
900 		break;
901 	case PM_DEVICE_ACTION_SUSPEND:
902 		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
903 		if (ret == -ENOENT) { /* pinctrl-1 does not exist */
904 			ret = 0;
905 		}
906 		break;
907 	default:
908 		ret = -ENOTSUP;
909 	}
910 
911 	return ret;
912 }
913 #endif /* CONFIG_PM_DEVICE */
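
/* Hypothetical overlay fragment giving the controller a sleep pin state so
 * the SUSPEND action above has a pinctrl-1 entry to apply (node and phandle
 * names are placeholders, not taken from any real board):
 *
 *   &qmspi {
 *           pinctrl-0 = <&qspi_pins_default>;
 *           pinctrl-1 = <&qspi_pins_sleep>;
 *           pinctrl-names = "default", "sleep";
 *   };
 */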
914 
915 /*
916  * Called for each QMSPI controller instance
917  * Initialize QMSPI controller.
918  * Disable sleep control.
919  * Disable and clear interrupt status.
920  * Initialize SPI context.
921  * QMSPI will be fully configured and enabled when the transceive API
922  * is called.
923  */
924 static int qmspi_xec_init(const struct device *dev)
925 {
926 	const struct spi_qmspi_config *cfg = dev->config;
927 	struct spi_qmspi_data *qdata = dev->data;
928 	struct qmspi_regs *regs = cfg->regs;
929 	clock_control_subsys_t clkss = (clock_control_subsys_t)MCHP_XEC_PCR_CLK_PERIPH_FAST;
930 	int ret = 0;
931 
932 	qdata->base_freq_hz = 0u;
933 	qdata->qstatus = 0;
934 	qdata->np = cfg->width;
935 #ifdef CONFIG_SPI_ASYNC
936 	qdata->xfr_len = 0;
937 #endif
938 
939 	if (!cfg->clk_dev) {
940 		LOG_ERR("XEC QMSPI-LDMA clock device not configured");
941 		return -EINVAL;
942 	}
943 
944 	ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&cfg->clksrc);
945 	if (ret < 0) {
946 		LOG_ERR("XEC QMSPI-LDMA enable clock source error %d", ret);
947 		return ret;
948 	}
949 
950 	ret = clock_control_get_rate(cfg->clk_dev, clkss, &qdata->base_freq_hz);
951 	if (ret) {
952 		LOG_ERR("XEC QMSPI-LDMA clock get rate error %d", ret);
953 		return ret;
954 	}
955 
956 	/* controller in known state before enabling pins */
957 	qmspi_reset(regs);
958 	mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
959 
960 	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
961 	if (ret != 0) {
962 		LOG_ERR("XEC QMSPI-LDMA pinctrl setup failed (%d)", ret);
963 		return ret;
964 	}
965 
966 	/* default SPI Mode 0 signalling */
967 	const struct spi_config spi_cfg = {
968 		.frequency = cfg->clock_freq,
969 		.operation = SPI_LINES_SINGLE | SPI_WORD_SET(8),
970 	};
971 
972 	ret = qmspi_configure(dev, &spi_cfg);
973 	if (ret) {
974 		LOG_ERR("XEC QMSPI-LDMA init configure failed (%d)", ret);
975 		return ret;
976 	}
977 
978 #ifdef CONFIG_SPI_ASYNC
979 	cfg->irq_config_func();
980 	mchp_xec_ecia_enable(cfg->girq, cfg->girq_pos);
981 #endif
982 
983 	spi_context_unlock_unconditionally(&qdata->ctx);
984 
985 	return 0;
986 }
987 
988 static const struct spi_driver_api spi_qmspi_xec_driver_api = {
989 	.transceive = qmspi_transceive_sync,
990 #ifdef CONFIG_SPI_ASYNC
991 	.transceive_async = qmspi_transceive_async,
992 #endif
993 	.release = qmspi_release,
994 };
995 
996 #define XEC_QMSPI_CS_TIMING_VAL(a, b, c, d) (((a) & 0xFu) \
997 					     | (((b) & 0xFu) << 8) \
998 					     | (((c) & 0xFu) << 16) \
999 					     | (((d) & 0xFu) << 24))
1000 
1001 #define XEC_QMSPI_TAPS_ADJ_VAL(a, b) (((a) & 0xffu) | (((b) & 0xffu) << 8))
1002 
1003 #define XEC_QMSPI_CS_TIMING(i) XEC_QMSPI_CS_TIMING_VAL(			\
1004 				DT_INST_PROP_OR(i, dcsckon, 6),		\
1005 				DT_INST_PROP_OR(i, dckcsoff, 4),	\
1006 				DT_INST_PROP_OR(i, dldh, 6),		\
1007 				DT_INST_PROP_OR(i, dcsda, 6))
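
/* Worked example of the packing above with the default delays: dcsckon=6,
 * dckcsoff=4, dldh=6, dcsda=6 packs to (6) | (4 << 8) | (6 << 16) | (6 << 24)
 * = 0x06060406, which qmspi_configure() writes to the QMSPI CSTM register.
 */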
1008 
1009 #define XEC_QMSPI_TAPS_ADJ(i) XEC_QMSPI_TAPS_ADJ_VAL(			\
1010 				DT_INST_PROP_OR(i, tctradj, 0),		\
1011 				DT_INST_PROP_OR(i, tsckadj, 0))
1012 
1013 #define XEC_QMSPI_GIRQ(i)						\
1014 	MCHP_XEC_ECIA_GIRQ(DT_INST_PROP_BY_IDX(i, girqs, 0))
1015 
1016 #define XEC_QMSPI_GIRQ_POS(i)						\
1017 	MCHP_XEC_ECIA_GIRQ_POS(DT_INST_PROP_BY_IDX(i, girqs, 0))
1018 
1019 #define XEC_QMSPI_NVIC_AGGR(i)						\
1020 	MCHP_XEC_ECIA_NVIC_AGGR(DT_INST_PROP_BY_IDX(i, girqs, 0))
1021 
1022 #define XEC_QMSPI_NVIC_DIRECT(i)					\
1023 	MCHP_XEC_ECIA_NVIC_DIRECT(DT_INST_PROP_BY_IDX(i, girqs, 0))
1024 
1025 #define XEC_QMSPI_PCR_INFO(i)						\
1026 	MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(i, regidx),		\
1027 				DT_INST_CLOCKS_CELL(i, bitpos),		\
1028 				DT_INST_CLOCKS_CELL(i, domain))
1029 
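/* Hedged devicetree sketch of the properties consumed by the helper macros
 * above and by QMSPI_XEC_DEVICE() below; the node label, pinctrl phandle,
 * and values are placeholders, not taken from a real board file:
 *
 *   &qmspi {
 *           status = "okay";
 *           clock-frequency = <12000000>;
 *           lines = <4>;
 *           chip-select = <0>;
 *           dcsckon = <6>;
 *           dckcsoff = <4>;
 *           dldh = <6>;
 *           dcsda = <6>;
 *           pinctrl-0 = <&qspi_pins_default>;
 *           pinctrl-names = "default";
 *   };
 */
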
1030 /*
1031  * The instance number i is not related to the block IDs; rather, it reflects
1032  * the order in which the DT tools process all DT files in a build.
1033  */
1034 #define QMSPI_XEC_DEVICE(i)						\
1035 									\
1036 	PINCTRL_DT_INST_DEFINE(i);					\
1037 									\
1038 	static void qmspi_xec_irq_config_func_##i(void)			\
1039 	{								\
1040 		IRQ_CONNECT(DT_INST_IRQN(i),				\
1041 			    DT_INST_IRQ(i, priority),			\
1042 			    qmspi_xec_isr,				\
1043 			    DEVICE_DT_INST_GET(i), 0);			\
1044 		irq_enable(DT_INST_IRQN(i));				\
1045 	}								\
1046 									\
1047 	static struct spi_qmspi_data qmspi_xec_data_##i = {		\
1048 		SPI_CONTEXT_INIT_LOCK(qmspi_xec_data_##i, ctx),		\
1049 		SPI_CONTEXT_INIT_SYNC(qmspi_xec_data_##i, ctx),		\
1050 	};								\
1051 	static const struct spi_qmspi_config qmspi_xec_config_##i = {	\
1052 		.regs = (struct qmspi_regs *) DT_INST_REG_ADDR(i),	\
1053 		.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(i)),	\
1054 		.clksrc = { .pcr_info = XEC_QMSPI_PCR_INFO(i), },	\
1055 		.clock_freq = DT_INST_PROP_OR(i, clock_frequency, MHZ(12)), \
1056 		.cs1_freq = DT_INST_PROP_OR(i, cs1_freq, 0),		\
1057 		.cs_timing = XEC_QMSPI_CS_TIMING(i),			\
1058 		.taps_adj = XEC_QMSPI_TAPS_ADJ(i),			\
1059 		.girq = XEC_QMSPI_GIRQ(i),				\
1060 		.girq_pos = XEC_QMSPI_GIRQ_POS(i),			\
1061 		.girq_nvic_aggr = XEC_QMSPI_NVIC_AGGR(i),		\
1062 		.girq_nvic_direct = XEC_QMSPI_NVIC_DIRECT(i),		\
1063 		.irq_pri = DT_INST_IRQ(i, priority),			\
1064 		.chip_sel = DT_INST_PROP_OR(i, chip_select, 0),		\
1065 		.width = DT_INST_PROP_OR(i, lines, 1),			\
1066 		.irq_config_func = qmspi_xec_irq_config_func_##i,	\
1067 		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i),		\
1068 	};								\
1069 	PM_DEVICE_DT_INST_DEFINE(i, qmspi_xec_pm_action);		\
1070 	DEVICE_DT_INST_DEFINE(i, &qmspi_xec_init,			\
1071 		PM_DEVICE_DT_INST_GET(i),				\
1072 		&qmspi_xec_data_##i, &qmspi_xec_config_##i,		\
1073 		POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,			\
1074 		&spi_qmspi_xec_driver_api);
1075 
1076 DT_INST_FOREACH_STATUS_OKAY(QMSPI_XEC_DEVICE)
1077