1 /*
2  * Copyright 2018, 2024 NXP
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT nxp_lpspi
8 
9 #include <zephyr/drivers/spi.h>
10 #include <zephyr/drivers/pinctrl.h>
11 #include <zephyr/drivers/clock_control.h>
12 #include <zephyr/irq.h>
13 
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL);
16 
17 #ifdef CONFIG_SPI_RTIO
18 #include <zephyr/drivers/spi/rtio.h>
19 #endif
20 
21 #include "spi_context.h"
22 
23 #if CONFIG_NXP_LP_FLEXCOMM
24 #include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
25 #endif
26 
27 #include <fsl_lpspi.h>
28 
29 /* If any hardware revisions change this, make it into a DT property.
 * DON'T make #ifdefs here by platform.
31  */
32 #define LPSPI_CHIP_SELECT_COUNT   4
33 #define LPSPI_MIN_FRAME_SIZE_BITS 8
34 
35 /* Required by DEVICE_MMIO_NAMED_* macros */
36 #define DEV_CFG(_dev)  ((const struct spi_mcux_config *)(_dev)->config)
37 #define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data)
38 
39 /* Argument to MCUX SDK IRQ handler */
40 #define LPSPI_IRQ_HANDLE_ARG COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, (LPSPI_GetInstance(base)), (base))
41 
42 /* flag for SDK API for master transfers */
43 #define LPSPI_MASTER_XFER_CFG_FLAGS(slave)                                                         \
44 	kLPSPI_MasterPcsContinuous | (slave << LPSPI_MASTER_PCS_SHIFT)
45 
46 #ifdef CONFIG_SPI_MCUX_LPSPI_DMA
47 #include <zephyr/drivers/dma.h>
48 
49 /* These flags are arbitrary */
50 #define LPSPI_DMA_ERROR_FLAG   BIT(0)
51 #define LPSPI_DMA_RX_DONE_FLAG BIT(1)
52 #define LPSPI_DMA_TX_DONE_FLAG BIT(2)
53 #define LPSPI_DMA_DONE_FLAG    (LPSPI_DMA_RX_DONE_FLAG | LPSPI_DMA_TX_DONE_FLAG)
54 
/* Per-direction (TX or RX) DMA state for one LPSPI instance. */
struct spi_dma_stream {
	const struct device *dma_dev; /* DMA controller servicing this direction */
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg; /* channel configuration passed to dma_config() */
	struct dma_block_config dma_blk_cfg; /* single block descriptor, rebuilt per chunk */
};
61 #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
62 
/* Read-only, devicetree-derived configuration for one LPSPI instance. */
struct spi_mcux_config {
	DEVICE_MMIO_NAMED_ROM(reg_base);
	const struct device *clock_dev; /* clock controller feeding the module */
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev); /* connects/enables the IRQ */
	uint32_t pcs_sck_delay;   /* PCS assertion to first SCK edge, ns */
	uint32_t sck_pcs_delay;   /* last SCK edge to PCS deassertion, ns */
	uint32_t transfer_delay;  /* delay between transfers, ns */
	const struct pinctrl_dev_config *pincfg;
	lpspi_pin_config_t data_pin_config; /* SDI/SDO pin routing mode */
};
74 
/* Mutable per-instance driver state. */
struct spi_mcux_data {
	DEVICE_MMIO_NAMED_RAM(reg_base);
	const struct device *dev; /* back-pointer to this driver's device */
	lpspi_master_handle_t handle; /* MCUX SDK non-blocking transfer handle */
	struct spi_context ctx; /* Zephyr SPI context (lock, buffers, CS) */
	size_t transfer_len; /* length of the chunk currently in flight */
#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx;
#endif
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
	/* LPSPI_DMA_*_FLAG bits, set from DMA callback (interrupt context) */
	volatile uint32_t status_flags;
	struct spi_dma_stream dma_rx;
	struct spi_dma_stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_buffer;
#endif
};
92 
93 static int spi_mcux_transfer_next_packet(const struct device *dev);
94 
/* LPSPI interrupt service routine: delegates to the SDK IRQ handler,
 * which advances the non-blocking transfer tracked by data->handle.
 */
static void spi_mcux_isr(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);

	/* With LP_FLEXCOMM the SDK handler takes an instance number, otherwise
	 * the register base (see LPSPI_IRQ_HANDLE_ARG).
	 */
	LPSPI_MasterTransferHandleIRQ(LPSPI_IRQ_HANDLE_ARG, &data->handle);
}
102 
/* SDK completion callback for the interrupt-driven path: account for the
 * chunk just finished and start the next one (or complete the context).
 *
 * NOTE(review): the SDK `status` argument is not checked here, so a
 * transfer error is not propagated to the waiting caller — confirm this
 * is intentional.
 */
static void spi_mcux_master_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
				     status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}
113 
/* Start the next contiguous chunk of the current spi_context, or finish
 * the transfer (deassert CS, complete the context) when nothing is left.
 *
 * Returns 0 on success/completion, -EBUSY if the SDK reports the module
 * busy, -EINVAL for any other SDK start failure.
 */
static int spi_mcux_transfer_next_packet(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_context *ctx = &data->ctx;
	size_t max_chunk = spi_context_max_continuous_chunk(ctx);
	lpspi_transfer_t transfer;
	status_t status;

	if (max_chunk == 0) {
		/* All buffers consumed: end of transfer */
		spi_context_cs_control(ctx, false);
		spi_context_complete(ctx, dev, 0);
		return 0;
	}

	data->transfer_len = max_chunk;

	transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(ctx->config->slave);
	/* NULL side means the SDK transmits dummy data / discards RX */
	transfer.txData = (ctx->tx_len == 0 ? NULL : ctx->tx_buf);
	transfer.rxData = (ctx->rx_len == 0 ? NULL : ctx->rx_buf);
	transfer.dataSize = max_chunk;

	status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		return status == kStatus_LPSPI_Busy ? -EBUSY : -EINVAL;
	}

	return 0;
}
144 
spi_mcux_configure(const struct device * dev,const struct spi_config * spi_cfg)145 static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg)
146 {
147 	const struct spi_mcux_config *config = dev->config;
148 	struct spi_mcux_data *data = dev->data;
149 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
150 	uint32_t word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
151 	lpspi_master_config_t master_config;
152 	uint32_t clock_freq;
153 	int ret;
154 
155 	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
156 		/* the IP DOES support half duplex, need to implement driver support */
157 		LOG_ERR("Half-duplex not supported");
158 		return -ENOTSUP;
159 	}
160 
161 	if (word_size < 8 || (word_size % 32 == 1)) {
162 		/* Zephyr word size == hardware FRAME size (not word size)
163 		 * Max frame size: 4096 bits
164 		 *   (zephyr field is 6 bit wide for max 64 bit size, no need to check)
165 		 * Min frame size: 8 bits.
166 		 * Minimum hardware word size is 2. Since this driver is intended to work
167 		 * for 32 bit platforms, and 64 bits is max size, then only 33 and 1 are invalid.
168 		 */
169 		LOG_ERR("Word size %d not allowed", word_size);
170 		return -EINVAL;
171 	}
172 
173 	if (spi_cfg->slave > LPSPI_CHIP_SELECT_COUNT) {
174 		LOG_ERR("Peripheral %d select exceeds max %d", spi_cfg->slave,
175 			LPSPI_CHIP_SELECT_COUNT - 1);
176 		return -EINVAL;
177 	}
178 
179 	ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq);
180 	if (ret) {
181 		return ret;
182 	}
183 
184 	if (data->ctx.config != NULL) {
185 		/* Setting the baud rate in LPSPI_MasterInit requires module to be disabled. Only
186 		 * disable if already configured, otherwise the clock is not enabled and the
187 		 * CR register cannot be written.
188 		 */
189 		LPSPI_Enable(base, false);
190 		while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) {
191 			/* Wait until LPSPI is disabled. Datasheet:
192 			 * After writing 0, MEN (Module Enable) remains set until the LPSPI has
193 			 * completed the current transfer and is idle.
194 			 */
195 		}
196 	}
197 
198 	data->ctx.config = spi_cfg;
199 
200 	LPSPI_MasterGetDefaultConfig(&master_config);
201 
202 	master_config.bitsPerFrame = word_size;
203 	master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
204 				     ? kLPSPI_ClockPolarityActiveLow
205 				     : kLPSPI_ClockPolarityActiveHigh;
206 	master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
207 				     ? kLPSPI_ClockPhaseSecondEdge
208 				     : kLPSPI_ClockPhaseFirstEdge;
209 	master_config.direction =
210 		(spi_cfg->operation & SPI_TRANSFER_LSB) ? kLPSPI_LsbFirst : kLPSPI_MsbFirst;
211 	master_config.baudRate = spi_cfg->frequency;
212 	master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay;
213 	master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
214 	master_config.betweenTransferDelayInNanoSec = config->transfer_delay;
215 	master_config.pinCfg = config->data_pin_config;
216 
217 	LPSPI_MasterInit(base, &master_config, clock_freq);
218 	LPSPI_SetDummyData(base, 0);
219 
220 	if (IS_ENABLED(CONFIG_DEBUG)) {
221 		base->CR |= LPSPI_CR_DBGEN_MASK;
222 	}
223 
224 	return 0;
225 }
226 
227 #ifdef CONFIG_SPI_MCUX_LPSPI_DMA
lpspi_inst_has_dma(const struct spi_mcux_data * data)228 static bool lpspi_inst_has_dma(const struct spi_mcux_data *data)
229 {
230 	return (data->dma_tx.dma_dev && data->dma_rx.dma_dev);
231 }
232 
233 /* This function is executed in the interrupt context */
spi_mcux_dma_callback(const struct device * dev,void * arg,uint32_t channel,int status)234 static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status)
235 {
236 	/* arg directly holds the spi device */
237 	const struct device *spi_dev = arg;
238 	struct spi_mcux_data *data = (struct spi_mcux_data *)spi_dev->data;
239 	char debug_char;
240 
241 	if (status < 0) {
242 		goto error;
243 	}
244 
245 	/* identify the origin of this callback */
246 	if (channel == data->dma_tx.channel) {
247 		/* this part of the transfer ends */
248 		data->status_flags |= LPSPI_DMA_TX_DONE_FLAG;
249 		debug_char = 'T';
250 	} else if (channel == data->dma_rx.channel) {
251 		/* this part of the transfer ends */
252 		data->status_flags |= LPSPI_DMA_RX_DONE_FLAG;
253 		debug_char = 'R';
254 	} else {
255 		goto error;
256 	}
257 
258 	LOG_DBG("DMA %cX Block Complete", debug_char);
259 
260 #if CONFIG_SPI_ASYNC
261 	if (data->ctx.asynchronous && (data->status_flags & LPSPI_DMA_DONE_FLAG)) {
262 		/* Load dma blocks of equal length */
263 		size_t dma_size = spi_context_max_continuous_chunk(data->ctx);
264 
265 		if (dma_size != 0) {
266 			return;
267 		}
268 
269 		spi_context_update_tx(&data->ctx, 1, dma_size);
270 		spi_context_update_rx(&data->ctx, 1, dma_size);
271 	}
272 #endif
273 
274 	goto done;
275 error:
276 	LOG_ERR("DMA callback error with channel %d.", channel);
277 	data->status_flags |= LPSPI_DMA_ERROR_FLAG;
278 done:
279 	spi_context_complete(&data->ctx, spi_dev, 0);
280 }
281 
/* Shared setup for loading one DMA block in either direction.
 *
 * Zeroes and fills the stream's block descriptor with size/addresses and
 * hooks it into the stream's dma_cfg. When buf is NULL the transfer is
 * routed through data->dummy_buffer in both directions so the bus still
 * clocks `len` bytes without touching caller memory.
 *
 * Returns the block descriptor so the caller can overwrite the
 * peripheral-side address (TX dest / RX source).
 */
static struct dma_block_config *spi_mcux_dma_common_load(struct spi_dma_stream *stream,
							 const struct device *dev,
							 const uint8_t *buf, size_t len)
{
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	blk_cfg->block_size = len;

	if (buf == NULL) {
		blk_cfg->source_address = (uint32_t)&data->dummy_buffer;
		blk_cfg->dest_address = (uint32_t)&data->dummy_buffer;
		/* pretend it is peripheral xfer so DMA just xfer to dummy buf */
		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
	} else {
		/* caller overrides the peripheral-side address afterwards */
		blk_cfg->source_address = (uint32_t)buf;
		blk_cfg->dest_address = (uint32_t)buf;
	}

	/* Transfer 1 byte each DMA loop */
	stream->dma_cfg.source_burst_length = 1;
	stream->dma_cfg.user_data = (void *)dev;
	stream->dma_cfg.head_block = blk_cfg;

	return blk_cfg;
}
311 
/* Configure the TX DMA channel for one block: memory (or dummy buffer)
 * to the LPSPI TX data register. Returns dma_config()'s result.
 */
static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	/* remember active TX DMA channel (used in callback) */
	struct spi_dma_stream *stream = &data->dma_tx;
	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);

	if (buf != NULL) {
		/* tx direction has memory as source and periph as dest. */
		stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	}

	/* Dest is LPSPI tx fifo */
	blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base);

	/* give the client dev as arg, as the callback comes from the dma */
	/* pass our client origin to the dma: data->dma_tx.dma_channel */
	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
}
332 
/* Configure the RX DMA channel for one block: LPSPI RX data register to
 * memory (or dummy buffer). Returns dma_config()'s result.
 */
static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	/* retrieve active RX DMA channel (used in callback) */
	struct spi_dma_stream *stream = &data->dma_rx;
	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);

	if (buf != NULL) {
		/* rx direction has periph as source and mem as dest. */
		stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	}

	/* Source is LPSPI rx fifo */
	blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base);

	/* pass our client origin to the dma: data->dma_rx.channel */
	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
}
352 
wait_dma_rx_tx_done(const struct device * dev)353 static int wait_dma_rx_tx_done(const struct device *dev)
354 {
355 	struct spi_mcux_data *data = dev->data;
356 	int ret;
357 
358 	do {
359 		ret = spi_context_wait_for_completion(&data->ctx);
360 		if (ret) {
361 			LOG_DBG("Timed out waiting for SPI context to complete");
362 			return ret;
363 		} else if (data->status_flags & LPSPI_DMA_ERROR_FLAG) {
364 			return -EIO;
365 		}
366 	} while (!((data->status_flags & LPSPI_DMA_DONE_FLAG) == LPSPI_DMA_DONE_FLAG));
367 
368 	LOG_DBG("DMA block completed");
369 	return 0;
370 }
371 
/* Load and start matching-length TX and RX DMA blocks for the next
 * contiguous chunk of the context. On success *dma_size holds the chunk
 * length. Returns the first failing dma_config()/dma_start() result.
 */
static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int ret = 0;

	/* Clear status flags */
	data->status_flags = 0U;

	/* Load dma blocks of equal length */
	*dma_size = spi_context_max_continuous_chunk(ctx);

	ret = spi_mcux_dma_tx_load(dev, ctx->tx_buf, *dma_size);
	if (ret != 0) {
		return ret;
	}

	ret = spi_mcux_dma_rx_load(dev, ctx->rx_buf, *dma_size);
	if (ret != 0) {
		return ret;
	}

	/* Start DMA */
	ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
	if (ret != 0) {
		return ret;
	}

	ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
	return ret;
}
403 
404 #ifdef CONFIG_SPI_ASYNC
/* Asynchronous DMA transceive: load/start the first pair of DMA blocks
 * and enable the module's DMA requests; completion is delivered through
 * the DMA callback and the context's async callback.
 *
 * Returns 0 on success or the block-load error.
 */
static int transceive_dma_async(const struct device *dev, spi_callback_t cb, void *userdata)
{
	/* Fix: `base` was used below without being declared, which broke the
	 * build with CONFIG_SPI_ASYNC; fetch it like every other path does.
	 */
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	size_t dma_size;
	int ret;

	ctx->asynchronous = true;
	ctx->callback = cb;
	ctx->callback_data = userdata;

	ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
	if (ret) {
		return ret;
	}

	/* Enable DMA Requests */
	LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

	return 0;
}
426 #else
427 #define transceive_dma_async(...) 0
428 #endif /* CONFIG_SPI_ASYNC */
429 
/* Synchronous DMA transceive: asserts CS, then for each contiguous chunk
 * loads TX+RX DMA blocks, enables the module's DMA requests, waits for
 * both directions to finish, and advances the context. CS is deasserted
 * when all buffers are consumed.
 *
 * Returns 0 on success or the first error from load/wait.
 */
static int transceive_dma_sync(const struct device *dev)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	size_t dma_size;
	int ret;

	spi_context_cs_control(ctx, true);

	/* Send each spi buf via DMA, updating context as DMA completes */
	while (ctx->rx_len > 0 || ctx->tx_len > 0) {
		/* Load dma block */
		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
		if (ret) {
			/* NOTE(review): returning here leaves CS asserted and
			 * DMA possibly half-started — confirm callers recover.
			 */
			return ret;
		}

#ifdef CONFIG_SOC_SERIES_MCXN
		while (!(LPSPI_GetStatusFlags(base) & kLPSPI_TxDataRequestFlag)) {
			/* wait until previous tx finished */
		}
#endif

		/* Enable DMA Requests */
		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

		/* Wait for DMA to finish */
		ret = wait_dma_rx_tx_done(dev);
		if (ret) {
			return ret;
		}

#ifndef CONFIG_SOC_SERIES_MCXN
		while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
			/* wait until module is idle */
		}
#endif

		/* Disable DMA */
		LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

		/* Update SPI contexts with amount of data we just sent */
		spi_context_update_tx(ctx, 1, dma_size);
		spi_context_update_rx(ctx, 1, dma_size);
	}

	spi_context_cs_control(ctx, false);

	/* Clear the transmit command register to end any continuous transfer */
	base->TCR = 0;

	return 0;
}
483 
/* DMA entry point for both sync and async transceive.
 *
 * Locks the context (sync path only), configures the module, sets FIFO
 * watermarks to 0 and dispatches to the sync or async implementation.
 *
 * NOTE(review): the async path does not take the context lock but still
 * reaches spi_context_release() at `out` on success — verify this is
 * balanced for CONFIG_SPI_ASYNC users.
 */
static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
			  bool asynchronous, spi_callback_t cb, void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	int ret;

	if (!asynchronous) {
		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
	}

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret && !asynchronous) {
		goto out;
	} else if (ret) {
		/* async path holds no lock, so return directly */
		return ret;
	}

#ifdef CONFIG_SOC_SERIES_MCXN
	base->TCR |= LPSPI_TCR_CONT_MASK;
#endif

	/* DMA is fast enough watermarks are not required */
	LPSPI_SetFifoWatermarks(base, 0U, 0U);

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	if (asynchronous) {
		ret = transceive_dma_async(dev, cb, userdata);
	} else {
		ret = transceive_dma_sync(dev);
	}

out:
	spi_context_release(&data->ctx, ret);
	return ret;
}
522 #else
523 #define lpspi_inst_has_dma(arg) arg != arg
524 #define transceive_dma(...)     0
525 #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
526 
527 #ifdef CONFIG_SPI_RTIO
528 static void spi_mcux_iodev_complete(const struct device *dev, int status);
529 
/* SDK completion callback used when RTIO is enabled: if an RTIO
 * transaction is in flight, complete it; otherwise fall back to the
 * regular (spi_context) completion path.
 */
static void spi_mcux_master_rtio_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
					  status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (rtio_ctx->txn_head != NULL) {
		spi_mcux_iodev_complete(data->dev, status);
		return;
	}

	spi_mcux_master_callback(base, handle, status, userData);
}
543 
/* Start the current RTIO submission: configure the module from the
 * iodev's spi_dt_spec, translate the SQE op into an SDK transfer
 * descriptor, assert CS and kick off the non-blocking transfer.
 * Errors are reported through spi_mcux_iodev_complete().
 */
static void spi_mcux_iodev_start(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
	struct spi_dt_spec *spi_dt_spec = sqe->iodev->data;
	struct spi_config *spi_cfg = &spi_dt_spec->config;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	lpspi_transfer_t transfer;
	status_t status;

	status = spi_mcux_configure(dev, spi_cfg);
	if (status) {
		LOG_ERR("Error configuring lpspi");
		return;
	}

	/* Route SDK completion through the RTIO-aware callback */
	LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_rtio_callback, data);

	transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(spi_cfg->slave);

	/* Map the RTIO opcode to SDK tx/rx buffers */
	switch (sqe->op) {
	case RTIO_OP_RX:
		transfer.txData = NULL;
		transfer.rxData = sqe->rx.buf;
		transfer.dataSize = sqe->rx.buf_len;
		break;
	case RTIO_OP_TX:
		transfer.rxData = NULL;
		transfer.txData = sqe->tx.buf;
		transfer.dataSize = sqe->tx.buf_len;
		break;
	case RTIO_OP_TINY_TX:
		transfer.rxData = NULL;
		transfer.txData = sqe->tiny_tx.buf;
		transfer.dataSize = sqe->tiny_tx.buf_len;
		break;
	case RTIO_OP_TXRX:
		transfer.txData = sqe->txrx.tx_buf;
		transfer.rxData = sqe->txrx.rx_buf;
		transfer.dataSize = sqe->txrx.buf_len;
		break;
	default:
		LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
		spi_mcux_iodev_complete(dev, -EINVAL);
		return;
	}

	data->transfer_len = transfer.dataSize;

	spi_context_cs_control(&data->ctx, true);

	status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		spi_mcux_iodev_complete(dev, -EIO);
	}
}
602 
/* Complete the current RTIO submission. Chained transaction entries keep
 * CS asserted and start the next SQE immediately; otherwise CS is
 * released and, if another submission is queued, it is started.
 */
static void spi_mcux_iodev_complete(const struct device *dev, int status)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
		/* more SQEs in this transaction: continue without toggling CS */
		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
		spi_mcux_iodev_start(dev);
		return;
	}

	/** De-assert CS-line to space from next transaction */
	spi_context_cs_control(&data->ctx, false);

	if (spi_rtio_complete(rtio_ctx, status)) {
		/* another submission is pending: start it */
		spi_mcux_iodev_start(dev);
	}
}
621 
spi_mcux_iodev_submit(const struct device * dev,struct rtio_iodev_sqe * iodev_sqe)622 static void spi_mcux_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
623 {
624 	struct spi_mcux_data *data = dev->data;
625 	struct spi_rtio *rtio_ctx = data->rtio_ctx;
626 
627 	if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
628 		spi_mcux_iodev_start(dev);
629 	}
630 }
631 
/* Blocking transceive implemented on top of the RTIO machinery: locks
 * the context, delegates to spi_rtio_transceive and releases the lock.
 */
static inline int transceive_rtio(const struct device *dev, const struct spi_config *spi_cfg,
				  const struct spi_buf_set *tx_bufs,
				  const struct spi_buf_set *rx_bufs)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	int ret;

	spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg);

	ret = spi_rtio_transceive(rtio_ctx, spi_cfg, tx_bufs, rx_bufs);

	spi_context_release(&data->ctx, ret);

	return ret;
}
648 #else
649 #define transceive_rtio(...) 0
650 #endif /* CONFIG_SPI_RTIO */
651 
/* Interrupt-driven transceive path (no DMA, no RTIO).
 *
 * Locks the context, configures the module, installs the SDK completion
 * callback, asserts CS and starts the first packet; remaining packets
 * are chained from spi_mcux_master_callback().
 */
static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_callback, data);

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	ret = spi_mcux_transfer_next_packet(dev);
	if (ret) {
		goto out;
	}

	/* For sync calls this blocks until the ISR chain completes */
	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}
684 
/* Dispatch a transceive request to the best available backend:
 * DMA when both channels are configured, else RTIO when enabled,
 * else the interrupt-driven path.
 */
static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
			       spi_callback_t cb, void *userdata, bool async)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	if (lpspi_inst_has_dma(data)) {
		ret = transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata);
	} else if (IS_ENABLED(CONFIG_SPI_RTIO)) {
		ret = transceive_rtio(dev, spi_cfg, tx_bufs, rx_bufs);
	} else {
		ret = transceive(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata);
	}

	return ret;
}
701 
/* spi_driver_api.transceive: blocking transceive with no callback. */
static int spi_mcux_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	return spi_mcux_transceive(dev, spi_cfg, tx_bufs, rx_bufs, NULL, NULL, false);
}
708 
#ifdef CONFIG_SPI_ASYNC
/* spi_driver_api.transceive_async: non-blocking transceive; cb/userdata
 * are invoked on completion.
 */
static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				     void *userdata)
{
	return spi_mcux_transceive(dev, spi_cfg, tx_bufs, rx_bufs, cb, userdata, true);
}
#endif /* CONFIG_SPI_ASYNC */
718 
/* spi_driver_api.release: drop a SPI_LOCK_ON hold on the bus. */
static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}
727 
/* Zephyr SPI driver API vtable for this driver */
static DEVICE_API(spi, spi_mcux_driver_api) = {
	.transceive = spi_mcux_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_mcux_iodev_submit,
#endif
	.release = spi_mcux_release,
};
738 
739 #if defined(CONFIG_SPI_MCUX_LPSPI_DMA)
lpspi_dma_dev_ready(const struct device * dma_dev)740 static int lpspi_dma_dev_ready(const struct device *dma_dev)
741 {
742 	if (!device_is_ready(dma_dev)) {
743 		LOG_ERR("%s device is not ready", dma_dev->name);
744 		return -ENODEV;
745 	}
746 
747 	return 0;
748 }
749 
lpspi_dma_devs_ready(struct spi_mcux_data * data)750 static int lpspi_dma_devs_ready(struct spi_mcux_data *data)
751 {
752 	return lpspi_dma_dev_ready(data->dma_tx.dma_dev) |
753 	       lpspi_dma_dev_ready(data->dma_rx.dma_dev);
754 }
755 #else
756 #define lpspi_dma_devs_ready(...) 0
757 #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
758 
/* Device init: map registers, verify clock and (optional) DMA devices,
 * configure chip-select GPIOs and pinctrl, hook up the IRQ, and leave the
 * context unlocked and ready for the first transceive.
 */
static int spi_mcux_init(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	int err = 0;

	DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP);

	data->dev = dev;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* DMA devices are only required when the instance actually has them */
	if (IS_ENABLED(CONFIG_SPI_MCUX_LPSPI_DMA) && lpspi_inst_has_dma(data)) {
		err = lpspi_dma_devs_ready(data);
	}
	if (err < 0) {
		return err;
	}

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	config->irq_config_func(dev);

#ifdef CONFIG_SPI_RTIO
	spi_rtio_init(data->rtio_ctx, dev);
#endif
	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}
800 
/* Define the RTIO context (submission/completion queues) for instance n */
#define SPI_MCUX_RTIO_DEFINE(n)                                                                    \
	SPI_RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE,                           \
			CONFIG_SPI_MCUX_RTIO_SQ_SIZE)
804 
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
/* Initialize dma_tx/dma_rx members from the instance's devicetree `dmas`
 * entries (named tx/rx); expands to nothing for a direction not present.
 */
#define SPI_DMA_CHANNELS(n)                                                                        \
	IF_ENABLED(                                                                                \
		DT_INST_DMAS_HAS_NAME(n, tx),                                                      \
		(.dma_tx = {.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)),            \
			    .channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, mux),                      \
			    .dma_cfg = {.channel_direction = MEMORY_TO_PERIPHERAL,                 \
					.dma_callback = spi_mcux_dma_callback,                     \
					.source_data_size = 1,                                     \
					.dest_data_size = 1,                                       \
					.block_count = 1,                                          \
					.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source)}},))  \
	IF_ENABLED(                                                                                \
		DT_INST_DMAS_HAS_NAME(n, rx),                                                      \
		(.dma_rx = {.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)),            \
			    .channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, mux),                      \
			    .dma_cfg = {.channel_direction = PERIPHERAL_TO_MEMORY,                 \
					.dma_callback = spi_mcux_dma_callback,                     \
					.source_data_size = 1,                                     \
					.dest_data_size = 1,                                       \
					.block_count = 1,                                          \
					.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, source)}},))
#else
#define SPI_DMA_CHANNELS(n)
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
830 
#if defined(CONFIG_NXP_LP_FLEXCOMM)
/* Register the ISR with the parent LP Flexcomm, which owns the IRQ line */
#define SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                                 \
	nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), DEVICE_DT_INST_GET(n),     \
				      LP_FLEXCOMM_PERIPH_LPSPI, spi_mcux_isr);
#else
/* Standalone LPSPI: connect and enable the instance's own IRQ */
#define SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                                 \
	IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_mcux_isr,                       \
		    DEVICE_DT_INST_GET(n), 0);                                                     \
	irq_enable(DT_INST_IRQN(n));
#endif
841 
/* Per-instance boilerplate: pinctrl, optional RTIO context, IRQ config
 * function, config/data structs and the device definition itself.
 */
#define SPI_MCUX_LPSPI_INIT(n)                                                                     \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	COND_CODE_1(CONFIG_SPI_RTIO, (SPI_MCUX_RTIO_DEFINE(n)), ());                               \
                                                                                                   \
	static void spi_mcux_config_func_##n(const struct device *dev)                             \
	{                                                                                          \
		SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                         \
	}                                                                                          \
                                                                                                   \
	static const struct spi_mcux_config spi_mcux_config_##n = {                                \
		DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)),                              \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                                \
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),              \
		.irq_config_func = spi_mcux_config_func_##n,                                       \
		.pcs_sck_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, pcs_sck_delay),                 \
					  DT_INST_PROP(n, pcs_sck_delay)),                         \
		.sck_pcs_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, sck_pcs_delay),                 \
					  DT_INST_PROP(n, sck_pcs_delay)),                         \
		.transfer_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, transfer_delay),               \
					   DT_INST_PROP(n, transfer_delay)),                       \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                       \
		.data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config),                           \
	};                                                                                         \
                                                                                                   \
	static struct spi_mcux_data spi_mcux_data_##n = {                                          \
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx),                                     \
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx),                                     \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) SPI_DMA_CHANNELS(n)           \
			IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &spi_mcux_rtio_##n,))             \
	};                                                                                         \
                                                                                                   \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, &spi_mcux_data_##n, &spi_mcux_config_##n,\
			      POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_mcux_driver_api);

/* Instantiate the driver for every enabled nxp,lpspi devicetree node */
DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT)
877