/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017,2019, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_lpc_spi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_spi.h>
#include <zephyr/logging/log.h>
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/reset.h>

LOG_MODULE_REGISTER(spi_mcux_flexcomm, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

#define SPI_CHIP_SELECT_COUNT	4
#define SPI_MAX_DATA_WIDTH	16

struct spi_mcux_config {
	SPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev);
	uint32_t pre_delay;
	uint32_t post_delay;
	uint32_t frame_delay;
	uint32_t transfer_delay;
	uint32_t def_char;
	const struct pinctrl_dev_config *pincfg;
	const struct reset_dt_spec reset;
};

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG	0x01
#define SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG	0x02
#define SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG	0x04
#define SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG		\
	(SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG | SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG)

struct stream {
	const struct device *dma_dev;
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg[2];
	int wait_for_dma_status;
};
#endif

struct spi_mcux_data {
	const struct device *dev;
	spi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	volatile uint32_t status_flags;
	struct stream dma_rx;
	struct stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_tx_buffer;
	/* Used to send the last word */
	uint32_t last_word;
#endif
};

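/*
 * Load the next chunk of the current spi_context transfer into the SDK
 * non-blocking transfer API. Unequal tx/rx lengths are split into multiple
 * transfers so no intermediate buffer is needed; kSPI_FrameAssert is only
 * set when this is the last tx/rx buffer pair.
 */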
static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	spi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	transfer.configFlags = 0;
	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	}

	if (ctx->tx_count <= 1 && ctx->rx_count <= 1) {
		transfer.configFlags = kSPI_FrameAssert;
	}

	data->transfer_len = transfer.dataSize;

	status = SPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
	}
}

static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;

	SPI_MasterTransferHandleIRQ(base, &data->handle);
}

static void spi_mcux_transfer_callback(SPI_Type *base,
		spi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

static uint8_t spi_clock_cycles(uint32_t delay_ns, uint32_t sck_frequency_hz)
{
	/* Convert delay_ns to an integer number of clock cycles of frequency
	 * sck_frequency_hz. The maximum delay is 15 clock cycles.
	 */
	uint8_t delay_cycles = (uint64_t)delay_ns * sck_frequency_hz / NSEC_PER_SEC;

	delay_cycles = MIN(delay_cycles, 15);

	return delay_cycles;
}

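/*
 * Apply a new spi_config to the controller: initialise the FLEXCOMM in
 * master or slave mode as requested, program polarity, phase, bit order,
 * word size, baud rate and the inter-frame delays, and (re)create the SDK
 * transfer handle. Returns 0 immediately if the context is already
 * configured with this spi_config.
 */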
static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	uint32_t clock_freq;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > SPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			    word_size, SPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	/*
	 * Do master or slave initialisation, depending on the
	 * mode requested.
	 */
	if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
		spi_master_config_t master_config;

		SPI_MasterGetDefaultConfig(&master_config);

		if (!device_is_ready(config->clock_dev)) {
			LOG_ERR("clock control device not ready");
			return -ENODEV;
		}

		/* Get the clock frequency */
		if (clock_control_get_rate(config->clock_dev,
					   config->clock_subsys, &clock_freq)) {
			return -EINVAL;
		}

		if (spi_cfg->slave > SPI_CHIP_SELECT_COUNT) {
			LOG_ERR("Slave %d is greater than %d",
				    spi_cfg->slave, SPI_CHIP_SELECT_COUNT);
			return -EINVAL;
		}

		master_config.sselNum = spi_cfg->slave;
		master_config.sselPol = kSPI_SpolActiveAllLow;
		master_config.dataWidth = word_size - 1;

		master_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		master_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		master_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		master_config.baudRate_Bps = spi_cfg->frequency;

		spi_delay_config_t *delayConfig = &master_config.delayConfig;

		delayConfig->preDelay = spi_clock_cycles(config->pre_delay,
							spi_cfg->frequency);
		delayConfig->postDelay = spi_clock_cycles(config->post_delay,
							spi_cfg->frequency);
		delayConfig->frameDelay = spi_clock_cycles(config->frame_delay,
							spi_cfg->frequency);
		delayConfig->transferDelay = spi_clock_cycles(config->transfer_delay,
							spi_cfg->frequency);

		SPI_MasterInit(base, &master_config, clock_freq);

		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_MasterTransferCreateHandle(base, &data->handle,
					     spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	} else {
		spi_slave_config_t slave_config;

		SPI_SlaveGetDefaultConfig(&slave_config);

		slave_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		slave_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		slave_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		/* SS pin active low */
		slave_config.sselPol = kSPI_SpolActiveAllLow;
		slave_config.dataWidth = word_size - 1;

		SPI_SlaveInit(base, &slave_config);

		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_SlaveTransferCreateHandle(base, &data->handle,
					      spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	}

	return 0;
}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
/* This function is executed in the interrupt context */
static void spi_mcux_dma_callback(const struct device *dev, void *arg,
			 uint32_t channel, int status)
{
	/* arg directly holds the spi device */
	const struct device *spi_dev = arg;
	struct spi_mcux_data *data = spi_dev->data;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
		data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
	} else {
		/* identify the origin of this callback */
		if (channel == data->dma_tx.channel) {
			if (status != data->dma_tx.wait_for_dma_status) {
				return;
			}
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG;
		} else if (channel == data->dma_rx.channel) {
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG;
		} else {
			LOG_ERR("DMA callback channel %d is not valid.",
								channel);
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
		}
	}

	spi_context_complete(&data->ctx, spi_dev, 0);
}

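/*
 * Build the 32-bit FIFOWR value (data plus control bits) for the final word
 * of a TX buffer: end-of-transfer, optional RXIGNORE, slave-select deassert
 * selection and the data length field.
 */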
static void spi_mcux_prepare_txlastword(uint32_t *txLastWord,
				const uint8_t *buf, const struct spi_config *spi_cfg,
				size_t len, bool rx_ignore)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (word_size > 8) {
		*txLastWord = (((uint32_t)buf[len - 1U] << 8U) |
							(buf[len - 2U]));
	} else {
		*txLastWord = buf[len - 1U];
	}

	if (rx_ignore) {
		*txLastWord |= (uint32_t)SPI_FIFOWR_RXIGNORE_MASK;
	}

	*txLastWord |= (uint32_t)SPI_FIFOWR_EOT_MASK;

	*txLastWord |= ((uint32_t)SPI_DEASSERT_ALL &
				(~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*txLastWord |= SPI_FIFOWR_LEN(word_size - 1);
}

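/*
 * Build the FIFOWR control bits used when no TX buffer is supplied (dummy
 * data): end-of-transfer only for the last packet, optional RXIGNORE,
 * slave-select deassert selection and the data length field.
 */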
static void spi_mcux_prepare_txdummy(uint32_t *dummy, bool last_packet,
				     const struct spi_config *spi_cfg,
				     bool rx_ignore)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (last_packet) {
		*dummy |= (uint32_t)SPI_FIFOWR_EOT_MASK;
	}
	if (rx_ignore) {
		*dummy |= (uint32_t)SPI_FIFOWR_RXIGNORE_MASK;
	}

	*dummy |= ((uint32_t)SPI_DEASSERT_ALL &
				(~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*dummy |= SPI_FIFOWR_LEN(word_size - 1);
}

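/*
 * Configure and start the TX DMA channel. When this is the last packet and
 * more than one frame remains, the transfer is split into two linked blocks
 * so the final word can carry the end-of-transfer control bits; otherwise a
 * single block is used. A NULL buf transmits the dummy/NOP pattern instead
 * of real data.
 */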
static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf,
				const struct spi_config *spi_cfg, size_t len,
				bool last_packet, bool rx_ignore)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	/* remember active TX DMA channel (used in callback) */
	struct stream *stream = &data->dma_tx;

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	/* tx direction has memory as source and periph as dest. */
	if (buf == NULL) {
		data->dummy_tx_buffer = 0;
		data->last_word = 0;
		spi_mcux_prepare_txdummy(&data->dummy_tx_buffer, last_packet, spi_cfg, rx_ignore);

		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			spi_mcux_prepare_txdummy(&data->last_word, last_packet, spi_cfg, rx_ignore);
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
						(len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			data->dma_tx.wait_for_dma_status = DMA_STATUS_COMPLETE;
		} else {
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			data->dma_tx.wait_for_dma_status = DMA_STATUS_BLOCK;
		}
	} else {
		if (last_packet) {
			spi_mcux_prepare_txlastword(&data->last_word, buf, spi_cfg, len, rx_ignore);
		}
		/* If this is the last packet and the data transfer frame is
		 * bigger than 1, use a DMA descriptor to send the last data.
		 */
		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
						(len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			data->dma_tx.wait_for_dma_status = DMA_STATUS_COMPLETE;
		} else {
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
			data->dma_tx.wait_for_dma_status = DMA_STATUS_BLOCK;
		}
	}

	/* Enables the DMA request from SPI txFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMATX_MASK;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = &stream->dma_blk_cfg[0];
	/* give the client dev as arg, as the callback comes from the dma */
	stream->dma_cfg.user_data = (struct device *)dev;
	/* pass our client origin to the dma: data->dma_tx.dma_channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	uint32_t tmpData = 0U;

	spi_mcux_prepare_txdummy(&tmpData, last_packet, spi_cfg, rx_ignore);

	/* Set up the control info. A halfword write to just the control bits
	 * (offset 0xE22) doesn't push anything into the FIFO, and the access
	 * type for the control bits must be uint16_t; byte or halfword writes
	 * to FIFOWR itself would push the data and the current control bits
	 * into the FIFO.
	 */
	if ((last_packet) &&
		((word_size > 8) ? (len == 2U) : (len == 1U))) {
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	} else {
		/* Clear the SPI_FIFOWR_EOT_MASK bit when data is not the last */
		tmpData &= (~(uint32_t)SPI_FIFOWR_EOT_MASK);
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	}

	/* gives the request ID */
	return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
}

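/*
 * Configure and start the RX DMA channel to drain FIFORD into buf. A NULL
 * buf skips the RX channel entirely and marks RX as already done.
 */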
static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf,
				 size_t len)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;

	/* retrieve active RX DMA channel (used in callback) */
	struct stream *stream = &data->dma_rx;

	if (buf == NULL) {
		data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG;
		return 0;
	}

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this RX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = len;

	/* rx direction has periph as source and mem as dest. */
	blk_cfg->dest_address = (uint32_t)buf;
	blk_cfg->source_address = (uint32_t)&base->FIFORD;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = (struct device *)dev;

	/* Enables the DMA request from SPI rxFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMARX_MASK;

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	/* gives the request ID */
	return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
}

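/*
 * Load the RX channel first, then the TX channel, for the current pair of
 * spi_context buffers. RX is ignored at the FIFO level when no RX buffer is
 * provided.
 */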
static int spi_mcux_dma_move_buffers(const struct device *dev, size_t len,
			const struct spi_config *spi_cfg, bool last_packet)
{
	struct spi_mcux_data *data = dev->data;
	bool rx_ignore = data->ctx.rx_buf ? false : true;
	int ret;

	ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, len);

	if (ret != 0) {
		return ret;
	}

	ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, spi_cfg,
				   len, last_packet, rx_ignore);

	return ret;
}

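/*
 * Block until both the RX and TX DMA channels have reported completion via
 * spi_mcux_dma_callback, or return -EIO if either channel flagged an error.
 */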
static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	int ret = -1;

	while (1) {
		ret = spi_context_wait_for_completion(&data->ctx);
		if (data->status_flags & SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG) {
			return -EIO;
		}

		if ((data->status_flags & SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) ==
			SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) {
			return 0;
		}
	}
}

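/*
 * DMA transfer path: walk the tx/rx buffer sets, programming one DMA
 * transfer per buffer pair (or per overlapping chunk when the pair lengths
 * differ), and only deassert chip select once the last chunk of the last
 * pair has drained out of the TX FIFO.
 */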
static int transceive_dma(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	int ret;
	uint32_t word_size;
	uint16_t data_size;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	data_size = (word_size > 8) ? (sizeof(uint16_t)) : (sizeof(uint8_t));
	data->dma_rx.dma_cfg.source_data_size = data_size;
	data->dma_rx.dma_cfg.dest_data_size = data_size;
	data->dma_tx.dma_cfg.source_data_size = data_size;
	data->dma_tx.dma_cfg.dest_data_size = data_size;

	while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
		size_t dma_len;

		/* last is used to deassert chip select if this
		 * is the last transfer in the set.
		 */
		bool last = false;

		if (data->ctx.rx_len == 0) {
			dma_len = data->ctx.tx_len;
			last = true;
		} else if (data->ctx.tx_len == 0) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else if (data->ctx.tx_len == data->ctx.rx_len) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else {
			dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
			last = false;
		}

		/* at this point, last just means whether or not
		 * this transfer will completely cover
		 * the current tx/rx buffer in data->ctx
		 * or require additional transfers because
		 * the two buffers are not the same size.
		 *
		 * if it covers the current ctx tx/rx buffers, then
		 * we'll move to the next pair of buffers (if any)
		 * after the transfer, but if there are
		 * no more buffer pairs, then this is the last
		 * transfer in the set and we need to deassert CS.
		 */
		if (last) {
			/* this dma transfer should cover
			 * the entire current data->ctx set
			 * of buffers. if there are more
			 * buffers in the set, then we don't
			 * want to deassert CS.
			 */
			if ((data->ctx.tx_count > 1) ||
			    (data->ctx.rx_count > 1)) {
				/* more buffers to transfer so
				 * this isn't last
				 */
				last = false;
			}
		}

		data->status_flags = 0;

		ret = spi_mcux_dma_move_buffers(dev, dma_len, spi_cfg, last);
		if (ret != 0) {
			break;
		}

		ret = wait_dma_rx_tx_done(dev);
		if (ret != 0) {
			break;
		}

		/* wait until TX FIFO is really empty */
		while (0U == (base->FIFOSTAT & SPI_FIFOSTAT_TXEMPTY_MASK)) {
		}

		spi_context_update_tx(&data->ctx, 1, dma_len);
		spi_context_update_rx(&data->ctx, 1, dma_len);
	}

	base->FIFOCFG &= ~SPI_FIFOCFG_DMATX_MASK;
	base->FIFOCFG &= ~SPI_FIFOCFG_DMARX_MASK;

	spi_context_cs_control(&data->ctx, false);

out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

#endif

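/*
 * Interrupt-driven transfer path: configure the controller, assert chip
 * select and kick off the first packet; the remaining packets are chained
 * from the SDK transfer callback.
 */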
static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	spi_mcux_transfer_next_packet(dev);

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
#endif
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
#endif

	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

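/*
 * Driver init: toggle the FLEXCOMM reset line, hook up the IRQ, apply the
 * default pinctrl state, check the DMA controllers (when DMA is enabled)
 * and configure any chip-select GPIOs before releasing the context lock.
 */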
static int spi_mcux_init(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	int err = 0;

	if (!device_is_ready(config->reset.dev)) {
		LOG_ERR("Reset device not ready");
		return -ENODEV;
	}

	err = reset_line_toggle(config->reset.dev, config->reset.id);
	if (err) {
		return err;
	}

	config->irq_config_func(dev);

	data->dev = dev;

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	if (!device_is_ready(data->dma_tx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name);
		return -ENODEV;
	}

	if (!device_is_ready(data->dma_rx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name);
		return -ENODEV;
	}
#endif /* CONFIG_SPI_MCUX_FLEXCOMM_DMA */

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static DEVICE_API(spi, spi_mcux_driver_api) = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_mcux_release,
};

#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id)				\
	static void spi_mcux_config_func_##id(const struct device *dev)
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id)				\
	.irq_config_func = spi_mcux_config_func_##id,
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id)				\
static void spi_mcux_config_func_##id(const struct device *dev) \
{								\
	IRQ_CONNECT(DT_INST_IRQN(id),				\
			DT_INST_IRQ(id, priority),			\
			spi_mcux_isr, DEVICE_DT_INST_GET(id),	\
			0);					\
	irq_enable(DT_INST_IRQN(id));				\
}

#ifndef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_DMA_CHANNELS(id)
#else
#define SPI_DMA_CHANNELS(id)				\
	.dma_tx = {						\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.channel =					\
			DT_INST_DMAS_CELL_BY_NAME(id, tx, channel),	\
		.dma_cfg = {						\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.complete_callback_en = true,			\
			.block_count = 2,				\
		}							\
	},								\
	.dma_rx = {						\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.channel =					\
			DT_INST_DMAS_CELL_BY_NAME(id, rx, channel),	\
		.dma_cfg = {				\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.block_count = 1,		\
		}							\
	}

#endif

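/*
 * Illustrative devicetree sketch of what the instantiation macro below
 * consumes. The node label, DMA channel numbers, GPIO and delay values are
 * hypothetical and SoC/board specific; clock, reset and interrupt properties
 * usually already come from the SoC .dtsi. Only the property names match
 * what this file reads (pre-delay, post-delay, frame-delay, transfer-delay,
 * def-char, dmas/dma-names, cs-gpios, pinctrl-0):
 *
 *	&flexcomm3 {
 *		compatible = "nxp,lpc-spi";
 *		pinctrl-0 = <&pinmux_flexcomm3_spi>;
 *		pinctrl-names = "default";
 *		dmas = <&dma0 8>, <&dma0 9>;
 *		dma-names = "rx", "tx";
 *		pre-delay = <0>;
 *		post-delay = <0>;
 *		def-char = <0xFF>;
 *		cs-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
 *		status = "okay";
 *	};
 */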
#define SPI_MCUX_FLEXCOMM_DEVICE(id)					\
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id);			\
	PINCTRL_DT_INST_DEFINE(id);					\
	static const struct spi_mcux_config spi_mcux_config_##id = {	\
		.base =							\
		(SPI_Type *)DT_INST_REG_ADDR(id),			\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)),	\
		.clock_subsys =					\
		(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),\
		SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id)			\
		.pre_delay = DT_INST_PROP_OR(id, pre_delay, 0),		\
		.post_delay = DT_INST_PROP_OR(id, post_delay, 0),		\
		.frame_delay = DT_INST_PROP_OR(id, frame_delay, 0),		\
		.transfer_delay = DT_INST_PROP_OR(id, transfer_delay, 0),		\
		.def_char = DT_INST_PROP_OR(id, def_char, 0),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),		\
		.reset = RESET_DT_SPEC_INST_GET(id),			\
	};								\
	static struct spi_mcux_data spi_mcux_data_##id = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)	\
		SPI_DMA_CHANNELS(id)		\
	};								\
	SPI_DEVICE_DT_INST_DEFINE(id,					\
			    spi_mcux_init,				\
			    NULL,					\
			    &spi_mcux_data_##id,			\
			    &spi_mcux_config_##id,			\
			    POST_KERNEL,				\
			    CONFIG_SPI_INIT_PRIORITY,			\
			    &spi_mcux_driver_api);			\
	\
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id)

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_FLEXCOMM_DEVICE)