/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017,2019, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_lpc_spi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_spi.h>
#include <zephyr/logging/log.h>
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>

LOG_MODULE_REGISTER(spi_mcux_flexcomm, CONFIG_SPI_LOG_LEVEL);

#include "spi_context.h"

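/* The Flexcomm SPI block provides four SSEL lines and supports frame
 * widths of up to 16 bits.
 */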
#define SPI_CHIP_SELECT_COUNT	4
#define SPI_MAX_DATA_WIDTH	16

struct spi_mcux_config {
	SPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev);
	uint32_t pre_delay;
	uint32_t post_delay;
	uint32_t frame_delay;
	uint32_t transfer_delay;
	uint32_t def_char;
	const struct pinctrl_dev_config *pincfg;
};

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG	0x01
#define SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG	0x02
#define SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG	0x04
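/* A transfer is complete only once both the RX and TX channels report done */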
#define SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG		\
	(SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG | SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG)

struct stream {
	const struct device *dma_dev;
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg;
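	/* Two descriptors: the bulk of the data, then a final word
	 * carrying the EOT control bits
	 */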
	struct dma_block_config dma_blk_cfg[2];
};
#endif

struct spi_mcux_data {
	const struct device *dev;
	spi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	volatile uint32_t status_flags;
	struct stream dma_rx;
	struct stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_tx_buffer;
	/* Used to send the last word */
	uint32_t last_word;
#endif
};

static void spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	spi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	transfer.configFlags = 0;
	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	}

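	/* On the final buffer in each direction, assert end-of-transfer so
	 * the hardware deasserts chip select when the packet completes.
	 */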
	if (ctx->tx_count <= 1 && ctx->rx_count <= 1) {
		transfer.configFlags = kSPI_FrameAssert;
	}

	data->transfer_len = transfer.dataSize;

	status = SPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start");
	}
}

static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;

	SPI_MasterTransferHandleIRQ(base, &data->handle);
}

static void spi_mcux_transfer_callback(SPI_Type *base,
		spi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

static uint8_t spi_clock_cycles(uint32_t delay_ns, uint32_t sck_frequency_hz)
{
	/* Convert delay_ns to an integer number of clock cycles of frequency
	 * sck_frequency_hz. The maximum delay is 15 clock cycles.
	 */
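	/* For example, delay_ns = 1000 at sck_frequency_hz = 8 MHz yields
	 * 1000 * 8000000 / 1000000000 = 8 cycles.
	 */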
	uint32_t delay_cycles = (uint64_t)delay_ns * sck_frequency_hz / NSEC_PER_SEC;

	/* Clamp in 32 bits so long delays saturate at 15 instead of wrapping */
	return MIN(delay_cycles, 15);
}

166 
static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	uint32_t clock_freq;
	uint32_t word_size;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > SPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			    word_size, SPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	/*
	 * Do master or slave initialisation, depending on the
	 * mode requested.
	 */
	if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_MASTER) {
		spi_master_config_t master_config;

		SPI_MasterGetDefaultConfig(&master_config);

		if (!device_is_ready(config->clock_dev)) {
			LOG_ERR("clock control device not ready");
			return -ENODEV;
		}

		/* Get the clock frequency */
		if (clock_control_get_rate(config->clock_dev,
					   config->clock_subsys, &clock_freq)) {
			return -EINVAL;
		}

		if (spi_cfg->slave >= SPI_CHIP_SELECT_COUNT) {
			LOG_ERR("Slave %d exceeds the maximum of %d",
				    spi_cfg->slave, SPI_CHIP_SELECT_COUNT - 1);
			return -EINVAL;
		}

		master_config.sselNum = spi_cfg->slave;
		master_config.sselPol = kSPI_SpolActiveAllLow;
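		/* The SDK encodes the frame width as (bits - 1) */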
		master_config.dataWidth = word_size - 1;

		master_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		master_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		master_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		master_config.baudRate_Bps = spi_cfg->frequency;

		spi_delay_config_t *delayConfig = &master_config.delayConfig;

		delayConfig->preDelay = spi_clock_cycles(config->pre_delay,
							spi_cfg->frequency);
		delayConfig->postDelay = spi_clock_cycles(config->post_delay,
							spi_cfg->frequency);
		delayConfig->frameDelay = spi_clock_cycles(config->frame_delay,
							spi_cfg->frequency);
		delayConfig->transferDelay = spi_clock_cycles(config->transfer_delay,
							spi_cfg->frequency);

		SPI_MasterInit(base, &master_config, clock_freq);

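		/* def_char is the idle character clocked out whenever there
		 * is no TX data to send
		 */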
		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_MasterTransferCreateHandle(base, &data->handle,
					     spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	} else {
		spi_slave_config_t slave_config;

		SPI_SlaveGetDefaultConfig(&slave_config);

		slave_config.polarity =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
			? kSPI_ClockPolarityActiveLow
			: kSPI_ClockPolarityActiveHigh;

		slave_config.phase =
			(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
			? kSPI_ClockPhaseSecondEdge
			: kSPI_ClockPhaseFirstEdge;

		slave_config.direction =
			(spi_cfg->operation & SPI_TRANSFER_LSB)
			? kSPI_LsbFirst
			: kSPI_MsbFirst;

		/* SS pin active low */
		slave_config.sselPol = kSPI_SpolActiveAllLow;
		slave_config.dataWidth = word_size - 1;

		SPI_SlaveInit(base, &slave_config);

		SPI_SetDummyData(base, (uint8_t)config->def_char);

		SPI_SlaveTransferCreateHandle(base, &data->handle,
					      spi_mcux_transfer_callback, data);

		data->ctx.config = spi_cfg;
	}

	return 0;
}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
/* Dummy buffer used as a sink when rx buf is null */
static uint32_t dummy_rx_buffer;

/* This function is executed in the interrupt context */
static void spi_mcux_dma_callback(const struct device *dev, void *arg,
			 uint32_t channel, int status)
{
	/* arg directly holds the spi device */
	const struct device *spi_dev = arg;
	struct spi_mcux_data *data = spi_dev->data;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
		data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
	} else {
		/* identify the origin of this callback */
		if (channel == data->dma_tx.channel) {
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_TX_DONE_FLAG;
		} else if (channel == data->dma_rx.channel) {
			/* this part of the transfer ends */
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_RX_DONE_FLAG;
		} else {
			LOG_ERR("DMA callback channel %d is not valid.",
								channel);
			data->status_flags |= SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG;
		}
	}

	spi_context_complete(&data->ctx, spi_dev, 0);
}

static void spi_mcux_prepare_txlastword(uint32_t *txLastWord,
				const uint8_t *buf, const struct spi_config *spi_cfg,
				size_t len)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (word_size > 8) {
		*txLastWord = (((uint32_t)buf[len - 1U] << 8U) |
							(buf[len - 2U]));
	} else {
		*txLastWord = buf[len - 1U];
	}

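	/* EOT marks this as the final word of the frame, so the hardware
	 * deasserts SSEL after it is sent
	 */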
	*txLastWord |= (uint32_t)SPI_FIFOWR_EOT_MASK;

	*txLastWord |= ((uint32_t)SPI_DEASSERT_ALL &
				(~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*txLastWord |= SPI_FIFOWR_LEN(word_size - 1);
}

static void spi_mcux_prepare_txdummy(uint32_t *dummy, bool last_packet,
				const struct spi_config *spi_cfg)
{
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	if (last_packet) {
		*dummy |= (uint32_t)SPI_FIFOWR_EOT_MASK;
	}

	*dummy |= ((uint32_t)SPI_DEASSERT_ALL &
				(~(uint32_t)SPI_DEASSERTNUM_SSEL((uint32_t)spi_cfg->slave)));

	/* set width of data - range asserted at entry */
	*dummy |= SPI_FIFOWR_LEN(word_size - 1);
}

static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf,
				 const struct spi_config *spi_cfg, size_t len, bool last_packet)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;
	uint32_t word_size;

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

	/* remember active TX DMA channel (used in callback) */
	struct stream *stream = &data->dma_tx;

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));

	/* tx direction has memory as source and periph as dest. */
	if (buf == NULL) {
		data->dummy_tx_buffer = 0;
		data->last_word = 0;
		spi_mcux_prepare_txdummy(&data->dummy_tx_buffer, last_packet, spi_cfg);

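		/* If this is the last packet and more than one frame remains,
		 * chain a second descriptor so that the final word carries
		 * the EOT bit.
		 */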
		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			spi_mcux_prepare_txdummy(&data->last_word, last_packet, spi_cfg);
			blk_cfg->source_gather_en = 1;
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
						(len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		} else {
			blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		}
	} else {
		if (last_packet) {
			spi_mcux_prepare_txlastword(&data->last_word, buf, spi_cfg, len);
		}
		/* If this is the last packet and the transfer is longer than
		 * one frame, use a chained DMA descriptor to send the final
		 * word.
		 */
		if (last_packet &&
		    ((word_size > 8) ? (len > 2U) : (len > 1U))) {
			blk_cfg->source_gather_en = 1;
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = (word_size > 8) ?
						(len - 2U) : (len - 1U);
			blk_cfg->next_block = &stream->dma_blk_cfg[1];

			blk_cfg = &stream->dma_blk_cfg[1];

			/* prepare the block for this TX DMA channel */
			memset(blk_cfg, 0, sizeof(struct dma_block_config));
			blk_cfg->source_address = (uint32_t)&data->last_word;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = sizeof(uint32_t);
			blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		} else {
			blk_cfg->source_address = (uint32_t)buf;
			blk_cfg->dest_address = (uint32_t)&base->FIFOWR;
			blk_cfg->block_size = len;
		}
	}

	/* Enables the DMA request from SPI txFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMATX_MASK;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = &stream->dma_blk_cfg[0];
	/* give the client dev as arg, as the callback comes from the dma */
	stream->dma_cfg.user_data = (struct device *)dev;
	/* pass our client origin to the dma: data->dma_tx.channel */
	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	uint32_t tmpData = 0U;

	spi_mcux_prepare_txdummy(&tmpData, last_packet, spi_cfg);

	/* Set up the control info.
	 * A halfword write to just the control bits (offset 0xE22) doesn't
	 * push anything into the FIFO. The control bits must be accessed as
	 * uint16_t; byte or halfword writes to FIFOWR itself push both the
	 * data and the current control bits into the FIFO.
	 */
	if (last_packet &&
		((word_size > 8) ? (len == 2U) : (len == 1U))) {
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	} else {
		/* Clear the SPI_FIFOWR_EOT_MASK bit when data is not the last */
		tmpData &= (~(uint32_t)SPI_FIFOWR_EOT_MASK);
		*((uint16_t *)((uint32_t)&base->FIFOWR) + 1) = (uint16_t)(tmpData >> 16U);
	}

	/* gives the request ID */
	return dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
}

static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf,
				 size_t len)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	struct dma_block_config *blk_cfg;
	int ret;
	SPI_Type *base = cfg->base;

	/* retrieve active RX DMA channel (used in callback) */
	struct stream *stream = &data->dma_rx;

	blk_cfg = &stream->dma_blk_cfg[0];

	/* prepare the block for this RX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = len;

	/* rx direction has periph as source and mem as dest. */
	if (buf == NULL) {
		/* if rx buff is null, then write data to dummy address. */
		blk_cfg->dest_address = (uint32_t)&dummy_rx_buffer;
		blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	} else {
		blk_cfg->dest_address = (uint32_t)buf;
	}

	blk_cfg->source_address = (uint32_t)&base->FIFORD;

	/* direction is given by the DT */
	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = (struct device *)dev;

	/* Enables the DMA request from SPI rxFIFO */
	base->FIFOCFG |= SPI_FIFOCFG_DMARX_MASK;

	/* pass our client origin to the dma: data->dma_rx.channel */
	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
			&stream->dma_cfg);
	/* the channel is the actual stream from 0 */
	if (ret != 0) {
		return ret;
	}

	/* gives the request ID */
	return dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
}

static int spi_mcux_dma_move_buffers(const struct device *dev, size_t len,
			const struct spi_config *spi_cfg, bool last_packet)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, len);
	if (ret != 0) {
		return ret;
	}

	ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, spi_cfg,
							len, last_packet);

	return ret;
}

static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	while (1) {
		ret = spi_context_wait_for_completion(&data->ctx);
		if (ret != 0) {
			/* propagate a timeout or other wait error */
			return ret;
		}

		if (data->status_flags & SPI_MCUX_FLEXCOMM_DMA_ERROR_FLAG) {
			return -EIO;
		}

		if ((data->status_flags & SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) ==
			SPI_MCUX_FLEXCOMM_DMA_DONE_FLAG) {
			return 0;
		}
	}
}

579 
static int transceive_dma(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	int ret;
	uint32_t word_size;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);

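	/* DMA moves data as 16-bit units when frames are wider than 8 bits */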
	data->dma_rx.dma_cfg.dest_data_size = (word_size > 8) ?
				(sizeof(uint16_t)) : (sizeof(uint8_t));
	data->dma_tx.dma_cfg.dest_data_size = data->dma_rx.dma_cfg.dest_data_size;

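	/* Chunk the transfer by the shorter of the two buffers; only the
	 * final chunk asserts EOT to end the frame.
	 */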
	while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
		size_t dma_len;
		bool last = false;

		if (data->ctx.rx_len == 0) {
			dma_len = data->ctx.tx_len;
			last = true;
		} else if (data->ctx.tx_len == 0) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else if (data->ctx.tx_len == data->ctx.rx_len) {
			dma_len = data->ctx.rx_len;
			last = true;
		} else {
			dma_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
			last = false;
		}

		data->status_flags = 0;

		ret = spi_mcux_dma_move_buffers(dev, dma_len, spi_cfg, last);
		if (ret != 0) {
			break;
		}

		ret = wait_dma_rx_tx_done(dev);
		if (ret != 0) {
			break;
		}

		/* wait until TX FIFO is really empty */
		while (0U == (base->FIFOSTAT & SPI_FIFOSTAT_TXEMPTY_MASK)) {
		}

		spi_context_update_tx(&data->ctx, 1, dma_len);
		spi_context_update_rx(&data->ctx, 1, dma_len);
	}

	base->FIFOCFG &= ~SPI_FIFOCFG_DMATX_MASK;
	base->FIFOCFG &= ~SPI_FIFOCFG_DMARX_MASK;

	spi_context_cs_control(&data->ctx, false);

out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

#endif

static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

	spi_mcux_transfer_next_packet(dev);

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
#endif
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
#endif

	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_mcux_init(const struct device *dev)
{
	int err;
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;

	config->irq_config_func(dev);

	data->dev = dev;

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

#ifdef CONFIG_SPI_MCUX_FLEXCOMM_DMA
	if (!device_is_ready(data->dma_tx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name);
		return -ENODEV;
	}

	if (!device_is_ready(data->dma_rx.dma_dev)) {
		LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name);
		return -ENODEV;
	}
#endif /* CONFIG_SPI_MCUX_FLEXCOMM_DMA */

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};

#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id)				\
	static void spi_mcux_config_func_##id(const struct device *dev)
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id)				\
	.irq_config_func = spi_mcux_config_func_##id,
#define SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id)				\
static void spi_mcux_config_func_##id(const struct device *dev) \
{								\
	IRQ_CONNECT(DT_INST_IRQN(id),				\
			DT_INST_IRQ(id, priority),			\
			spi_mcux_isr, DEVICE_DT_INST_GET(id),	\
			0);					\
	irq_enable(DT_INST_IRQN(id));				\
}

#ifndef CONFIG_SPI_MCUX_FLEXCOMM_DMA
#define SPI_DMA_CHANNELS(id)
#else
#define SPI_DMA_CHANNELS(id)				\
	.dma_tx = {						\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.channel =					\
			DT_INST_DMAS_CELL_BY_NAME(id, tx, channel),	\
		.dma_cfg = {					\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.source_data_size = 1,				\
			.block_count = 2,		\
		}							\
	},								\
	.dma_rx = {						\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.channel =					\
			DT_INST_DMAS_CELL_BY_NAME(id, rx, channel),	\
		.dma_cfg = {				\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.source_data_size = 1,				\
			.block_count = 1,		\
		}							\
	}

#endif

#define SPI_MCUX_FLEXCOMM_DEVICE(id)					\
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER_DECL(id);			\
	PINCTRL_DT_INST_DEFINE(id);					\
	static const struct spi_mcux_config spi_mcux_config_##id = {	\
		.base =							\
		(SPI_Type *)DT_INST_REG_ADDR(id),			\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)),	\
		.clock_subsys =					\
		(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),\
		SPI_MCUX_FLEXCOMM_IRQ_HANDLER_FUNC(id)			\
		.pre_delay = DT_INST_PROP_OR(id, pre_delay, 0),		\
		.post_delay = DT_INST_PROP_OR(id, post_delay, 0),		\
		.frame_delay = DT_INST_PROP_OR(id, frame_delay, 0),		\
		.transfer_delay = DT_INST_PROP_OR(id, transfer_delay, 0),		\
		.def_char = DT_INST_PROP_OR(id, def_char, 0),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),		\
	};								\
	static struct spi_mcux_data spi_mcux_data_##id = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)	\
		SPI_DMA_CHANNELS(id)		\
	};								\
	DEVICE_DT_INST_DEFINE(id,					\
			    &spi_mcux_init,				\
			    NULL,					\
			    &spi_mcux_data_##id,			\
			    &spi_mcux_config_##id,			\
			    POST_KERNEL,				\
			    CONFIG_SPI_INIT_PRIORITY,			\
			    &spi_mcux_driver_api);			\
	\
	SPI_MCUX_FLEXCOMM_IRQ_HANDLER(id)

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_FLEXCOMM_DEVICE)