/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017, 2020-2021, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT	nxp_kinetis_dspi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_dspi.h>
#include <zephyr/drivers/pinctrl.h>
#ifdef CONFIG_DSPI_MCUX_EDMA
#include <zephyr/drivers/dma.h>
#include <fsl_edma.h>
#endif

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_mcux_dspi);

#include "spi_context.h"

#ifdef CONFIG_DSPI_MCUX_EDMA

struct spi_edma_config {
	const struct device *dma_dev;
	int32_t state;
	uint32_t dma_channel;
	void (*irq_call_back)(void);
	struct dma_config dma_cfg;
};
#endif

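/*
 * Read-only per-instance configuration, populated from devicetree at build
 * time by the SPI_MCUX_DSPI_DEVICE() macro at the bottom of this file.
 */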
struct spi_mcux_config {
	SPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev);
	uint32_t pcs_sck_delay;
	uint32_t sck_pcs_delay;
	uint32_t transfer_delay;
	uint32_t which_ctar;
	uint32_t samplePoint;
	bool enable_continuous_sck;
	bool enable_rxfifo_overwrite;
	bool enable_modified_timing_format;
	bool is_dma_chn_shared;
	const struct pinctrl_dev_config *pincfg;
};

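/* Mutable per-instance driver state. */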
struct spi_mcux_data {
	const struct device *dev;
	dspi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_DSPI_MCUX_EDMA
	struct dma_block_config tx_dma_block;
	struct dma_block_config tx_dma_block_end;
	struct dma_block_config rx_dma_block;
	struct spi_edma_config rx_dma_config;
	struct spi_edma_config tx_dma_config;
	int frame_size;
	int tx_transfer_count;
	int rx_transfer_count;
	uint32_t which_pcs;
	struct spi_buf *inner_tx_buffer;
	struct spi_buf *inner_rx_buffer;
#endif
};

#ifdef CONFIG_DSPI_MCUX_EDMA
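/*
 * In eDMA mode each DSPI frame is staged as one 32-bit PUSHR word (command
 * bits plus data), so len bytes of 8-bit frames need len * 4 staging bytes,
 * and len bytes of 16-bit frames need (len / 2) * 4 = len * 2 bytes.
 */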
static int get_size_byte_by_frame_size(int len, int frame_size)
{
	if (frame_size == 8) {
		return (len * 4);
	} else { /* frame_size == 16 */
		return (len * 2);
	}
}
#endif

static int spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	dspi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		LOG_DBG("spi transceive done");
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return 0;
	}

#ifdef CONFIG_DSPI_MCUX_EDMA

	if (!config->is_dma_chn_shared) {
		/* start DMA directly in non-shared mode */
		if (ctx->tx_len != 0) {
			int ret = 0;

			LOG_DBG("Starting DMA Ch%u",
				data->tx_dma_config.dma_channel);
			ret = dma_start(data->tx_dma_config.dma_dev,
					data->tx_dma_config.dma_channel);
			if (ret < 0) {
				LOG_ERR("Failed to start DMA Ch%d (%d)",
					data->tx_dma_config.dma_channel, ret);
				return ret;
			}
		}

		if (ctx->rx_len != 0) {
			int ret = 0;

			LOG_DBG("Starting DMA Ch%u",
				data->rx_dma_config.dma_channel);
			ret = dma_start(data->rx_dma_config.dma_dev,
					data->rx_dma_config.dma_channel);
			if (ret < 0) {
				LOG_ERR("Failed to start DMA Ch%d (%d)",
					data->rx_dma_config.dma_channel, ret);
				return ret;
			}
		}
	}

	DSPI_EnableDMA(base, (uint32_t)kDSPI_RxDmaEnable |
					      (uint32_t)kDSPI_TxDmaEnable);
	DSPI_StartTransfer(base);

	if (config->is_dma_chn_shared) {
		/* in master mode start tx */
		dma_start(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel);
		/* TBD kDSPI_TxFifoFillRequestFlag */
		DSPI_EnableInterrupts(base,
				      (uint32_t)kDSPI_RxFifoDrainRequestFlag);
		LOG_DBG("trigger tx to start master");
	}

	return 0;
#endif

	transfer.configFlags = kDSPI_MasterCtar0 | kDSPI_MasterPcsContinuous |
			       (ctx->config->slave << DSPI_MASTER_PCS_SHIFT);

	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	}

	if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) {
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	}

	data->transfer_len = transfer.dataSize;

	status = DSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		return status == kDSPI_Busy ? -EBUSY : -EINVAL;
	}

	return 0;
}

static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;

#ifdef CONFIG_DSPI_MCUX_EDMA
	LOG_DBG("isr is called");

	if (0U != (DSPI_GetStatusFlags(base) &
		   (uint32_t)kDSPI_RxFifoDrainRequestFlag)) {
		/* start rx */
		dma_start(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel);
	}
#else
	DSPI_MasterTransferHandleIRQ(base, &data->handle);
#endif
}

#ifdef CONFIG_DSPI_MCUX_EDMA

static void mcux_init_inner_buffer_with_cmd(const struct device *dev,
					    uint16_t dummy)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	dspi_command_data_config_t commandStruct;
	uint32_t *pbuf = data->inner_tx_buffer->buf;
	uint32_t command;
	int i = 0;

	commandStruct.whichPcs = data->which_pcs;

	commandStruct.isEndOfQueue = false;
	commandStruct.clearTransferCount = false;
	commandStruct.whichCtar = config->which_ctar;
	commandStruct.isPcsContinuous = config->enable_continuous_sck;
	command = DSPI_MasterGetFormattedCommand(&(commandStruct));
	for (i = 0; i < data->inner_tx_buffer->len / 4; i++) {
		*pbuf = command | dummy;
		pbuf++;
	}
}
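/*
 * The formatted command occupies the upper half of each 32-bit PUSHR word
 * and the (up to 16-bit) data frame occupies the lower half, which is why
 * "command | dummy" above yields one ready-to-push FIFO entry.
 */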

/**
 * @brief Update the tx data into the internal buffer with the command word
 * embedded; if there is no tx data, use a dummy value.
 * The tx data frame size shall not be bigger than 16 bits, and the overall
 * transfer data in one batch shall not be larger than the FIFO size.
 */
static int mcux_spi_context_data_update(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	uint32_t frame_size_bit = data->frame_size;
	struct spi_context *ctx = (struct spi_context *)&data->ctx;
	uint32_t *pcdata = data->inner_tx_buffer->buf;

	if (frame_size_bit > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) {
		/* TODO: need to set continuous PCS to support frame sizes larger than 16 bits */
		LOG_ERR("frame size is larger than 16");
		return -EINVAL;
	}

#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
	/* only used when the inner buffer is used to translate the tx format */
	if (CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 <
	    get_size_byte_by_frame_size(ctx->current_tx->len, frame_size_bit)) {
		/* inner buffer cannot hold all of the transferred data */
		LOG_ERR("inner buffer is too small to hold all data, expected %d, actual %d",
			ctx->current_tx->len * 8 / frame_size_bit,
			(CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 / frame_size_bit));
		return -EINVAL;
	}

	if (frame_size_bit == 8) {
		int i = 0;
		uint8_t *pdata = (uint8_t *)ctx->tx_buf;

		if (pdata) {
			do {
				uint16_t temp_data = 0;

				temp_data = *pdata;
				pdata++;
				*pcdata |= temp_data;
				pcdata++;
				i++;
			} while (i < ctx->current_tx->len &&
				 i < data->inner_tx_buffer->len);
		}
		/* indicate it is the last data */
		if (i == ctx->current_tx->len) {
			--pcdata;
			*pcdata |= SPI_PUSHR_EOQ(1) | SPI_PUSHR_CTCNT(1);
			LOG_DBG("last pcdata is %x", *pcdata);
		}
	} else if (frame_size_bit == 16) {
		int i = 0;
		uint16_t *pdata = (uint16_t *)ctx->tx_buf;

		if (pdata) {
			do {
				*pcdata |= *pdata;
				LOG_DBG("pcdata %d is %x", i / 2, *pcdata);
				pdata++;
				pcdata++;
				i += 2;
			} while (i < ctx->current_tx->len &&
				 i < data->inner_tx_buffer->len);
		}
		if (i == ctx->current_tx->len) {
			/* indicate it is the last data */
			--pcdata;
			*pcdata |= SPI_PUSHR_EOQ(1);
			LOG_DBG("last pcdata is %x", *pcdata);
		}
	} else {
		/* TODO for other size */
		LOG_ERR("DMA mode only supports 8/16-bit frame sizes");
		return -EINVAL;
	}

#endif /* CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA */

	return 0;
}
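/*
 * Shuffle example (8-bit frames, assuming an illustrative formatted command
 * of 0x80010000): the tx bytes { 0xAA, 0xBB } become the PUSHR words
 * 0x80010000 | 0xAA and 0x80010000 | 0xBB, with the EOQ/CTCNT bits OR-ed
 * into the last word.
 */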

static int update_tx_dma(const struct device *dev)
{
	uint32_t tx_size = 0;
	uint8_t *tx_buf;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	uint32_t frame_size = data->frame_size;
	bool rx_only = false;

	DSPI_DisableDMA(base, (uint32_t)kDSPI_TxDmaEnable);
	if (data->ctx.tx_len == 0) {
		LOG_DBG("empty data, no need to set up DMA");
		return 0;
	}

	if (data->ctx.current_tx && data->ctx.current_tx->len > 0 &&
	    data->ctx.current_tx->buf != NULL) {
#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
		tx_size = get_size_byte_by_frame_size(data->transfer_len,
						      frame_size);
		tx_buf = data->inner_tx_buffer->buf;
#else
		/* expect the buffer is pre-set */
		tx_size = get_size_byte_by_frame_size(data->ctx.current_tx->len,
						      frame_size);
		LOG_DBG("tx size is %d", tx_size);
		tx_buf = data->ctx.current_tx->buf;
#endif
	} else {
		tx_buf = data->inner_tx_buffer->buf;
		tx_size = get_size_byte_by_frame_size(data->transfer_len,
						      frame_size);
		rx_only = true;
		LOG_DBG("rx only 0x%x, size %d", (uint32_t)tx_buf, tx_size);
	}

	data->tx_dma_block.source_address = (uint32_t)tx_buf;
	data->tx_dma_block.dest_address =
		DSPI_MasterGetTxRegisterAddress(base);
	data->tx_dma_block.next_block = NULL;
	if (config->is_dma_chn_shared) {
		/* transfer FIFO size data */
		data->tx_dma_block.block_size = 4;
	} else {
		data->tx_dma_block.block_size = tx_size;
	}

	data->tx_dma_config.dma_cfg.user_data = (void *) dev;
	dma_config(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel,
		   (struct dma_config *)&data->tx_dma_config.dma_cfg);

	return 0;
}

static int update_rx_dma(const struct device *dev)
{
	uint32_t rx_size = 0;
	uint8_t *rx_buf;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	uint32_t frame_size_byte = (data->frame_size >> 3);
	bool tx_only = false;

	DSPI_DisableDMA(base, (uint32_t)kDSPI_RxDmaEnable);
	if (data->ctx.rx_len == 0) {
		LOG_DBG("empty data, no need to set up DMA");
		return 0;
	}

	if (data->ctx.current_rx) {
		rx_size = data->transfer_len;
		if (data->ctx.rx_buf != NULL) {
			rx_buf = data->ctx.rx_buf;
		} else {
			rx_buf = data->inner_rx_buffer->buf;
		}
	} else {
		/* tx only */
		rx_buf = data->inner_rx_buffer->buf;
		rx_size = data->transfer_len;
		tx_only = true;
		LOG_DBG("tx only 0x%x, size %d", (uint32_t)rx_buf, rx_size);
	}

	if (config->is_dma_chn_shared) {
		if (data->ctx.rx_len == 1) {
			/* do not link tx on the last frame */
			LOG_DBG("do not link tx/rx channel for last one");
			data->rx_dma_config.dma_cfg.source_chaining_en = 0;
			data->rx_dma_config.dma_cfg.dest_chaining_en = 0;
		} else {
			LOG_DBG("shared mux mode, link tx/rx channel");
			data->rx_dma_config.dma_cfg.source_chaining_en = 1;
			data->rx_dma_config.dma_cfg.dest_chaining_en = 1;
			data->rx_dma_config.dma_cfg.linked_channel =
				data->tx_dma_config.dma_channel;
		}

		data->rx_dma_block.dest_address = (uint32_t)rx_buf;
		data->rx_dma_block.source_address =
			DSPI_GetRxRegisterAddress(base);
		/* do once in shared mode */
		data->rx_dma_block.block_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_burst_length =
			frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte;

	} else {
		data->rx_dma_block.dest_address = (uint32_t)rx_buf;
		data->rx_dma_block.source_address =
			DSPI_GetRxRegisterAddress(base);
		data->rx_dma_block.block_size = rx_size;
		data->rx_dma_config.dma_cfg.source_burst_length =
			frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte;
	}

	data->rx_dma_config.dma_cfg.user_data = (void *) dev;
	dma_config(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel,
		   (struct dma_config *)&data->rx_dma_config.dma_cfg);

	return 0;
}

static int configure_dma(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;

	if (config->is_dma_chn_shared) {
		LOG_DBG("shared DMA request");
	}
	update_tx_dma(dev);
	update_rx_dma(dev);

	return 0;
}

static void dma_callback(const struct device *dma_dev, void *callback_arg,
			 uint32_t channel, int error_code)
{
	const struct device *dev = (const struct device *)callback_arg;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	struct spi_mcux_data *data = dev->data;

	LOG_DBG("=DMA callback @channel %d=", channel);

	if (error_code < 0) {
		LOG_ERR("error %d happened, no callback processing", error_code);
		return;
	}

	if (channel == data->tx_dma_config.dma_channel) {
		LOG_DBG("ctx.tx_len is %d", data->ctx.tx_len);
		LOG_DBG("tx count %d", data->ctx.tx_count);

		spi_context_update_tx(&data->ctx, 1, data->transfer_len);
		LOG_DBG("tx count %d", data->ctx.tx_count);
		LOG_DBG("tx buf/len %p/%zu", data->ctx.tx_buf,
			data->ctx.tx_len);
		data->tx_transfer_count++;
		/* tx done */
	} else {
		LOG_DBG("ctx.rx_len is %d", data->ctx.rx_len);
		LOG_DBG("rx count %d", data->ctx.rx_count);
		spi_context_update_rx(&data->ctx, 1, data->transfer_len);
		LOG_DBG("rx count %d", data->ctx.rx_count);
		/* setup the inner tx buffer */
		LOG_DBG("rx buf/len %p/%zu", data->ctx.rx_buf,
			data->ctx.rx_len);
		data->rx_transfer_count++;
	}

	if (data->tx_transfer_count == data->rx_transfer_count) {
		LOG_DBG("start next packet");
		DSPI_StopTransfer(base);
		DSPI_FlushFifo(base, true, true);
		DSPI_ClearStatusFlags(base,
				      (uint32_t)kDSPI_AllStatusFlag);
		mcux_init_inner_buffer_with_cmd(dev, 0);
		mcux_spi_context_data_update(dev);

		if (config->is_dma_chn_shared) {
			data->transfer_len = data->frame_size >> 3;
		} else {
			if (data->ctx.tx_len == 0) {
				data->transfer_len = data->ctx.rx_len;
			} else if (data->ctx.rx_len == 0) {
				data->transfer_len = data->ctx.tx_len;
			} else {
				data->transfer_len =
					data->ctx.tx_len > data->ctx.rx_len ?
						data->ctx.rx_len :
						data->ctx.tx_len;
			}
		}
		update_tx_dma(dev);
		update_rx_dma(dev);
		spi_mcux_transfer_next_packet(dev);
	} else if (data->ctx.rx_len == 0 && data->ctx.tx_len == 0) {
		LOG_DBG("end of transfer");
		DSPI_StopTransfer(base);
		DSPI_FlushFifo(base, true, true);
		DSPI_ClearStatusFlags(base,
				      (uint32_t)kDSPI_AllStatusFlag);
		data->transfer_len = 0;
		spi_mcux_transfer_next_packet(dev);
	}
	LOG_DBG("TX/RX DMA callback done");
}

#else

static void spi_mcux_master_transfer_callback(SPI_Type *base,
	      dspi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

#endif /* CONFIG_DSPI_MCUX_EDMA */

static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	dspi_master_config_t master_config;
	uint32_t clock_freq;
	uint32_t word_size;

	dspi_master_ctar_config_t *ctar_config = &master_config.ctarConfig;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	DSPI_MasterGetDefaultConfig(&master_config);

	master_config.whichPcs = 1U << spi_cfg->slave;
	master_config.whichCtar = config->which_ctar;
	master_config.pcsActiveHighOrLow =
		(spi_cfg->operation & SPI_CS_ACTIVE_HIGH) ?
			kDSPI_PcsActiveHigh :
			kDSPI_PcsActiveLow;
	master_config.samplePoint = config->samplePoint;
	master_config.enableContinuousSCK = config->enable_continuous_sck;
	master_config.enableRxFifoOverWrite = config->enable_rxfifo_overwrite;
	master_config.enableModifiedTimingFormat =
		config->enable_modified_timing_format;

	if (spi_cfg->slave > FSL_FEATURE_DSPI_CHIP_SELECT_COUNT) {
		LOG_ERR("Slave %d is greater than %d",
			    spi_cfg->slave, FSL_FEATURE_DSPI_CHIP_SELECT_COUNT);
		return -EINVAL;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			    word_size, FSL_FEATURE_DSPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	ctar_config->bitsPerFrame = word_size;

	ctar_config->cpol =
		(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
		? kDSPI_ClockPolarityActiveLow
		: kDSPI_ClockPolarityActiveHigh;

	ctar_config->cpha =
		(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
		? kDSPI_ClockPhaseSecondEdge
		: kDSPI_ClockPhaseFirstEdge;

	ctar_config->direction =
		(spi_cfg->operation & SPI_TRANSFER_LSB)
		? kDSPI_LsbFirst
		: kDSPI_MsbFirst;

	ctar_config->baudRate = spi_cfg->frequency;

	ctar_config->pcsToSckDelayInNanoSec = config->pcs_sck_delay;
	ctar_config->lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
	ctar_config->betweenTransferDelayInNanoSec = config->transfer_delay;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LOG_DBG("clock_freq is %d", clock_freq);

	DSPI_MasterInit(base, &master_config, clock_freq);

#ifdef CONFIG_DSPI_MCUX_EDMA
	DSPI_StopTransfer(base);
	DSPI_FlushFifo(base, true, true);
	DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag);
	/* record frame_size setting for DMA */
	data->frame_size = word_size;
	/* keep the pcs settings */
	data->which_pcs = 1U << spi_cfg->slave;
#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
	mcux_init_inner_buffer_with_cmd(dev, 0);
#endif
#else
	DSPI_MasterTransferCreateHandle(base, &data->handle,
					spi_mcux_master_transfer_callback,
					data);

	DSPI_SetDummyData(base, 0);
#endif

	data->ctx.config = spi_cfg;

	return 0;
}

static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;
#ifdef CONFIG_DSPI_MCUX_EDMA
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
#endif

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_DSPI_MCUX_EDMA
	DSPI_StopTransfer(base);
	DSPI_FlushFifo(base, true, true);
	DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag);
	/* set up the tx buffer with the embedded command and end-of-queue flag */
	mcux_init_inner_buffer_with_cmd(dev, 0);
	mcux_spi_context_data_update(dev);
	if (config->is_dma_chn_shared) {
		data->transfer_len = data->frame_size >> 3;
	} else {
		data->transfer_len = data->ctx.tx_len > data->ctx.rx_len ?
					     data->ctx.rx_len :
					     data->ctx.tx_len;
	}
	data->tx_transfer_count = 0;
	data->rx_transfer_count = 0;
	configure_dma(dev);
#endif

	ret = spi_mcux_transfer_next_packet(dev);
	if (ret) {
		goto out;
	}

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_mcux_init(const struct device *dev)
{
	int err;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;

#ifdef CONFIG_DSPI_MCUX_EDMA
	enum dma_channel_filter spi_filter = DMA_CHANNEL_NORMAL;
	const struct device *dma_dev;

	dma_dev = data->rx_dma_config.dma_dev;
	data->rx_dma_config.dma_channel =
	  dma_request_channel(dma_dev, (void *)&spi_filter);
	dma_dev = data->tx_dma_config.dma_dev;
	data->tx_dma_config.dma_channel =
	  dma_request_channel(dma_dev, (void *)&spi_filter);
#else
	config->irq_config_func(dev);
#endif
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err != 0) {
		return err;
	}

	data->dev = dev;

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static const struct spi_driver_api spi_mcux_driver_api = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
	.release = spi_mcux_release,
};
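
/*
 * Usage sketch (not part of the driver): an application would drive an
 * instance of this driver through the generic Zephyr SPI API, e.g.:
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(spi0));
 *	struct spi_config cfg = {
 *		.frequency = 1000000U,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8),
 *		.slave = 0,
 *	};
 *	uint8_t tx_data[2] = { 0x9f, 0x00 }, rx_data[2];
 *	const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
 *	const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
 *	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
 *	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
 *	int ret = spi_transceive(spi, &cfg, &tx, &rx);
 *
 * The spi0 node label and the transfer contents are illustrative only.
 */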


/* if a is nonzero then expand to b, otherwise expand to 1 */
#define _UTIL_AND2(a, b) COND_CODE_1(UTIL_BOOL(a), (b), (1))
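/*
 * For example, _UTIL_AND2(1, 2) expands to 2 and _UTIL_AND2(0, 2) expands
 * to 1; it is used below to pick the eDMA block_count when the rx and tx
 * requests share a channel.
 */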

#ifdef CONFIG_DSPI_MCUX_EDMA

#define TX_BUFFER(id)							\
	static uint32_t							\
		edma_tx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2];	\
	static struct spi_buf spi_edma_tx_buffer_##id = {		\
		.buf = edma_tx_buffer_##id,				\
		.len = CONFIG_MCUX_DSPI_BUFFER_SIZE,			\
	}

#define RX_BUFFER(id)							\
	static uint32_t							\
		edma_rx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2];	\
	static struct spi_buf spi_edma_rx_buffer_##id = {		\
		.buf = edma_rx_buffer_##id,				\
		.len = CONFIG_MCUX_DSPI_BUFFER_SIZE,			\
	}
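
/*
 * CONFIG_MCUX_DSPI_BUFFER_SIZE is in bytes; the buffers are declared as
 * uint32_t arrays (hence the >> 2) because each element stages one 32-bit
 * PUSHR command/data word.
 */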

#define TX_DMA_CONFIG(id)						\
	.inner_tx_buffer = &spi_edma_tx_buffer_##id,			\
	.tx_dma_config = {						\
		.dma_dev =						\
		    DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)),	\
		.dma_cfg = {						\
			.source_burst_length = 4,			\
			.dest_burst_length = 4,				\
			.source_data_size = 4,				\
			.dest_data_size = 4,				\
			.dma_callback = dma_callback,			\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block = &spi_mcux_data_##id.tx_dma_block,	\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, tx, source),			\
		},							\
	},

#define RX_DMA_CONFIG(id)						\
	.inner_rx_buffer = &spi_edma_rx_buffer_##id,			\
	.rx_dma_config = {						\
		.dma_dev =						\
		    DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)),	\
		.dma_cfg = {						\
			.source_burst_length = 2,			\
			.dest_burst_length = 2,				\
			.source_data_size = 2,				\
			.dest_data_size = 2,				\
			.dma_callback = dma_callback,			\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count =					\
			_UTIL_AND2(DT_INST_NODE_HAS_PROP(		\
				id, nxp_rx_tx_chn_share), 2),		\
			.head_block = &spi_mcux_data_##id.rx_dma_block,	\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, rx, source),			\
		},							\
	},
#else
#define TX_BUFFER(id)
#define RX_BUFFER(id)
#define TX_DMA_CONFIG(id)
#define RX_DMA_CONFIG(id)

#endif

#define SPI_MCUX_DSPI_DEVICE(id)					\
	PINCTRL_DT_INST_DEFINE(id);					\
	static void spi_mcux_config_func_##id(const struct device *dev);\
	TX_BUFFER(id);							\
	RX_BUFFER(id);							\
	static struct spi_mcux_data spi_mcux_data_##id = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)	\
		TX_DMA_CONFIG(id) RX_DMA_CONFIG(id)			\
	};								\
	static const struct spi_mcux_config spi_mcux_config_##id = {	\
		.base = (SPI_Type *)DT_INST_REG_ADDR(id),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)),	\
		.clock_subsys =						\
		(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),	\
		.irq_config_func = spi_mcux_config_func_##id,		\
		.pcs_sck_delay =					\
		    DT_INST_PROP_OR(id, pcs_sck_delay, 0),		\
		.sck_pcs_delay =					\
		    DT_INST_PROP_OR(id, sck_pcs_delay, 0),		\
		.transfer_delay =					\
		    DT_INST_PROP_OR(id, transfer_delay, 0),		\
		.which_ctar =						\
		    DT_INST_PROP_OR(id, ctar, 0),			\
		.samplePoint =						\
		    DT_INST_PROP_OR(id, sample_point, 0),		\
		.enable_continuous_sck =				\
		    DT_INST_PROP(id, continuous_sck),			\
		.enable_rxfifo_overwrite =				\
		    DT_INST_PROP(id, rx_fifo_overwrite),		\
		.enable_modified_timing_format =			\
		    DT_INST_PROP(id, modified_timing_format),		\
		.is_dma_chn_shared =					\
		    DT_INST_PROP(id, nxp_rx_tx_chn_share),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),		\
	};								\
	DEVICE_DT_INST_DEFINE(id,					\
			    &spi_mcux_init,				\
			    NULL,					\
			    &spi_mcux_data_##id,			\
			    &spi_mcux_config_##id,			\
			    POST_KERNEL,				\
			    CONFIG_SPI_INIT_PRIORITY,			\
			    &spi_mcux_driver_api);			\
	static void spi_mcux_config_func_##id(const struct device *dev)\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(id),				\
			    DT_INST_IRQ(id, priority),			\
			    spi_mcux_isr, DEVICE_DT_INST_GET(id),	\
			    0);						\
		irq_enable(DT_INST_IRQN(id));				\
	}

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_DSPI_DEVICE)