/*
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright (c) 2017, 2020-2021, NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT	nxp_dspi

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/clock_control.h>
#include <fsl_dspi.h>
#include <zephyr/drivers/pinctrl.h>
#ifdef CONFIG_DSPI_MCUX_EDMA
#include <zephyr/drivers/dma.h>
#include <fsl_edma.h>
#endif

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(spi_mcux_dspi);

#include "spi_context.h"

#ifdef CONFIG_DSPI_MCUX_EDMA

struct spi_edma_config {
	const struct device *dma_dev;
	int32_t state;
	uint32_t dma_channel;
	void (*irq_call_back)(void);
	struct dma_config dma_cfg;
};
#endif

struct spi_mcux_config {
	SPI_Type *base;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	void (*irq_config_func)(const struct device *dev);
	uint32_t pcs_sck_delay;
	uint32_t sck_pcs_delay;
	uint32_t transfer_delay;
	uint32_t which_ctar;
	uint32_t samplePoint;
	bool enable_continuous_sck;
	bool enable_rxfifo_overwrite;
	bool enable_modified_timing_format;
	bool is_dma_chn_shared;
	const struct pinctrl_dev_config *pincfg;
};

struct spi_mcux_data {
	const struct device *dev;
	dspi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_DSPI_MCUX_EDMA
	struct dma_block_config tx_dma_block;
	struct dma_block_config tx_dma_block_end;
	struct dma_block_config rx_dma_block;
	struct spi_edma_config rx_dma_config;
	struct spi_edma_config tx_dma_config;
	int frame_size;
	int tx_transfer_count;
	int rx_transfer_count;
	uint32_t which_pcs;
	struct spi_buf *inner_tx_buffer;
	struct spi_buf *inner_rx_buffer;
#endif
};

#ifdef CONFIG_DSPI_MCUX_EDMA
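/*
 * Each DSPI PUSHR entry is one 32-bit word (a 16-bit command half OR-ed with
 * up to 16 bits of data), so an 8-bit frame consumes 4 inner-buffer bytes per
 * payload byte, while a 16-bit frame packs 2 payload bytes into each 4-byte
 * entry. Returns the inner-buffer size in bytes needed for len payload bytes.
 */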
static int get_size_byte_by_frame_size(int len, int frame_size)
{
	if (frame_size == 8) {
		return (len * 4);
	} else { /* frame_size == 16 */
		return (len * 2);
	}
}
#endif

static int spi_mcux_transfer_next_packet(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	struct spi_context *ctx = &data->ctx;
	dspi_transfer_t transfer;
	status_t status;

	if ((ctx->tx_len == 0) && (ctx->rx_len == 0)) {
		/* nothing left to rx or tx, we're done! */
		LOG_DBG("spi transceive done");
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return 0;
	}

#ifdef CONFIG_DSPI_MCUX_EDMA
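	/*
	 * eDMA path: with dedicated channels, the tx and rx channels are both
	 * started here and the transfer runs to completion in hardware. With
	 * a shared (linked) channel mux, only tx is kicked off here and the
	 * RX FIFO drain interrupt is enabled, so the ISR can start the rx
	 * channel one frame at a time (see spi_mcux_isr()).
	 */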

	if (!config->is_dma_chn_shared) {
		/* start DMA directly in non-shared mode */
		if (ctx->tx_len != 0) {
			int ret = 0;

			LOG_DBG("Starting DMA Ch%u",
				data->tx_dma_config.dma_channel);
			ret = dma_start(data->tx_dma_config.dma_dev,
					data->tx_dma_config.dma_channel);
			if (ret < 0) {
				LOG_ERR("Failed to start DMA Ch%d (%d)",
					data->tx_dma_config.dma_channel, ret);
				return ret;
			}
		}

		if (ctx->rx_len != 0) {
			int ret = 0;

			LOG_DBG("Starting DMA Ch%u",
				data->rx_dma_config.dma_channel);
			ret = dma_start(data->rx_dma_config.dma_dev,
					data->rx_dma_config.dma_channel);
			if (ret < 0) {
				LOG_ERR("Failed to start DMA Ch%d (%d)",
					data->rx_dma_config.dma_channel, ret);
				return ret;
			}
		}
	}

	DSPI_EnableDMA(base, (uint32_t)kDSPI_RxDmaEnable |
					      (uint32_t)kDSPI_TxDmaEnable);
	DSPI_StartTransfer(base);

	if (config->is_dma_chn_shared) {
		/* in master mode, start tx */
		dma_start(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel);
		/* TBD kDSPI_TxFifoFillRequestFlag */
		DSPI_EnableInterrupts(base,
				      (uint32_t)kDSPI_RxFifoDrainRequestFlag);
		LOG_DBG("trigger tx to start master");
	}

	return 0;
#endif

	transfer.configFlags = kDSPI_MasterCtar0 | kDSPI_MasterPcsContinuous |
			       (ctx->config->slave << DSPI_MASTER_PCS_SHIFT);

	if (ctx->tx_len == 0) {
		/* rx only, nothing to tx */
		transfer.txData = NULL;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
	} else if (ctx->rx_len == 0) {
		/* tx only, nothing to rx */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = NULL;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len == ctx->rx_len) {
		/* rx and tx are the same length */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
	} else if (ctx->tx_len > ctx->rx_len) {
		/* Break up the tx into multiple transfers so we don't have to
		 * rx into a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->rx_len;
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	} else {
		/* Break up the rx into multiple transfers so we don't have to
		 * tx from a longer intermediate buffer. Leave chip select
		 * active between transfers.
		 */
		transfer.txData = (uint8_t *) ctx->tx_buf;
		transfer.rxData = ctx->rx_buf;
		transfer.dataSize = ctx->tx_len;
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	}

	if (!(ctx->tx_count <= 1 && ctx->rx_count <= 1)) {
		transfer.configFlags |= kDSPI_MasterActiveAfterTransfer;
	}

	data->transfer_len = transfer.dataSize;

	status = DSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		return status == kStatus_DSPI_Busy ? -EBUSY : -EINVAL;
	}

	return 0;
}

static void spi_mcux_isr(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;

#ifdef CONFIG_DSPI_MCUX_EDMA
	LOG_DBG("isr is called");

	if (0U != (DSPI_GetStatusFlags(base) &
		   (uint32_t)kDSPI_RxFifoDrainRequestFlag)) {
		/* start rx */
		dma_start(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel);
	}
#else
	DSPI_MasterTransferHandleIRQ(base, &data->handle);
#endif
}

#ifdef CONFIG_DSPI_MCUX_EDMA

static void mcux_init_inner_buffer_with_cmd(const struct device *dev,
					    uint16_t dummy)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	dspi_command_data_config_t commandStruct;
	uint32_t *pbuf = data->inner_tx_buffer->buf;
	uint32_t command;
	int i = 0;

	commandStruct.whichPcs = data->which_pcs;

	commandStruct.isEndOfQueue = false;
	commandStruct.clearTransferCount = false;
	commandStruct.whichCtar = config->which_ctar;
	commandStruct.isPcsContinuous = config->enable_continuous_sck;
	command = DSPI_MasterGetFormattedCommand(&(commandStruct));
	for (i = 0; i < data->inner_tx_buffer->len / 4; i++) {
		*pbuf = command | dummy;
		pbuf++;
	}
}

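/*
 * Layout of the 32-bit PUSHR word assembled above, per the Kinetis DSPI
 * reference manual (the bit positions here are an assumption for
 * illustration, not taken from this file):
 *   [31] CONT  [30:28] CTAS  [27] EOQ  [26] CTCNT  [21:16] PCS  [15:0] TXDATA
 * DSPI_MasterGetFormattedCommand() builds the upper command half, which is
 * then OR-ed with each data frame below.
 */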
/**
 * @brief Update the tx data in the internal buffer with the command word
 * embedded; if there is no tx data, use a dummy value.
 * The tx data frame size shall not be bigger than 16 bits, and the overall
 * transfer data in one batch shall not be larger than the FIFO size.
 */
static int mcux_spi_context_data_update(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	uint32_t frame_size_bit = data->frame_size;
	struct spi_context *ctx = (struct spi_context *)&data->ctx;
	uint32_t *pcdata = data->inner_tx_buffer->buf;

	if (frame_size_bit > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) {
		/* TODO: set continuous PCS to support frame sizes larger than 16 bits */
		LOG_ERR("frame size is larger than 16");
		return -EINVAL;
	}

#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
	/* only used when the inner buffer is used to translate the tx format */
	if (CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 <
	    get_size_byte_by_frame_size(ctx->current_tx->len, frame_size_bit)) {
		/* the inner buffer cannot hold all of the transferred data */
		LOG_ERR("inner buffer too small to hold all data: expected %d, actual %d",
			ctx->current_tx->len * 8 / frame_size_bit,
			(CONFIG_MCUX_DSPI_BUFFER_SIZE * 4 / frame_size_bit));
		return -EINVAL;
	}

	if (frame_size_bit == 8) {
		int i = 0;
		uint8_t *pdata = (uint8_t *)ctx->tx_buf;

		if (pdata) {
			do {
				uint16_t temp_data = 0;

				temp_data = *pdata;
				pdata++;
				*pcdata |= temp_data;
				pcdata++;
				i++;
			} while (i < ctx->current_tx->len &&
				 i < data->inner_tx_buffer->len);
		}
		/* mark the last entry */
		if (i == ctx->current_tx->len) {
			--pcdata;
			*pcdata |= SPI_PUSHR_EOQ(1) | SPI_PUSHR_CTCNT(1);
			LOG_DBG("last pcdata is %x", *pcdata);
		}
	} else if (frame_size_bit == 16) {
		int i = 0;
		uint16_t *pdata = (uint16_t *)ctx->tx_buf;

		if (pdata) {
			do {
				*pcdata |= *pdata;
				LOG_DBG("pcdata %d is %x", i / 2, *pcdata);
				pdata++;
				pcdata++;
				i += 2;
			} while (i < ctx->current_tx->len &&
				 i < data->inner_tx_buffer->len);
		}
		if (i == ctx->current_tx->len) {
			/* mark the last entry */
			--pcdata;
			*pcdata |= SPI_PUSHR_EOQ(1);
			LOG_DBG("last pcdata is %x", *pcdata);
		}
	} else {
		/* TODO: support other frame sizes */
		LOG_ERR("DMA mode only supports 8/16-bit frame sizes");
		return -EINVAL;
	}

#endif /* CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA */

	return 0;
}

static int update_tx_dma(const struct device *dev)
{
	uint32_t tx_size = 0;
	uint8_t *tx_buf;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	uint32_t frame_size = data->frame_size;
	bool rx_only = false;

	DSPI_DisableDMA(base, (uint32_t)kDSPI_TxDmaEnable);
	if (data->ctx.tx_len == 0) {
		LOG_DBG("no tx data, no need to set up DMA");
		return 0;
	}

	if (data->ctx.current_tx && data->ctx.current_tx->len > 0 &&
	    data->ctx.current_tx->buf != NULL) {
#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
		tx_size = get_size_byte_by_frame_size(data->transfer_len,
						      frame_size);
		tx_buf = data->inner_tx_buffer->buf;
#else
		/* the buffer is expected to be pre-set */
		tx_size = get_size_byte_by_frame_size(data->ctx.current_tx->len,
						      frame_size);
		LOG_DBG("tx size is %d", tx_size);
		tx_buf = data->ctx.current_tx->buf;
#endif
	} else {
		tx_buf = data->inner_tx_buffer->buf;
		tx_size = get_size_byte_by_frame_size(data->transfer_len,
						      frame_size);
		rx_only = true;
		LOG_DBG("rx only 0x%x, size %d", (uint32_t)tx_buf, tx_size);
	}

	data->tx_dma_block.source_address = (uint32_t)tx_buf;
	data->tx_dma_block.dest_address =
		DSPI_MasterGetTxRegisterAddress(base);
	data->tx_dma_block.next_block = NULL;
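	/*
	 * In shared-channel mode the tx channel is retriggered per frame, so
	 * each request moves a single 32-bit PUSHR entry (4 bytes) rather
	 * than the whole prepared buffer.
	 */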
	if (config->is_dma_chn_shared) {
		/* transfer FIFO size data */
		data->tx_dma_block.block_size = 4;
	} else {
		data->tx_dma_block.block_size = tx_size;
	}

	data->tx_dma_config.dma_cfg.user_data = (void *) dev;
	dma_config(data->tx_dma_config.dma_dev, data->tx_dma_config.dma_channel,
		   (struct dma_config *)&data->tx_dma_config.dma_cfg);

	return 0;
}

static int update_rx_dma(const struct device *dev)
{
	uint32_t rx_size = 0;
	uint8_t *rx_buf;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	uint32_t frame_size_byte = (data->frame_size >> 3);
	bool tx_only = false;

	DSPI_DisableDMA(base, (uint32_t)kDSPI_RxDmaEnable);
	if (data->ctx.rx_len == 0) {
		LOG_DBG("no rx data, no need to set up DMA");
		return 0;
	}

	if (data->ctx.current_rx) {
		rx_size = data->transfer_len;
		if (data->ctx.rx_buf != NULL) {
			rx_buf = data->ctx.rx_buf;
		} else {
			rx_buf = data->inner_rx_buffer->buf;
		}
	} else {
		/* tx only */
		rx_buf = data->inner_rx_buffer->buf;
		rx_size = data->transfer_len;
		tx_only = true;
		LOG_DBG("tx only 0x%x, size %d", (uint32_t)rx_buf, rx_size);
	}

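	/*
	 * Shared-channel mode chains rx to tx: when the rx channel finishes
	 * a frame, it triggers the linked tx channel to push the next PUSHR
	 * entry. The link is dropped on the last frame so the transfer can
	 * drain without queueing extra tx data.
	 */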
	if (config->is_dma_chn_shared) {
		if (data->ctx.rx_len == 1) {
			/* do not link the tx channel on the last frame */
			LOG_DBG("do not link tx/rx channel for last one");
			data->rx_dma_config.dma_cfg.source_chaining_en = 0;
			data->rx_dma_config.dma_cfg.dest_chaining_en = 0;
		} else {
			LOG_DBG("shared mux mode, link tx/rx channel");
			data->rx_dma_config.dma_cfg.source_chaining_en = 1;
			data->rx_dma_config.dma_cfg.dest_chaining_en = 1;
			data->rx_dma_config.dma_cfg.linked_channel =
				data->tx_dma_config.dma_channel;
		}

		data->rx_dma_block.dest_address = (uint32_t)rx_buf;
		data->rx_dma_block.source_address =
			DSPI_GetRxRegisterAddress(base);
		/* transfer one frame at a time in shared mode */
		data->rx_dma_block.block_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_burst_length =
			frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte;

	} else {
		data->rx_dma_block.dest_address = (uint32_t)rx_buf;
		data->rx_dma_block.source_address =
			DSPI_GetRxRegisterAddress(base);
		data->rx_dma_block.block_size = rx_size;
		data->rx_dma_config.dma_cfg.source_burst_length =
			frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_burst_length = frame_size_byte;
		data->rx_dma_config.dma_cfg.source_data_size = frame_size_byte;
		data->rx_dma_config.dma_cfg.dest_data_size = frame_size_byte;
	}

	data->rx_dma_config.dma_cfg.user_data = (void *) dev;
	dma_config(data->rx_dma_config.dma_dev, data->rx_dma_config.dma_channel,
		   (struct dma_config *)&data->rx_dma_config.dma_cfg);

	return 0;
}

static int configure_dma(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;

	if (config->is_dma_chn_shared) {
		LOG_DBG("shared DMA request");
	}
	update_tx_dma(dev);
	update_rx_dma(dev);

	return 0;
}

static void dma_callback(const struct device *dma_dev, void *callback_arg,
			 uint32_t channel, int error_code)
{
	const struct device *dev = (const struct device *)callback_arg;
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
	struct spi_mcux_data *data = dev->data;

	LOG_DBG("=dma call back @channel %d=", channel);

	if (error_code < 0) {
		LOG_ERR("error %d occurred, skipping callback processing", error_code);
		return;
	}

	if (channel == data->tx_dma_config.dma_channel) {
		LOG_DBG("ctx.tx_len is %d", data->ctx.tx_len);
		LOG_DBG("tx count %d", data->ctx.tx_count);

		spi_context_update_tx(&data->ctx, 1, data->transfer_len);
		LOG_DBG("tx count %d", data->ctx.tx_count);
		LOG_DBG("tx buf/len %p/%zu", data->ctx.tx_buf,
			data->ctx.tx_len);
		data->tx_transfer_count++;
		/* tx done */
	} else {
		LOG_DBG("ctx.rx_len is %d", data->ctx.rx_len);
		LOG_DBG("rx count %d", data->ctx.rx_count);
		spi_context_update_rx(&data->ctx, 1, data->transfer_len);
		LOG_DBG("rx count %d", data->ctx.rx_count);
		/* set up the inner tx buffer */
		LOG_DBG("rx buf/len %p/%zu", data->ctx.rx_buf,
			data->ctx.rx_len);
		data->rx_transfer_count++;
	}

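	/*
	 * Proceed only once both channels have completed the current chunk;
	 * whichever callback arrives second prepares and starts the next
	 * packet.
	 */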
	if (data->tx_transfer_count == data->rx_transfer_count) {
		LOG_DBG("start next packet");
		DSPI_StopTransfer(base);
		DSPI_FlushFifo(base, true, true);
		DSPI_ClearStatusFlags(base,
				      (uint32_t)kDSPI_AllStatusFlag);
		mcux_init_inner_buffer_with_cmd(dev, 0);
		mcux_spi_context_data_update(dev);

		if (config->is_dma_chn_shared) {
			data->transfer_len = data->frame_size >> 3;
		} else {
			if (data->ctx.tx_len == 0) {
				data->transfer_len = data->ctx.rx_len;
			} else if (data->ctx.rx_len == 0) {
				data->transfer_len = data->ctx.tx_len;
			} else {
				data->transfer_len =
					data->ctx.tx_len > data->ctx.rx_len ?
						data->ctx.rx_len :
						data->ctx.tx_len;
			}
		}
		update_tx_dma(dev);
		update_rx_dma(dev);
		spi_mcux_transfer_next_packet(dev);
	} else if (data->ctx.rx_len == 0 && data->ctx.tx_len == 0) {
		LOG_DBG("end of transfer");
		DSPI_StopTransfer(base);
		DSPI_FlushFifo(base, true, true);
		DSPI_ClearStatusFlags(base,
				      (uint32_t)kDSPI_AllStatusFlag);
		data->transfer_len = 0;
		spi_mcux_transfer_next_packet(dev);
	}
	LOG_DBG("TX/RX DMA callback done");
}

#else

static void spi_mcux_master_transfer_callback(SPI_Type *base,
	      dspi_master_handle_t *handle, status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;

	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);

	spi_mcux_transfer_next_packet(data->dev);
}

#endif /* CONFIG_DSPI_MCUX_EDMA */

static int spi_mcux_configure(const struct device *dev,
			      const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	SPI_Type *base = config->base;
	dspi_master_config_t master_config;
	uint32_t clock_freq;
	uint32_t word_size;

	dspi_master_ctar_config_t *ctar_config = &master_config.ctarConfig;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	DSPI_MasterGetDefaultConfig(&master_config);

	master_config.whichPcs = 1U << spi_cfg->slave;
	master_config.whichCtar = config->which_ctar;
	master_config.pcsActiveHighOrLow =
		(spi_cfg->operation & SPI_CS_ACTIVE_HIGH) ?
			kDSPI_PcsActiveHigh :
			kDSPI_PcsActiveLow;
	master_config.samplePoint = config->samplePoint;
	master_config.enableContinuousSCK = config->enable_continuous_sck;
	master_config.enableRxFifoOverWrite = config->enable_rxfifo_overwrite;
	master_config.enableModifiedTimingFormat =
		config->enable_modified_timing_format;

	if (spi_cfg->slave >= FSL_FEATURE_DSPI_CHIP_SELECT_COUNT) {
		LOG_ERR("Slave %d must be less than %d",
			    spi_cfg->slave, FSL_FEATURE_DSPI_CHIP_SELECT_COUNT);
		return -EINVAL;
	}

	word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	if (word_size > FSL_FEATURE_DSPI_MAX_DATA_WIDTH) {
		LOG_ERR("Word size %d is greater than %d",
			    word_size, FSL_FEATURE_DSPI_MAX_DATA_WIDTH);
		return -EINVAL;
	}

	ctar_config->bitsPerFrame = word_size;

	ctar_config->cpol =
		(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
		? kDSPI_ClockPolarityActiveLow
		: kDSPI_ClockPolarityActiveHigh;

	ctar_config->cpha =
		(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
		? kDSPI_ClockPhaseSecondEdge
		: kDSPI_ClockPhaseFirstEdge;

	ctar_config->direction =
		(spi_cfg->operation & SPI_TRANSFER_LSB)
		? kDSPI_LsbFirst
		: kDSPI_MsbFirst;

	ctar_config->baudRate = spi_cfg->frequency;

	ctar_config->pcsToSckDelayInNanoSec = config->pcs_sck_delay;
	ctar_config->lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
	ctar_config->betweenTransferDelayInNanoSec = config->transfer_delay;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	if (clock_control_get_rate(config->clock_dev, config->clock_subsys,
				   &clock_freq)) {
		return -EINVAL;
	}

	LOG_DBG("clock_freq is %d", clock_freq);

	DSPI_MasterInit(base, &master_config, clock_freq);

#ifdef CONFIG_DSPI_MCUX_EDMA
	DSPI_StopTransfer(base);
	DSPI_FlushFifo(base, true, true);
	DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag);
	/* record the frame_size setting for DMA */
	data->frame_size = word_size;
	/* keep the pcs settings */
	data->which_pcs = 1U << spi_cfg->slave;
#ifdef CONFIG_MCUX_DSPI_EDMA_SHUFFLE_DATA
	mcux_init_inner_buffer_with_cmd(dev, 0);
#endif
#else
	DSPI_MasterTransferCreateHandle(base, &data->handle,
					spi_mcux_master_transfer_callback,
					data);

	DSPI_SetDummyData(base, 0);
#endif

	data->ctx.config = spi_cfg;

	return 0;
}

static int transceive(const struct device *dev,
		      const struct spi_config *spi_cfg,
		      const struct spi_buf_set *tx_bufs,
		      const struct spi_buf_set *rx_bufs,
		      bool asynchronous,
		      spi_callback_t cb,
		      void *userdata)
{
	struct spi_mcux_data *data = dev->data;
	int ret;
#ifdef CONFIG_DSPI_MCUX_EDMA
	const struct spi_mcux_config *config = dev->config;
	SPI_Type *base = config->base;
#endif

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);

	ret = spi_mcux_configure(dev, spi_cfg);
	if (ret) {
		goto out;
	}

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_DSPI_MCUX_EDMA
	DSPI_StopTransfer(base);
	DSPI_FlushFifo(base, true, true);
	DSPI_ClearStatusFlags(base, (uint32_t)kDSPI_AllStatusFlag);
	/* set up the tx buffer with the end-of-queue command embedded */
	mcux_init_inner_buffer_with_cmd(dev, 0);
	mcux_spi_context_data_update(dev);
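	/*
	 * In shared-channel mode one frame moves per trigger, so the unit of
	 * progress is the frame size in bytes; otherwise transfer as much as
	 * both buffers allow in one shot, i.e. min(tx_len, rx_len).
	 */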
	if (config->is_dma_chn_shared) {
		data->transfer_len = data->frame_size >> 3;
	} else {
		data->transfer_len = data->ctx.tx_len > data->ctx.rx_len ?
					     data->ctx.rx_len :
					     data->ctx.tx_len;
	}
	data->tx_transfer_count = 0;
	data->rx_transfer_count = 0;
	configure_dma(dev);
#endif

	ret = spi_mcux_transfer_next_packet(dev);
	if (ret) {
		goto out;
	}

	ret = spi_context_wait_for_completion(&data->ctx);
out:
	spi_context_release(&data->ctx, ret);

	return ret;
}

static int spi_mcux_transceive(const struct device *dev,
			       const struct spi_config *spi_cfg,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_mcux_transceive_async(const struct device *dev,
				     const struct spi_config *spi_cfg,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_mcux_release(const struct device *dev,
			    const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_mcux_init(const struct device *dev)
{
	int err;
	struct spi_mcux_data *data = dev->data;
	const struct spi_mcux_config *config = dev->config;

#ifdef CONFIG_DSPI_MCUX_EDMA
	enum dma_channel_filter spi_filter = DMA_CHANNEL_NORMAL;
	const struct device *dma_dev;

	dma_dev = data->rx_dma_config.dma_dev;
	data->rx_dma_config.dma_channel =
	  dma_request_channel(dma_dev, (void *)&spi_filter);
	dma_dev = data->tx_dma_config.dma_dev;
	data->tx_dma_config.dma_channel =
	  dma_request_channel(dma_dev, (void *)&spi_filter);
#else
	config->irq_config_func(dev);
#endif
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err != 0) {
		return err;
	}

	data->dev = dev;

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static DEVICE_API(spi, spi_mcux_driver_api) = {
	.transceive = spi_mcux_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_mcux_release,
};
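
/*
 * Typical use from application code goes through the generic SPI API rather
 * than this driver directly. A minimal sketch (the devicetree node label
 * spi0_dev and the transfer contents are assumptions for illustration):
 *
 *   static const struct spi_dt_spec spec = SPI_DT_SPEC_GET(
 *           DT_NODELABEL(spi0_dev),
 *           SPI_WORD_SET(8) | SPI_OP_MODE_MASTER, 0);
 *   uint8_t tx[2] = { 0x9f, 0x00 }, rx[2];
 *   const struct spi_buf tx_buf = { .buf = tx, .len = sizeof(tx) };
 *   const struct spi_buf rx_buf = { .buf = rx, .len = sizeof(rx) };
 *   const struct spi_buf_set tx_set = { .buffers = &tx_buf, .count = 1 };
 *   const struct spi_buf_set rx_set = { .buffers = &rx_buf, .count = 1 };
 *   int ret = spi_transceive_dt(&spec, &tx_set, &rx_set);
 */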


/* if a is true, expand to b; otherwise expand to 1 */
#define _UTIL_AND2(a, b) COND_CODE_1(UTIL_BOOL(a), (b), (1))
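
/*
 * Example: _UTIL_AND2(DT_INST_NODE_HAS_PROP(id, nxp_rx_tx_chn_share), 2)
 * expands to 2 when the nxp,rx-tx-chn-share property is present and to 1
 * otherwise; it is used below to size block_count in the rx DMA config.
 */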

#ifdef CONFIG_DSPI_MCUX_EDMA

#define TX_BUFFER(id)							\
	static uint32_t							\
		edma_tx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2];	\
	static struct spi_buf spi_edma_tx_buffer_##id = {		\
		.buf = edma_tx_buffer_##id,				\
		.len = CONFIG_MCUX_DSPI_BUFFER_SIZE,			\
	}

#define RX_BUFFER(id)							\
	static uint32_t							\
		edma_rx_buffer_##id[CONFIG_MCUX_DSPI_BUFFER_SIZE >> 2];	\
	static struct spi_buf spi_edma_rx_buffer_##id = {		\
		.buf = edma_rx_buffer_##id,				\
		.len = CONFIG_MCUX_DSPI_BUFFER_SIZE,			\
	}

#define TX_DMA_CONFIG(id)						\
	.inner_tx_buffer = &spi_edma_tx_buffer_##id,			\
	.tx_dma_config = {						\
		.dma_dev =						\
		    DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)),	\
		.dma_cfg = {						\
			.source_burst_length = 4,			\
			.dest_burst_length = 4,				\
			.source_data_size = 4,				\
			.dest_data_size = 4,				\
			.dma_callback = dma_callback,			\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count = 1,				\
			.head_block = &spi_mcux_data_##id.tx_dma_block,	\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, tx, source),			\
		},							\
	},

#define RX_DMA_CONFIG(id)						\
	.inner_rx_buffer = &spi_edma_rx_buffer_##id,			\
	.rx_dma_config = {						\
		.dma_dev =						\
		    DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)),	\
		.dma_cfg = {						\
			.source_burst_length = 2,			\
			.dest_burst_length = 2,				\
			.source_data_size = 2,				\
			.dest_data_size = 2,				\
			.dma_callback = dma_callback,			\
			.complete_callback_en = 1,			\
			.error_callback_dis = 0,			\
			.block_count =					\
			_UTIL_AND2(DT_INST_NODE_HAS_PROP(		\
				id, nxp_rx_tx_chn_share), 2),		\
			.head_block = &spi_mcux_data_##id.rx_dma_block,	\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(		\
				id, rx, source),			\
		},							\
	},
#else
#define TX_BUFFER(id)
#define RX_BUFFER(id)
#define TX_DMA_CONFIG(id)
#define RX_DMA_CONFIG(id)

#endif

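/*
 * The instantiation macro below expects a devicetree node along these lines
 * (the label, DMA cells, and property values are illustrative assumptions,
 * not taken from a real board):
 *
 *   &spi0 {
 *           compatible = "nxp,dspi";
 *           status = "okay";
 *           ctar = <0>;
 *           sample-point = <0>;
 *           dmas = <&edma 0 16>, <&edma 1 17>;
 *           dma-names = "rx", "tx";
 *           nxp,rx-tx-chn-share;
 *   };
 */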
#define SPI_MCUX_DSPI_DEVICE(id)					\
	PINCTRL_DT_INST_DEFINE(id);					\
	static void spi_mcux_config_func_##id(const struct device *dev);\
	TX_BUFFER(id);							\
	RX_BUFFER(id);							\
	static struct spi_mcux_data spi_mcux_data_##id = {		\
		SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##id, ctx),		\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(id), ctx)	\
		TX_DMA_CONFIG(id) RX_DMA_CONFIG(id)			\
	};								\
	static const struct spi_mcux_config spi_mcux_config_##id = {	\
		.base = (SPI_Type *)DT_INST_REG_ADDR(id),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)),	\
		.clock_subsys =						\
		(clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name),	\
		.irq_config_func = spi_mcux_config_func_##id,		\
		.pcs_sck_delay =					\
		    DT_INST_PROP_OR(id, pcs_sck_delay, 0),		\
		.sck_pcs_delay =					\
		    DT_INST_PROP_OR(id, sck_pcs_delay, 0),		\
		.transfer_delay =					\
		    DT_INST_PROP_OR(id, transfer_delay, 0),		\
		.which_ctar =						\
		    DT_INST_PROP_OR(id, ctar, 0),			\
		.samplePoint =						\
		    DT_INST_PROP_OR(id, sample_point, 0),		\
		.enable_continuous_sck =				\
		    DT_INST_PROP(id, continuous_sck),			\
		.enable_rxfifo_overwrite =				\
		    DT_INST_PROP(id, rx_fifo_overwrite),		\
		.enable_modified_timing_format =			\
		    DT_INST_PROP(id, modified_timing_format),		\
		.is_dma_chn_shared =					\
		    DT_INST_PROP(id, nxp_rx_tx_chn_share),		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id),		\
	};								\
	SPI_DEVICE_DT_INST_DEFINE(id,					\
			    spi_mcux_init,				\
			    NULL,					\
			    &spi_mcux_data_##id,			\
			    &spi_mcux_config_##id,			\
			    POST_KERNEL,				\
			    CONFIG_SPI_INIT_PRIORITY,			\
			    &spi_mcux_driver_api);			\
	static void spi_mcux_config_func_##id(const struct device *dev)\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(id),				\
			    DT_INST_IRQ(id, priority),			\
			    spi_mcux_isr, DEVICE_DT_INST_GET(id),	\
			    0);						\
		irq_enable(DT_INST_IRQN(id));				\
	}

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_DSPI_DEVICE)