/*
 * Copyright (c) 2024 Analog Devices, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT adi_max32_spi

#include <string.h>
#include <errno.h>
#if CONFIG_SPI_MAX32_DMA
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/clock_control/adi_max32_clock_control.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>

#include <wrap_max32_spi.h>

LOG_MODULE_REGISTER(spi_max32, CONFIG_SPI_LOG_LEVEL);
#include "spi_context.h"
#ifdef CONFIG_SPI_MAX32_DMA
struct max32_spi_dma_config {
	const struct device *dev;
	const uint32_t channel;
	const uint32_t slot;
};
#endif /* CONFIG_SPI_MAX32_DMA */

struct max32_spi_config {
	mxc_spi_regs_t *regs;
	const struct pinctrl_dev_config *pctrl;
	const struct device *clock;
	struct max32_perclk perclk;
#ifdef CONFIG_SPI_MAX32_INTERRUPT
	void (*irq_config_func)(const struct device *dev);
#endif /* CONFIG_SPI_MAX32_INTERRUPT */
#ifdef CONFIG_SPI_MAX32_DMA
	struct max32_spi_dma_config tx_dma;
	struct max32_spi_dma_config rx_dma;
#endif /* CONFIG_SPI_MAX32_DMA */
};

/* Device run time data */
struct max32_spi_data {
	struct spi_context ctx;
	const struct device *dev;
	mxc_spi_req_t req;
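	/* Scratch buffer handed to the HAL when the caller supplies no TX or
	 * RX buffer; two bytes covers the widest (9-16 bit) data frame.
	 */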
	uint8_t dummy[2];

#ifdef CONFIG_SPI_MAX32_DMA
	volatile uint8_t dma_stat;
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_ASYNC
	struct k_work async_work;
#endif /* CONFIG_SPI_ASYNC */

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx;
#endif
};

#ifdef CONFIG_SPI_MAX32_DMA
#define SPI_MAX32_DMA_ERROR_FLAG   0x01U
#define SPI_MAX32_DMA_RX_DONE_FLAG 0x02U
#define SPI_MAX32_DMA_TX_DONE_FLAG 0x04U
#define SPI_MAX32_DMA_DONE_FLAG    (SPI_MAX32_DMA_RX_DONE_FLAG | SPI_MAX32_DMA_TX_DONE_FLAG)
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_MAX32_INTERRUPT
static void spi_max32_callback(mxc_spi_req_t *req, int error);
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

static int spi_configure(const struct device *dev, const struct spi_config *config)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	mxc_spi_regs_t *regs = cfg->regs;
	struct max32_spi_data *data = dev->data;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) & SPI_OP_MODE_SLAVE) {
		return -ENOTSUP;
	}

	int master_mode = 1;
	int quad_mode = 0;
	int num_slaves = 1;
	int ss_polarity = (config->operation & SPI_CS_ACTIVE_HIGH) ? 1 : 0;
	unsigned int spi_speed = (unsigned int)config->frequency;

	ret = Wrap_MXC_SPI_Init(regs, master_mode, quad_mode, num_slaves, ss_polarity, spi_speed);
	if (ret) {
		return ret;
	}

	int cpol = (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0;
	int cpha = (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 1 : 0;

	if (cpol && cpha) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_3);
	} else if (cpha) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_2);
	} else if (cpol) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_1);
	} else {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_0);
	}
	if (ret) {
		return ret;
	}

	ret = MXC_SPI_SetDataSize(regs, SPI_WORD_SIZE_GET(config->operation));
	if (ret) {
		return ret;
	}

#if defined(CONFIG_SPI_EXTENDED_MODES)
	switch (config->operation & SPI_LINES_MASK) {
	case SPI_LINES_QUAD:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_QUAD);
		break;
	case SPI_LINES_DUAL:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_DUAL);
		break;
	case SPI_LINES_OCTAL:
		ret = -ENOTSUP;
		break;
	case SPI_LINES_SINGLE:
	default:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_STANDARD);
		break;
	}

	if (ret) {
		return ret;
	}
#endif

	data->ctx.config = config;

	return ret;
}

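/* Data frame size shift: 0 for frames of up to 8 bits (one byte per word),
 * 1 for 9-16 bit frames (two bytes per word).
 */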
static inline int spi_max32_get_dfs_shift(const struct spi_context *ctx)
{
	if (SPI_WORD_SIZE_GET(ctx->config->operation) < 9) {
		return 0;
	}

	return 1;
}

static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req)
{
	req->rxCnt = 0;
	req->txCnt = 0;

	if (spi->ctrl0 & ADI_MAX32_SPI_CTRL_MASTER_MODE) {
		MXC_SPI_SetSlave(spi, req->ssIdx);
	}

	if (req->rxData && req->rxLen) {
		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
			     req->rxLen << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
	} else {
		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_RX_NUM_CHAR;
		spi->dma &= ~MXC_F_SPI_DMA_RX_FIFO_EN;
	}

	if (req->txLen) {
		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR,
			     req->txLen << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
	} else {
		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_TX_NUM_CHAR;
		spi->dma &= ~MXC_F_SPI_DMA_TX_FIFO_EN;
	}

	spi->dma |= (ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR | ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR);
	spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;
	MXC_SPI_ClearFlags(spi);
}

#ifndef CONFIG_SPI_MAX32_INTERRUPT
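/* Polling-mode transfer: busy-loop keeping the TX FIFO fed and the RX FIFO
 * drained until both byte counts are satisfied, then spin on the controller's
 * master-done flag.
 */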
static int spi_max32_transceive_sync(mxc_spi_regs_t *spi, struct max32_spi_data *data,
				     uint8_t dfs_shift)
{
	int ret = 0;
	mxc_spi_req_t *req = &data->req;
	uint32_t remain, flags, tx_len, rx_len;

	MXC_SPI_ClearTXFIFO(spi);
	MXC_SPI_ClearRXFIFO(spi);

	tx_len = req->txLen << dfs_shift;
	rx_len = req->rxLen << dfs_shift;
	do {
		remain = tx_len - req->txCnt;
		if (remain > 0) {
			if (!data->req.txData) {
				req->txCnt += MXC_SPI_WriteTXFIFO(spi, data->dummy,
								  MIN(remain, sizeof(data->dummy)));
			} else {
				req->txCnt +=
					MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain);
			}
			if (!(spi->ctrl0 & MXC_F_SPI_CTRL0_START)) {
				spi->ctrl0 |= MXC_F_SPI_CTRL0_START;
			}
		}

		if (req->rxCnt < rx_len) {
			req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt],
							 rx_len - req->rxCnt);
		}
	} while ((req->txCnt < tx_len) || (req->rxCnt < rx_len));

	do {
		flags = MXC_SPI_GetFlags(spi);
	} while (!(flags & ADI_MAX32_SPI_INT_FL_MST_DONE));
	MXC_SPI_ClearFlags(spi);

	return ret;
}
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

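/* Program and start one contiguous transfer chunk. In interrupt mode this
 * primes the FIFOs and returns, with completion signalled from the ISR; in
 * polling mode it blocks until the chunk has been shifted out.
 */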
static int spi_max32_transceive(const struct device *dev)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
#endif
	uint32_t len;
	uint8_t dfs_shift;

	MXC_SPI_ClearTXFIFO(cfg->regs);

	dfs_shift = spi_max32_get_dfs_shift(ctx);

	len = spi_context_max_continuous_chunk(ctx);

#ifdef CONFIG_SPI_RTIO
	switch (sqe->op) {
	case RTIO_OP_RX:
		len = sqe->rx.buf_len;
		data->req.rxData = sqe->rx.buf;
		data->req.rxLen = sqe->rx.buf_len;
		data->req.txData = NULL;
		data->req.txLen = len >> dfs_shift;
		break;
	case RTIO_OP_TX:
		len = sqe->tx.buf_len;
		data->req.rxLen = 0;
		data->req.rxData = data->dummy;
		data->req.txData = (uint8_t *)sqe->tx.buf;
		data->req.txLen = len >> dfs_shift;
		break;
	case RTIO_OP_TINY_TX:
		len = sqe->tiny_tx.buf_len;
		data->req.txData = (uint8_t *)sqe->tiny_tx.buf;
		data->req.rxData = data->dummy;
		data->req.txLen = len >> dfs_shift;
		data->req.rxLen = 0;
		break;
	case RTIO_OP_TXRX:
		len = sqe->txrx.buf_len;
		data->req.txData = (uint8_t *)sqe->txrx.tx_buf;
		data->req.rxData = sqe->txrx.rx_buf;
		data->req.txLen = len >> dfs_shift;
		data->req.rxLen = len >> dfs_shift;
		break;
	default:
		break;
	}
#else
	data->req.txLen = len >> dfs_shift;
	data->req.txData = (uint8_t *)ctx->tx_buf;
	data->req.rxLen = len >> dfs_shift;
	data->req.rxData = ctx->rx_buf;
	if (!data->req.rxData) {
		/* Pass a dummy buffer to HAL if receive buffer is NULL, otherwise
		 * corrupt data is read during subsequent transactions.
		 */
		data->req.rxData = data->dummy;
		data->req.rxLen = 0;
	}
#endif
	data->req.spi = cfg->regs;
	data->req.ssIdx = ctx->config->slave;
	data->req.ssDeassert = 0;
	data->req.txCnt = 0;
	data->req.rxCnt = 0;
	spi_max32_setup(cfg->regs, &data->req);
#ifdef CONFIG_SPI_MAX32_INTERRUPT
	MXC_SPI_SetTXThreshold(cfg->regs, 1);
	if (data->req.rxLen) {
		MXC_SPI_SetRXThreshold(cfg->regs, 2);
		MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_RX_THD);
	}
	MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_MST_DONE);

	if (!data->req.txData) {
		data->req.txCnt =
			MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy, MIN(len, sizeof(data->dummy)));
	} else {
		data->req.txCnt = MXC_SPI_WriteTXFIFO(cfg->regs, data->req.txData, len);
	}

	MXC_SPI_StartTransmission(cfg->regs);
#else
	ret = spi_max32_transceive_sync(cfg->regs, data, dfs_shift);
	if (ret) {
		ret = -EIO;
	} else {
		spi_context_update_tx(ctx, 1, len);
		spi_context_update_rx(ctx, 1, len);
	}
#endif

	return ret;
}

static int transceive(const struct device *dev, const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool async, spi_callback_t cb, void *userdata)
{
	int ret = 0;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
#ifndef CONFIG_SPI_RTIO
	const struct max32_spi_config *cfg = dev->config;
	bool hw_cs_ctrl = true;
#endif

#ifndef CONFIG_SPI_MAX32_INTERRUPT
	if (async) {
		return -ENOTSUP;
	}
#endif

	spi_context_lock(ctx, async, cb, userdata, config);

#ifndef CONFIG_SPI_RTIO
	ret = spi_configure(dev, config);
	if (ret != 0) {
		spi_context_release(ctx, ret);
		return -EIO;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, true);
	} else {
		cfg->regs->ctrl0 =
			(cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) | MXC_F_SPI_CTRL0_SS_CTRL;
	}

#ifdef CONFIG_SPI_MAX32_INTERRUPT
	do {
		ret = spi_max32_transceive(dev);
		if (!ret) {
			ret = spi_context_wait_for_completion(ctx);
			if (ret || async) {
				break;
			}
		} else {
			break;
		}
	} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
#else
	do {
		ret = spi_max32_transceive(dev);
		if (ret) {
			break;
		}
	} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

	/* Deassert the CS line if HW control disabled */
	if (!async) {
		if (!hw_cs_ctrl) {
			spi_context_cs_control(ctx, false);
		} else {
			cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					      MXC_F_SPI_CTRL0_EN);
			cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}
	}
#else
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	ret = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
#endif
	spi_context_release(ctx, ret);
	return ret;
}

#ifdef CONFIG_SPI_MAX32_DMA
static void spi_max32_dma_callback(const struct device *dev, void *arg, uint32_t channel,
				   int status)
{
	struct max32_spi_data *data = arg;
	const struct device *spi_dev = data->dev;
	const struct max32_spi_config *config = spi_dev->config;
	uint32_t len;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
	} else {
		/* Identify the origin of this callback */
		if (channel == config->tx_dma.channel) {
			data->dma_stat |= SPI_MAX32_DMA_TX_DONE_FLAG;
		} else if (channel == config->rx_dma.channel) {
			data->dma_stat |= SPI_MAX32_DMA_RX_DONE_FLAG;
		}
	}
	if ((data->dma_stat & SPI_MAX32_DMA_DONE_FLAG) == SPI_MAX32_DMA_DONE_FLAG) {
		len = spi_context_max_continuous_chunk(&data->ctx);
		spi_context_update_tx(&data->ctx, 1, len);
		spi_context_update_rx(&data->ctx, 1, len);
		spi_context_complete(&data->ctx, spi_dev, status == 0 ? 0 : -EIO);
	}
}

static int spi_max32_tx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
				 uint8_t word_shift)
{
	int ret;
	const struct max32_spi_config *config = dev->config;
	struct max32_spi_data *data = dev->data;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};

	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	dma_cfg.dma_callback = spi_max32_dma_callback;
	dma_cfg.user_data = (void *)data;
	dma_cfg.dma_slot = config->tx_dma.slot;
	dma_cfg.block_count = 1;
	dma_cfg.source_data_size = 1U << word_shift;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_data_size = 1U << word_shift;
	dma_cfg.head_block = &dma_blk;
	dma_blk.block_size = len;
	if (buf) {
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		dma_blk.source_address = (uint32_t)buf;
	} else {
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		dma_blk.source_address = (uint32_t)data->dummy;
	}

	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg);
	if (ret < 0) {
		LOG_ERR("Error configuring Tx DMA (%d)", ret);
		return ret;
	}

	return dma_start(config->tx_dma.dev, config->tx_dma.channel);
}

static int spi_max32_rx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
				 uint8_t word_shift)
{
	int ret;
	const struct max32_spi_config *config = dev->config;
	struct max32_spi_data *data = dev->data;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};

	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.dma_callback = spi_max32_dma_callback;
	dma_cfg.user_data = (void *)data;
	dma_cfg.dma_slot = config->rx_dma.slot;
	dma_cfg.block_count = 1;
	dma_cfg.source_data_size = 1U << word_shift;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_data_size = 1U << word_shift;
	dma_cfg.head_block = &dma_blk;
	dma_blk.block_size = len;
	if (buf) {
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		dma_blk.dest_address = (uint32_t)buf;
	} else {
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		dma_blk.dest_address = (uint32_t)data->dummy;
	}
	ret = dma_config(config->rx_dma.dev, config->rx_dma.channel, &dma_cfg);
	if (ret < 0) {
		LOG_ERR("Error configuring Rx DMA (%d)", ret);
		return ret;
	}

	return dma_start(config->rx_dma.dev, config->rx_dma.channel);
}

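/* DMA transfer path. Both channels are always loaded, even for one-way
 * transfers: completion is declared only once the TX and RX done flags are
 * both set in spi_max32_dma_callback(), with the dummy scratch buffer
 * standing in for whichever caller buffer was omitted.
 */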
static int transceive_dma(const struct device *dev, const struct spi_config *config,
			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
			  bool async, spi_callback_t cb, void *userdata)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	mxc_spi_regs_t *spi = cfg->regs;
	struct dma_status status;
	uint32_t len, word_count;
	uint8_t dfs_shift;

	bool hw_cs_ctrl = true;

	spi_context_lock(ctx, async, cb, userdata, config);

	MXC_SPI_ClearTXFIFO(spi);

	ret = dma_get_status(cfg->tx_dma.dev, cfg->tx_dma.channel, &status);
	if (ret < 0 || status.busy) {
		ret = ret < 0 ? ret : -EBUSY;
		goto unlock;
	}

	ret = dma_get_status(cfg->rx_dma.dev, cfg->rx_dma.channel, &status);
	if (ret < 0 || status.busy) {
		ret = ret < 0 ? ret : -EBUSY;
		goto unlock;
	}

	ret = spi_configure(dev, config);
	if (ret != 0) {
		ret = -EIO;
		goto unlock;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, true);
	} else {
		spi->ctrl0 = (spi->ctrl0 & ~MXC_F_SPI_CTRL0_START) | MXC_F_SPI_CTRL0_SS_CTRL;
	}

	MXC_SPI_SetSlave(cfg->regs, ctx->config->slave);

	do {
		len = spi_context_max_continuous_chunk(ctx);
		dfs_shift = spi_max32_get_dfs_shift(ctx);
		word_count = len >> dfs_shift;

		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
			     word_count << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
		spi->dma |= ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR;
		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
		spi->dma |= ADI_MAX32_SPI_DMA_RX_DMA_EN;
		MXC_SPI_SetRXThreshold(spi, 0);

		ret = spi_max32_rx_dma_load(dev, ctx->rx_buf, len, dfs_shift);
		if (ret < 0) {
			goto unlock;
		}

		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR,
			     word_count << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
		spi->dma |= ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR;
		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
		spi->dma |= ADI_MAX32_SPI_DMA_TX_DMA_EN;
		MXC_SPI_SetTXThreshold(spi, 1);

		ret = spi_max32_tx_dma_load(dev, ctx->tx_buf, len, dfs_shift);
		if (ret < 0) {
			goto unlock;
		}

		data->dma_stat = 0;
		MXC_SPI_StartTransmission(spi);
		ret = spi_context_wait_for_completion(ctx);
	} while (!ret && (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)));

	if (ret < 0) {
		dma_stop(cfg->tx_dma.dev, cfg->tx_dma.channel);
		dma_stop(cfg->rx_dma.dev, cfg->rx_dma.channel);
	}

unlock:
	/* Deassert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, false);
	} else {
		spi->ctrl0 &=
			~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL | MXC_F_SPI_CTRL0_EN);
		spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;
	}

	spi_context_release(ctx, ret);

	return ret;
}
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_RTIO
static void spi_max32_iodev_complete(const struct device *dev, int status);

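/* Kick off the SQE at the head of the current RTIO transaction; any op other
 * than a transfer completes immediately with -EINVAL.
 */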
static void spi_max32_iodev_start(const struct device *dev)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
	int ret = 0;

	switch (sqe->op) {
	case RTIO_OP_RX:
	case RTIO_OP_TX:
	case RTIO_OP_TINY_TX:
	case RTIO_OP_TXRX:
		ret = spi_max32_transceive(dev);
		break;
	default:
		spi_max32_iodev_complete(dev, -EINVAL);
		break;
	}
	if (ret != 0) {
		spi_max32_iodev_complete(dev, -EIO);
	}
}

static inline void spi_max32_iodev_prepare_start(const struct device *dev)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct spi_dt_spec *spi_dt_spec = rtio_ctx->txn_curr->sqe.iodev->data;
	struct spi_config *spi_config = &spi_dt_spec->config;
	struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config;
	int ret;
	bool hw_cs_ctrl = true;

	ret = spi_configure(dev, spi_config);
	__ASSERT(!ret, "%d", ret);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(spi_config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(&data->ctx, true);
	} else {
		cfg->regs->ctrl0 = (cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) |
				   MXC_F_SPI_CTRL0_SS_CTRL;
	}
}

static void spi_max32_iodev_complete(const struct device *dev, int status)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (!status && (rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION)) {
		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
		spi_max32_iodev_start(dev);
	} else {
		struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config;
		bool hw_cs_ctrl = true;

		if (!hw_cs_ctrl) {
			spi_context_cs_control(&data->ctx, false);
		} else {
			cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					      MXC_F_SPI_CTRL0_EN);
			cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}

		if (spi_rtio_complete(rtio_ctx, status)) {
			spi_max32_iodev_prepare_start(dev);
			spi_max32_iodev_start(dev);
		}
	}
}

static void api_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
		spi_max32_iodev_prepare_start(dev);
		spi_max32_iodev_start(dev);
	}
}
#endif

static int api_transceive(const struct device *dev, const struct spi_config *config,
			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_MAX32_DMA
	const struct max32_spi_config *cfg = dev->config;

	/* Fall back to FIFO transfers when either DMA channel is unassigned */
	if (cfg->tx_dma.channel != 0xFF && cfg->rx_dma.channel != 0xFF) {
		return transceive_dma(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
	}
#endif /* CONFIG_SPI_MAX32_DMA */
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int api_transceive_async(const struct device *dev, const struct spi_config *config,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

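/* HAL request completion callback, invoked from the ISR: advance the SPI
 * context past the finished chunk, schedule the follow-up transfer for
 * asynchronous transactions, and signal completion otherwise.
 */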
#ifdef CONFIG_SPI_MAX32_INTERRUPT
static void spi_max32_callback(mxc_spi_req_t *req, int error)
{
	struct max32_spi_data *data = CONTAINER_OF(req, struct max32_spi_data, req);
	struct spi_context *ctx = &data->ctx;
	const struct device *dev = data->dev;
	uint32_t len;

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (rtio_ctx->txn_head != NULL) {
		spi_max32_iodev_complete(data->dev, 0);
	}
#endif
	len = spi_context_max_continuous_chunk(ctx);
	spi_context_update_tx(ctx, 1, len);
	spi_context_update_rx(ctx, 1, len);
#ifdef CONFIG_SPI_ASYNC
	if (ctx->asynchronous && (spi_context_tx_on(ctx) || spi_context_rx_on(ctx))) {
		k_work_submit(&data->async_work);
	} else {
		if (spi_cs_is_gpio(ctx->config)) {
			spi_context_cs_control(ctx, false);
		} else {
			req->spi->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					     MXC_F_SPI_CTRL0_EN);
			req->spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}
		spi_context_complete(ctx, dev, error == E_NO_ERROR ? 0 : -EIO);
	}
#else
	spi_context_complete(ctx, dev, error == E_NO_ERROR ? 0 : -EIO);
#endif
}

#ifdef CONFIG_SPI_ASYNC
void spi_max32_async_work_handler(struct k_work *work)
{
	struct max32_spi_data *data = CONTAINER_OF(work, struct max32_spi_data, async_work);
	const struct device *dev = data->dev;
	int ret;

	ret = spi_max32_transceive(dev);
	if (ret) {
		spi_context_complete(&data->ctx, dev, -EIO);
	}
}
#endif /* CONFIG_SPI_ASYNC */

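/* FIFO-threshold and master-done interrupt handler: refill the TX FIFO,
 * drain the RX FIFO, retune the RX threshold to whatever remains, and fire
 * the completion callback once both directions are done.
 */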
static void spi_max32_isr(const struct device *dev)
{
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	mxc_spi_req_t *req = &data->req;
	mxc_spi_regs_t *spi = cfg->regs;
	uint32_t flags, remain;
	uint8_t dfs_shift = spi_max32_get_dfs_shift(&data->ctx);

	flags = MXC_SPI_GetFlags(spi);
	MXC_SPI_ClearFlags(spi);

	remain = (req->txLen << dfs_shift) - req->txCnt;
	if (flags & ADI_MAX32_SPI_INT_FL_TX_THD) {
		if (remain) {
			if (!data->req.txData) {
				req->txCnt += MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy,
								  MIN(remain, sizeof(data->dummy)));
			} else {
				req->txCnt +=
					MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain);
			}
		} else {
			MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD);
		}
	}

	remain = (req->rxLen << dfs_shift) - req->rxCnt;
	if (remain) {
		req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt], remain);
		remain = (req->rxLen << dfs_shift) - req->rxCnt;
		if (remain >= MXC_SPI_FIFO_DEPTH) {
			MXC_SPI_SetRXThreshold(spi, 2);
		} else {
			MXC_SPI_SetRXThreshold(spi, remain);
		}
	} else {
		MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_RX_THD);
	}

	if ((req->txLen == req->txCnt) && (req->rxLen == req->rxCnt)) {
		MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_RX_THD);
		if (flags & ADI_MAX32_SPI_INT_FL_MST_DONE) {
			MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_MST_DONE);
			spi_max32_callback(req, 0);
		}
	}
}
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

static int api_release(const struct device *dev, const struct spi_config *config)
{
	struct max32_spi_data *data = dev->data;

#ifndef CONFIG_SPI_RTIO
	if (!spi_context_configured(&data->ctx, config)) {
		return -EINVAL;
	}
#endif
	spi_context_unlock_unconditionally(&data->ctx);
	return 0;
}

static int spi_max32_init(const struct device *dev)
{
	int ret = 0;
	const struct max32_spi_config *const cfg = dev->config;
	mxc_spi_regs_t *regs = cfg->regs;
	struct max32_spi_data *data = dev->data;

	if (!device_is_ready(cfg->clock)) {
		return -ENODEV;
	}

	MXC_SPI_Shutdown(regs);

	ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk);
	if (ret) {
		return ret;
	}

	ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	data->dev = dev;

#ifdef CONFIG_SPI_RTIO
	spi_rtio_init(data->rtio_ctx, dev);
#endif

#ifdef CONFIG_SPI_MAX32_INTERRUPT
	cfg->irq_config_func(dev);
#ifdef CONFIG_SPI_ASYNC
	k_work_init(&data->async_work, spi_max32_async_work_handler);
#endif
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	return ret;
}

/* SPI driver APIs structure */
static DEVICE_API(spi, spi_max32_api) = {
	.transceive = api_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = api_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = api_iodev_submit,
#endif /* CONFIG_SPI_RTIO */
	.release = api_release,
};

/* SPI driver registration */
#ifdef CONFIG_SPI_MAX32_INTERRUPT
#define SPI_MAX32_CONFIG_IRQ_FUNC(n) .irq_config_func = spi_max32_irq_config_func_##n,

#define SPI_MAX32_IRQ_CONFIG_FUNC(n)                                                               \
	static void spi_max32_irq_config_func_##n(const struct device *dev)                        \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_max32_isr,              \
			    DEVICE_DT_INST_GET(n), 0);                                             \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}
#else
#define SPI_MAX32_CONFIG_IRQ_FUNC(n)
#define SPI_MAX32_IRQ_CONFIG_FUNC(n)
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

#if CONFIG_SPI_MAX32_DMA
#define MAX32_DT_INST_DMA_CTLR(n, name)                                                            \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                                                \
		    (DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, name))), (NULL))

#define MAX32_DT_INST_DMA_CELL(n, name, cell)                                                      \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), (DT_INST_DMAS_CELL_BY_NAME(n, name, cell)),    \
		    (0xff))

#define MAX32_SPI_DMA_INIT(n)                                                                      \
	.tx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, tx),                                               \
	.tx_dma.channel = MAX32_DT_INST_DMA_CELL(n, tx, channel),                                  \
	.tx_dma.slot = MAX32_DT_INST_DMA_CELL(n, tx, slot),                                        \
	.rx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, rx),                                               \
	.rx_dma.channel = MAX32_DT_INST_DMA_CELL(n, rx, channel),                                  \
	.rx_dma.slot = MAX32_DT_INST_DMA_CELL(n, rx, slot),
#else
#define MAX32_SPI_DMA_INIT(n)
#endif

#define DEFINE_SPI_MAX32_RTIO(_num)                                                                \
	SPI_RTIO_DEFINE(max32_spi_rtio_##_num, CONFIG_SPI_MAX32_RTIO_SQ_SIZE,                      \
			CONFIG_SPI_MAX32_RTIO_CQ_SIZE)

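/*
 * Hypothetical devicetree fragment enabling one instance (node, pin group,
 * and GPIO names are illustrative only; see the board .dts and the
 * adi,max32-spi binding for the real ones):
 *
 *	&spi0 {
 *		status = "okay";
 *		pinctrl-0 = <&spi0_pins>;
 *		pinctrl-names = "default";
 *		cs-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
 *	};
 */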
#define DEFINE_SPI_MAX32(_num)                                                                     \
	PINCTRL_DT_INST_DEFINE(_num);                                                              \
	SPI_MAX32_IRQ_CONFIG_FUNC(_num)                                                            \
	COND_CODE_1(CONFIG_SPI_RTIO, (DEFINE_SPI_MAX32_RTIO(_num)), ());                           \
	static const struct max32_spi_config max32_spi_config_##_num = {                           \
		.regs = (mxc_spi_regs_t *)DT_INST_REG_ADDR(_num),                                  \
		.pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num),                                     \
		.clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)),                                 \
		.perclk.bus = DT_INST_CLOCKS_CELL(_num, offset),                                   \
		.perclk.bit = DT_INST_CLOCKS_CELL(_num, bit),                                      \
		MAX32_SPI_DMA_INIT(_num) SPI_MAX32_CONFIG_IRQ_FUNC(_num)};                         \
	static struct max32_spi_data max32_spi_data_##_num = {                                     \
		SPI_CONTEXT_INIT_LOCK(max32_spi_data_##_num, ctx),                                 \
		SPI_CONTEXT_INIT_SYNC(max32_spi_data_##_num, ctx),                                 \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(_num), ctx)                            \
		IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &max32_spi_rtio_##_num))};                \
	SPI_DEVICE_DT_INST_DEFINE(_num, spi_max32_init, NULL, &max32_spi_data_##_num,              \
				  &max32_spi_config_##_num, PRE_KERNEL_2,                          \
				  CONFIG_SPI_INIT_PRIORITY, &spi_max32_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_SPI_MAX32)