/*
 * Copyright (c) 2024 Analog Devices, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT adi_max32_spi

#include <string.h>
#include <errno.h>
#if CONFIG_SPI_MAX32_DMA
#include <zephyr/drivers/dma.h>
#endif
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/clock_control/adi_max32_clock_control.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>

#include <wrap_max32_spi.h>

LOG_MODULE_REGISTER(spi_max32, CONFIG_SPI_LOG_LEVEL);
#include "spi_context.h"

#ifdef CONFIG_SPI_MAX32_DMA
struct max32_spi_dma_config {
	const struct device *dev;
	const uint32_t channel;
	const uint32_t slot;
};
#endif /* CONFIG_SPI_MAX32_DMA */

struct max32_spi_config {
	mxc_spi_regs_t *regs;
	const struct pinctrl_dev_config *pctrl;
	const struct device *clock;
	struct max32_perclk perclk;
#ifdef CONFIG_SPI_MAX32_INTERRUPT
	void (*irq_config_func)(const struct device *dev);
#endif /* CONFIG_SPI_MAX32_INTERRUPT */
#ifdef CONFIG_SPI_MAX32_DMA
	struct max32_spi_dma_config tx_dma;
	struct max32_spi_dma_config rx_dma;
#endif /* CONFIG_SPI_MAX32_DMA */
};

/* Device run time data */
struct max32_spi_data {
	struct spi_context ctx;
	const struct device *dev;
	mxc_spi_req_t req;
	uint8_t dummy[2];

#ifdef CONFIG_SPI_MAX32_DMA
	volatile uint8_t dma_stat;
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_ASYNC
	struct k_work async_work;
#endif /* CONFIG_SPI_ASYNC */

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx;
#endif
};

#ifdef CONFIG_SPI_MAX32_DMA
#define SPI_MAX32_DMA_ERROR_FLAG   0x01U
#define SPI_MAX32_DMA_RX_DONE_FLAG 0x02U
#define SPI_MAX32_DMA_TX_DONE_FLAG 0x04U
#define SPI_MAX32_DMA_DONE_FLAG    (SPI_MAX32_DMA_RX_DONE_FLAG | SPI_MAX32_DMA_TX_DONE_FLAG)
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_MAX32_INTERRUPT
static void spi_max32_callback(mxc_spi_req_t *req, int error);
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

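/*
 * Apply a struct spi_config to the controller. Only controller (master) mode
 * is supported; CPOL/CPHA are translated to the MSDK mxc_spi_mode_t values
 * passed to MXC_SPI_SetMode(). Returns early with success if the context is
 * already configured identically.
 */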
static int spi_configure(const struct device *dev, const struct spi_config *config)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	mxc_spi_regs_t *regs = cfg->regs;
	struct max32_spi_data *data = dev->data;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (SPI_OP_MODE_GET(config->operation) & SPI_OP_MODE_SLAVE) {
		return -ENOTSUP;
	}

	int master_mode = 1;
	int quad_mode = 0;
	int num_slaves = 1;
	int ss_polarity = (config->operation & SPI_CS_ACTIVE_HIGH) ? 1 : 0;
	unsigned int spi_speed = (unsigned int)config->frequency;

	ret = Wrap_MXC_SPI_Init(regs, master_mode, quad_mode, num_slaves, ss_polarity, spi_speed);
	if (ret) {
		return ret;
	}

	int cpol = (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) ? 1 : 0;
	int cpha = (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) ? 1 : 0;

	if (cpol && cpha) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_3);
	} else if (cpha) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_2);
	} else if (cpol) {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_1);
	} else {
		ret = MXC_SPI_SetMode(regs, SPI_MODE_0);
	}
	if (ret) {
		return ret;
	}

	ret = MXC_SPI_SetDataSize(regs, SPI_WORD_SIZE_GET(config->operation));
	if (ret) {
		return ret;
	}

#if defined(CONFIG_SPI_EXTENDED_MODES)
	switch (config->operation & SPI_LINES_MASK) {
	case SPI_LINES_QUAD:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_QUAD);
		break;
	case SPI_LINES_DUAL:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_DUAL);
		break;
	case SPI_LINES_OCTAL:
		ret = -ENOTSUP;
		break;
	case SPI_LINES_SINGLE:
	default:
		ret = MXC_SPI_SetWidth(regs, SPI_WIDTH_STANDARD);
		break;
	}

	if (ret) {
		return ret;
	}
#endif

	data->ctx.config = config;

	return ret;
}

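/*
 * Data frame size shift: 0 for word sizes up to 8 bits (one byte per word),
 * 1 for word sizes of 9..16 bits (two bytes per word). Used to convert
 * between byte counts and word counts.
 */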
static inline int spi_max32_get_dfs_shift(const struct spi_context *ctx)
{
	if (SPI_WORD_SIZE_GET(ctx->config->operation) < 9) {
		return 0;
	}

	return 1;
}

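/*
 * Prepare the controller for one request: reset the transfer counters, select
 * the target slave, program the RX/TX character counts, clear both FIFOs and
 * any pending flags, and enable the block.
 */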
static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req)
{
	req->rxCnt = 0;
	req->txCnt = 0;

	if (spi->ctrl0 & ADI_MAX32_SPI_CTRL_MASTER_MODE) {
		MXC_SPI_SetSlave(spi, req->ssIdx);
	}

	if (req->rxData && req->rxLen) {
		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
			     req->rxLen << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
	} else {
		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_RX_NUM_CHAR;
		spi->dma &= ~MXC_F_SPI_DMA_RX_FIFO_EN;
	}

	if (req->txLen) {
		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR,
			     req->txLen << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
	} else {
		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_TX_NUM_CHAR;
		spi->dma &= ~MXC_F_SPI_DMA_TX_FIFO_EN;
	}

	spi->dma |= (ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR | ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR);
	spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;
	MXC_SPI_ClearFlags(spi);
}

#ifndef CONFIG_SPI_MAX32_INTERRUPT
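/*
 * Polled transfer path (used when CONFIG_SPI_MAX32_INTERRUPT is disabled):
 * feed the TX FIFO (dummy bytes when there is no TX buffer), drain the RX
 * FIFO, and busy-wait until the controller reports the transfer done.
 */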
static int spi_max32_transceive_sync(mxc_spi_regs_t *spi, struct max32_spi_data *data,
				     uint8_t dfs_shift)
{
	int ret = 0;
	mxc_spi_req_t *req = &data->req;
	uint32_t remain, flags, tx_len, rx_len;

	MXC_SPI_ClearTXFIFO(spi);
	MXC_SPI_ClearRXFIFO(spi);

	tx_len = req->txLen << dfs_shift;
	rx_len = req->rxLen << dfs_shift;
	do {
		remain = tx_len - req->txCnt;
		if (remain > 0) {
			if (!data->req.txData) {
				req->txCnt += MXC_SPI_WriteTXFIFO(spi, data->dummy,
								  MIN(remain, sizeof(data->dummy)));
			} else {
				req->txCnt +=
					MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain);
			}
			if (!(spi->ctrl0 & MXC_F_SPI_CTRL0_START)) {
				spi->ctrl0 |= MXC_F_SPI_CTRL0_START;
			}
		}

		if (req->rxCnt < rx_len) {
			req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt],
							 rx_len - req->rxCnt);
		}
	} while ((req->txCnt < tx_len) || (req->rxCnt < rx_len));

	do {
		flags = MXC_SPI_GetFlags(spi);
	} while (!(flags & ADI_MAX32_SPI_INT_FL_MST_DONE));
	MXC_SPI_ClearFlags(spi);

	return ret;
}
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

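/*
 * Run one transfer chunk. The request is built either from the current RTIO
 * submission (CONFIG_SPI_RTIO) or from the spi_context buffers. With
 * CONFIG_SPI_MAX32_INTERRUPT the FIFO is primed and completion is signalled
 * from the ISR; otherwise the transfer completes synchronously here.
 */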
static int spi_max32_transceive(const struct device *dev)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
#endif
	uint32_t len;
	uint8_t dfs_shift;

	MXC_SPI_ClearTXFIFO(cfg->regs);

	dfs_shift = spi_max32_get_dfs_shift(ctx);

	len = spi_context_max_continuous_chunk(ctx);

#ifdef CONFIG_SPI_RTIO
	switch (sqe->op) {
	case RTIO_OP_RX:
		len = sqe->rx.buf_len;
		data->req.rxData = sqe->rx.buf;
		data->req.rxLen = sqe->rx.buf_len;
		data->req.txData = NULL;
		data->req.txLen = len >> dfs_shift;
		break;
	case RTIO_OP_TX:
		len = sqe->tx.buf_len;
		data->req.rxLen = 0;
		data->req.rxData = data->dummy;
		data->req.txData = (uint8_t *)sqe->tx.buf;
		data->req.txLen = len >> dfs_shift;
		break;
	case RTIO_OP_TINY_TX:
		len = sqe->tiny_tx.buf_len;
		data->req.txData = (uint8_t *)sqe->tiny_tx.buf;
		data->req.rxData = data->dummy;
		data->req.txLen = len >> dfs_shift;
		data->req.rxLen = 0;
		break;
	case RTIO_OP_TXRX:
		len = sqe->txrx.buf_len;
		data->req.txData = (uint8_t *)sqe->txrx.tx_buf;
		data->req.rxData = sqe->txrx.rx_buf;
		data->req.txLen = len >> dfs_shift;
		data->req.rxLen = len >> dfs_shift;
		break;
	default:
		break;
	}
#else
	data->req.txLen = len >> dfs_shift;
	data->req.txData = (uint8_t *)ctx->tx_buf;
	data->req.rxLen = len >> dfs_shift;
	data->req.rxData = ctx->rx_buf;
	if (!data->req.rxData) {
		/* Pass a dummy buffer to HAL if receive buffer is NULL, otherwise
		 * corrupt data is read during subsequent transactions.
		 */
		data->req.rxData = data->dummy;
		data->req.rxLen = 0;
	}
#endif
	data->req.spi = cfg->regs;
	data->req.ssIdx = ctx->config->slave;
	data->req.ssDeassert = 0;
	data->req.txCnt = 0;
	data->req.rxCnt = 0;
	spi_max32_setup(cfg->regs, &data->req);
#ifdef CONFIG_SPI_MAX32_INTERRUPT
	MXC_SPI_SetTXThreshold(cfg->regs, 1);
	if (data->req.rxLen) {
		MXC_SPI_SetRXThreshold(cfg->regs, 2);
		MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_RX_THD);
	}
	MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_MST_DONE);

	if (!data->req.txData) {
		data->req.txCnt =
			MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy, MIN(len, sizeof(data->dummy)));
	} else {
		data->req.txCnt = MXC_SPI_WriteTXFIFO(cfg->regs, data->req.txData, len);
	}

	MXC_SPI_StartTransmission(cfg->regs);
#else
	ret = spi_max32_transceive_sync(cfg->regs, data, dfs_shift);
	if (ret) {
		ret = -EIO;
	} else {
		spi_context_update_tx(ctx, 1, len);
		spi_context_update_rx(ctx, 1, len);
	}
#endif

	return ret;
}

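/*
 * Common entry point for the transceive APIs. Without CONFIG_SPI_RTIO this
 * configures the controller, sets up the context buffers, handles chip select
 * (GPIO or hardware-controlled), and loops over the buffer chunks; with RTIO
 * the request is delegated to spi_rtio_transceive(). Asynchronous transfers
 * require CONFIG_SPI_MAX32_INTERRUPT.
 */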
static int transceive(const struct device *dev, const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool async, spi_callback_t cb, void *userdata)
{
	int ret = 0;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
#ifndef CONFIG_SPI_RTIO
	const struct max32_spi_config *cfg = dev->config;
	bool hw_cs_ctrl = true;
#endif

#ifndef CONFIG_SPI_MAX32_INTERRUPT
	if (async) {
		return -ENOTSUP;
	}
#endif

	spi_context_lock(ctx, async, cb, userdata, config);

#ifndef CONFIG_SPI_RTIO
	ret = spi_configure(dev, config);
	if (ret != 0) {
		spi_context_release(ctx, ret);
		return -EIO;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, true);
	} else {
		cfg->regs->ctrl0 =
			(cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) | MXC_F_SPI_CTRL0_SS_CTRL;
	}

#ifdef CONFIG_SPI_MAX32_INTERRUPT
	do {
		ret = spi_max32_transceive(dev);
		if (!ret) {
			ret = spi_context_wait_for_completion(ctx);
			if (ret || async) {
				break;
			}
		} else {
			break;
		}
	} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
#else
	do {
		ret = spi_max32_transceive(dev);
		if (ret) {
			break;
		}
	} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

	/* Deassert the CS line if HW control disabled */
	if (!async) {
		if (!hw_cs_ctrl) {
			spi_context_cs_control(ctx, false);
		} else {
			cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					      MXC_F_SPI_CTRL0_EN);
			cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}
	}
#else
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	ret = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
#endif
	spi_context_release(ctx, ret);
	return ret;
}

#ifdef CONFIG_SPI_MAX32_DMA
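/*
 * DMA completion callback, invoked once per direction. When both the TX and
 * RX done flags have been collected, advance the spi_context buffers and
 * complete the transfer (or report -EIO on a DMA error).
 */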
static void spi_max32_dma_callback(const struct device *dev, void *arg, uint32_t channel,
				   int status)
{
	struct max32_spi_data *data = arg;
	const struct device *spi_dev = data->dev;
	const struct max32_spi_config *config = spi_dev->config;
	uint32_t len;

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
	} else {
		/* Identify the origin of this callback */
		if (channel == config->tx_dma.channel) {
			data->dma_stat |= SPI_MAX32_DMA_TX_DONE_FLAG;
		} else if (channel == config->rx_dma.channel) {
			data->dma_stat |= SPI_MAX32_DMA_RX_DONE_FLAG;
		}
	}
	if ((data->dma_stat & SPI_MAX32_DMA_DONE_FLAG) == SPI_MAX32_DMA_DONE_FLAG) {
		len = spi_context_max_continuous_chunk(&data->ctx);
		spi_context_update_tx(&data->ctx, 1, len);
		spi_context_update_rx(&data->ctx, 1, len);
		spi_context_complete(&data->ctx, spi_dev, status == 0 ? 0 : -EIO);
	}
}

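/*
 * Configure and start the memory-to-peripheral DMA channel for TX. If buf is
 * NULL the channel replays the small dummy buffer with a non-incrementing
 * source address, so TX data is still supplied for RX-only transfers.
 */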
static int spi_max32_tx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
				 uint8_t word_shift)
{
	int ret;
	const struct max32_spi_config *config = dev->config;
	struct max32_spi_data *data = dev->data;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};

	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	dma_cfg.dma_callback = spi_max32_dma_callback;
	dma_cfg.user_data = (void *)data;
	dma_cfg.dma_slot = config->tx_dma.slot;
	dma_cfg.block_count = 1;
	dma_cfg.source_data_size = 1U << word_shift;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_data_size = 1U << word_shift;
	dma_cfg.head_block = &dma_blk;
	dma_blk.block_size = len;
	if (buf) {
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		dma_blk.source_address = (uint32_t)buf;
	} else {
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		dma_blk.source_address = (uint32_t)data->dummy;
	}

	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg);
	if (ret < 0) {
		LOG_ERR("Error configuring Tx DMA (%d)", ret);
		/* Do not start a channel that failed to configure */
		return ret;
	}

	return dma_start(config->tx_dma.dev, config->tx_dma.channel);
}

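/*
 * Configure and start the peripheral-to-memory DMA channel for RX. If buf is
 * NULL the received words are dumped into the dummy buffer with a
 * non-incrementing destination address.
 */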
static int spi_max32_rx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
				 uint8_t word_shift)
{
	int ret;
	const struct max32_spi_config *config = dev->config;
	struct max32_spi_data *data = dev->data;
	struct dma_config dma_cfg = {0};
	struct dma_block_config dma_blk = {0};

	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.dma_callback = spi_max32_dma_callback;
	dma_cfg.user_data = (void *)data;
	dma_cfg.dma_slot = config->rx_dma.slot;
	dma_cfg.block_count = 1;
	dma_cfg.source_data_size = 1U << word_shift;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_data_size = 1U << word_shift;
	dma_cfg.head_block = &dma_blk;
	dma_blk.block_size = len;
	if (buf) {
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
		dma_blk.dest_address = (uint32_t)buf;
	} else {
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
		dma_blk.dest_address = (uint32_t)data->dummy;
	}
	ret = dma_config(config->rx_dma.dev, config->rx_dma.channel, &dma_cfg);
	if (ret < 0) {
		LOG_ERR("Error configuring Rx DMA (%d)", ret);
		/* Do not start a channel that failed to configure */
		return ret;
	}

	return dma_start(config->rx_dma.dev, config->rx_dma.channel);
}

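/*
 * DMA-driven transfer path: verify both channels are idle, configure the
 * controller, then for each chunk program the RX/TX character counts, load
 * both DMA channels, start the transfer, and block on the spi_context
 * completion signalled from spi_max32_dma_callback().
 */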
static int transceive_dma(const struct device *dev, const struct spi_config *config,
			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
			  bool async, spi_callback_t cb, void *userdata)
{
	int ret = 0;
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	mxc_spi_regs_t *spi = cfg->regs;
	struct dma_status status;
	uint32_t len, word_count;
	uint8_t dfs_shift;

	bool hw_cs_ctrl = true;

	spi_context_lock(ctx, async, cb, userdata, config);

	ret = dma_get_status(cfg->tx_dma.dev, cfg->tx_dma.channel, &status);
	if (ret < 0 || status.busy) {
		ret = ret < 0 ? ret : -EBUSY;
		goto unlock;
	}

	ret = dma_get_status(cfg->rx_dma.dev, cfg->rx_dma.channel, &status);
	if (ret < 0 || status.busy) {
		ret = ret < 0 ? ret : -EBUSY;
		goto unlock;
	}

	ret = spi_configure(dev, config);
	if (ret != 0) {
		ret = -EIO;
		goto unlock;
	}

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, true);
	}

	MXC_SPI_SetSlave(cfg->regs, ctx->config->slave);

	do {
		spi->ctrl0 &= ~(MXC_F_SPI_CTRL0_EN);

		len = spi_context_max_continuous_chunk(ctx);
		dfs_shift = spi_max32_get_dfs_shift(ctx);
		word_count = len >> dfs_shift;

		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
			     word_count << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
		spi->dma |= ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR;
		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
		spi->dma |= ADI_MAX32_SPI_DMA_RX_DMA_EN;
		MXC_SPI_SetRXThreshold(spi, 0);

		ret = spi_max32_rx_dma_load(dev, ctx->rx_buf, len, dfs_shift);
		if (ret < 0) {
			goto unlock;
		}

		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR,
			     word_count << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
		spi->dma |= ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR;
		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
		spi->dma |= ADI_MAX32_SPI_DMA_TX_DMA_EN;
		MXC_SPI_SetTXThreshold(spi, 1);

		ret = spi_max32_tx_dma_load(dev, ctx->tx_buf, len, dfs_shift);
		if (ret < 0) {
			goto unlock;
		}

		spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;

		data->dma_stat = 0;
		MXC_SPI_StartTransmission(spi);
		ret = spi_context_wait_for_completion(ctx);
	} while (!ret && (spi_context_tx_on(ctx) || spi_context_rx_on(ctx)));

unlock:
	/* Deassert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(ctx, false);
	}

	spi_context_release(ctx, ret);

	return ret;
}
#endif /* CONFIG_SPI_MAX32_DMA */

#ifdef CONFIG_SPI_RTIO
static void spi_max32_iodev_complete(const struct device *dev, int status);

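/*
 * Kick off the transfer described by the current RTIO submission; unsupported
 * opcodes complete immediately with -EINVAL.
 */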
static void spi_max32_iodev_start(const struct device *dev)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
	int ret = 0;

	switch (sqe->op) {
	case RTIO_OP_RX:
	case RTIO_OP_TX:
	case RTIO_OP_TINY_TX:
	case RTIO_OP_TXRX:
		ret = spi_max32_transceive(dev);
		break;
	default:
		spi_max32_iodev_complete(dev, -EINVAL);
		break;
	}
	if (ret != 0) {
		spi_max32_iodev_complete(dev, -EIO);
	}
}

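/*
 * Configure the controller for the next RTIO transaction and assert chip
 * select, mirroring the CS handling of the non-RTIO transceive() path.
 */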
static inline void spi_max32_iodev_prepare_start(const struct device *dev)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct spi_dt_spec *spi_dt_spec = rtio_ctx->txn_curr->sqe.iodev->data;
	struct spi_config *spi_config = &spi_dt_spec->config;
	struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config;
	int ret;
	bool hw_cs_ctrl = true;

	ret = spi_configure(dev, spi_config);
	__ASSERT(!ret, "%d", ret);

	/* Check if CS GPIO exists */
	if (spi_cs_is_gpio(spi_config)) {
		hw_cs_ctrl = false;
	}
	MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl);

	/* Assert the CS line if HW control disabled */
	if (!hw_cs_ctrl) {
		spi_context_cs_control(&data->ctx, true);
	} else {
		cfg->regs->ctrl0 = (cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) |
				   MXC_F_SPI_CTRL0_SS_CTRL;
	}
}

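/*
 * Complete the current RTIO submission: chain into the next SQE of a
 * transaction, otherwise release chip select, report the completion, and
 * start the next queued submission if there is one.
 */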
static void spi_max32_iodev_complete(const struct device *dev, int status)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (!status && (rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION)) {
		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
		spi_max32_iodev_start(dev);
	} else {
		struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config;
		bool hw_cs_ctrl = true;

		if (!hw_cs_ctrl) {
			spi_context_cs_control(&data->ctx, false);
		} else {
			cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					      MXC_F_SPI_CTRL0_EN);
			cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}

		if (spi_rtio_complete(rtio_ctx, status)) {
			spi_max32_iodev_prepare_start(dev);
			spi_max32_iodev_start(dev);
		}
	}
}

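/* RTIO submit hook: queue the SQE and start it if the bus is idle. */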
static void api_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	struct max32_spi_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
		spi_max32_iodev_prepare_start(dev);
		spi_max32_iodev_start(dev);
	}
}
#endif

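/*
 * Blocking transceive API: use the DMA path when both DMA channels are wired
 * up in devicetree (a channel value of 0xFF means "not configured"), the
 * FIFO-based path otherwise.
 */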
static int api_transceive(const struct device *dev, const struct spi_config *config,
			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
#ifdef CONFIG_SPI_MAX32_DMA
	const struct max32_spi_config *cfg = dev->config;

	if (cfg->tx_dma.channel != 0xFF && cfg->rx_dma.channel != 0xFF) {
		return transceive_dma(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
	}
#endif /* CONFIG_SPI_MAX32_DMA */
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}

#ifdef CONFIG_SPI_ASYNC
static int api_transceive_async(const struct device *dev, const struct spi_config *config,
				const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
#endif /* CONFIG_SPI_ASYNC */

#ifdef CONFIG_SPI_MAX32_INTERRUPT
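/*
 * Completion callback invoked from the ISR when a request finishes: advance
 * the context buffers, then either submit the next chunk from the async work
 * item, or release chip select and complete the spi_context.
 */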
static void spi_max32_callback(mxc_spi_req_t *req, int error)
{
	struct max32_spi_data *data = CONTAINER_OF(req, struct max32_spi_data, req);
	struct spi_context *ctx = &data->ctx;
	const struct device *dev = data->dev;
	uint32_t len;

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (rtio_ctx->txn_head != NULL) {
		spi_max32_iodev_complete(data->dev, 0);
	}
#endif
	len = spi_context_max_continuous_chunk(ctx);
	spi_context_update_tx(ctx, 1, len);
	spi_context_update_rx(ctx, 1, len);
#ifdef CONFIG_SPI_ASYNC
	if (ctx->asynchronous && (spi_context_tx_on(ctx) || spi_context_rx_on(ctx))) {
		k_work_submit(&data->async_work);
	} else {
		if (spi_cs_is_gpio(ctx->config)) {
			spi_context_cs_control(ctx, false);
		} else {
			req->spi->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL |
					     MXC_F_SPI_CTRL0_EN);
			req->spi->ctrl0 |= MXC_F_SPI_CTRL0_EN;
		}
		spi_context_complete(ctx, dev, error == E_NO_ERROR ? 0 : -EIO);
	}
#else
	spi_context_complete(ctx, dev, error == E_NO_ERROR ? 0 : -EIO);
#endif
}

#ifdef CONFIG_SPI_ASYNC
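/* Work item that submits the next chunk of an asynchronous transfer. */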
void spi_max32_async_work_handler(struct k_work *work)
{
	struct max32_spi_data *data = CONTAINER_OF(work, struct max32_spi_data, async_work);
	const struct device *dev = data->dev;
	int ret;

	ret = spi_max32_transceive(dev);
	if (ret) {
		spi_context_complete(&data->ctx, dev, -EIO);
	}
}
#endif /* CONFIG_SPI_ASYNC */

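/*
 * SPI interrupt handler: refill the TX FIFO on the TX threshold interrupt,
 * drain the RX FIFO and retune the RX threshold to what remains, and invoke
 * spi_max32_callback() once both directions are done and the controller
 * reports master-done.
 */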
static void spi_max32_isr(const struct device *dev)
{
	const struct max32_spi_config *cfg = dev->config;
	struct max32_spi_data *data = dev->data;
	mxc_spi_req_t *req = &data->req;
	mxc_spi_regs_t *spi = cfg->regs;
	uint32_t flags, remain;
	uint8_t dfs_shift = spi_max32_get_dfs_shift(&data->ctx);

	flags = MXC_SPI_GetFlags(spi);
	MXC_SPI_ClearFlags(spi);

	remain = (req->txLen << dfs_shift) - req->txCnt;
	if (flags & ADI_MAX32_SPI_INT_FL_TX_THD) {
		if (remain) {
			if (!data->req.txData) {
				req->txCnt += MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy,
								  MIN(remain, sizeof(data->dummy)));
			} else {
				req->txCnt +=
					MXC_SPI_WriteTXFIFO(spi, &req->txData[req->txCnt], remain);
			}
		} else {
			MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD);
		}
	}

	remain = (req->rxLen << dfs_shift) - req->rxCnt;
	if (remain) {
		req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt], remain);
		remain = (req->rxLen << dfs_shift) - req->rxCnt;
		if (remain >= MXC_SPI_FIFO_DEPTH) {
			MXC_SPI_SetRXThreshold(spi, 2);
		} else {
			MXC_SPI_SetRXThreshold(spi, remain);
		}
	} else {
		MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_RX_THD);
	}

	if ((req->txLen == req->txCnt) && (req->rxLen == req->rxCnt)) {
		MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_RX_THD);
		if (flags & ADI_MAX32_SPI_INT_FL_MST_DONE) {
			MXC_SPI_DisableInt(spi, ADI_MAX32_SPI_INT_EN_MST_DONE);
			spi_max32_callback(req, 0);
		}
	}
}
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

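/*
 * Release the bus lock. Without CONFIG_SPI_RTIO, reject a release from a
 * configuration that does not currently own the context.
 */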
static int api_release(const struct device *dev, const struct spi_config *config)
{
	struct max32_spi_data *data = dev->data;

#ifndef CONFIG_SPI_RTIO
	if (!spi_context_configured(&data->ctx, config)) {
		return -EINVAL;
	}
#endif
	spi_context_unlock_unconditionally(&data->ctx);
	return 0;
}

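/*
 * Device init: enable the peripheral clock, apply the default pinctrl state,
 * configure any CS GPIOs, and hook up whichever IRQ/async/RTIO plumbing is
 * compiled in. The controller itself is programmed lazily in spi_configure().
 */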
static int spi_max32_init(const struct device *dev)
{
	int ret = 0;
	const struct max32_spi_config *const cfg = dev->config;
	mxc_spi_regs_t *regs = cfg->regs;
	struct max32_spi_data *data = dev->data;

	if (!device_is_ready(cfg->clock)) {
		return -ENODEV;
	}

	MXC_SPI_Shutdown(regs);

	ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk);
	if (ret) {
		return ret;
	}

	ret = pinctrl_apply_state(cfg->pctrl, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	data->dev = dev;

#ifdef CONFIG_SPI_RTIO
	spi_rtio_init(data->rtio_ctx, dev);
#endif

#ifdef CONFIG_SPI_MAX32_INTERRUPT
	cfg->irq_config_func(dev);
#ifdef CONFIG_SPI_ASYNC
	k_work_init(&data->async_work, spi_max32_async_work_handler);
#endif
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	return ret;
}

/* SPI driver APIs structure */
static DEVICE_API(spi, spi_max32_api) = {
	.transceive = api_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = api_transceive_async,
#endif /* CONFIG_SPI_ASYNC */
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = api_iodev_submit,
#endif /* CONFIG_SPI_RTIO */
	.release = api_release,
};

/* SPI driver registration */
#ifdef CONFIG_SPI_MAX32_INTERRUPT
#define SPI_MAX32_CONFIG_IRQ_FUNC(n) .irq_config_func = spi_max32_irq_config_func_##n,

#define SPI_MAX32_IRQ_CONFIG_FUNC(n) \
	static void spi_max32_irq_config_func_##n(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_max32_isr, \
			    DEVICE_DT_INST_GET(n), 0); \
		irq_enable(DT_INST_IRQN(n)); \
	}
#else
#define SPI_MAX32_CONFIG_IRQ_FUNC(n)
#define SPI_MAX32_IRQ_CONFIG_FUNC(n)
#endif /* CONFIG_SPI_MAX32_INTERRUPT */

#if CONFIG_SPI_MAX32_DMA
#define MAX32_DT_INST_DMA_CTLR(n, name) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), \
		    (DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, name))), (NULL))

#define MAX32_DT_INST_DMA_CELL(n, name, cell) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), (DT_INST_DMAS_CELL_BY_NAME(n, name, cell)), \
		    (0xff))

#define MAX32_SPI_DMA_INIT(n) \
	.tx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, tx), \
	.tx_dma.channel = MAX32_DT_INST_DMA_CELL(n, tx, channel), \
	.tx_dma.slot = MAX32_DT_INST_DMA_CELL(n, tx, slot), \
	.rx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, rx), \
	.rx_dma.channel = MAX32_DT_INST_DMA_CELL(n, rx, channel), \
	.rx_dma.slot = MAX32_DT_INST_DMA_CELL(n, rx, slot),
#else
#define MAX32_SPI_DMA_INIT(n)
#endif

#define DEFINE_SPI_MAX32_RTIO(_num) SPI_RTIO_DEFINE(max32_spi_rtio_##_num, \
						    CONFIG_SPI_MAX32_RTIO_SQ_SIZE, \
						    CONFIG_SPI_MAX32_RTIO_CQ_SIZE)

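/*
 * Instantiation is driven entirely by devicetree. As an illustrative sketch
 * only (node name, unit address, and all cell values below are placeholders,
 * not taken from a real board file), a node matched by this driver might
 * look like:
 *
 *	spi0: spi@40046000 {
 *		compatible = "adi,max32-spi";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *		dmas = <...>, <...>;
 *		dma-names = "tx", "rx";
 *		status = "okay";
 *	};
 *
 * The macros below pull the register address, the clock "offset"/"bit" cells,
 * the optional tx/rx DMA "channel"/"slot" cells, and the IRQ number out of
 * such a node.
 */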
#define DEFINE_SPI_MAX32(_num) \
	PINCTRL_DT_INST_DEFINE(_num); \
	SPI_MAX32_IRQ_CONFIG_FUNC(_num) \
	COND_CODE_1(CONFIG_SPI_RTIO, (DEFINE_SPI_MAX32_RTIO(_num)), ()); \
	static const struct max32_spi_config max32_spi_config_##_num = { \
		.regs = (mxc_spi_regs_t *)DT_INST_REG_ADDR(_num), \
		.pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num), \
		.clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)), \
		.perclk.bus = DT_INST_CLOCKS_CELL(_num, offset), \
		.perclk.bit = DT_INST_CLOCKS_CELL(_num, bit), \
		MAX32_SPI_DMA_INIT(_num) SPI_MAX32_CONFIG_IRQ_FUNC(_num)}; \
	static struct max32_spi_data max32_spi_data_##_num = { \
		SPI_CONTEXT_INIT_LOCK(max32_spi_data_##_num, ctx), \
		SPI_CONTEXT_INIT_SYNC(max32_spi_data_##_num, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(_num), ctx) \
		IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &max32_spi_rtio_##_num))}; \
	SPI_DEVICE_DT_INST_DEFINE(_num, spi_max32_init, NULL, &max32_spi_data_##_num, \
				  &max32_spi_config_##_num, PRE_KERNEL_2, CONFIG_SPI_INIT_PRIORITY, \
				  &spi_max32_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_SPI_MAX32)