/*
 * Copyright (c) 2017 Google LLC.
 * Copyright (c) 2018 qianfan Zhao.
 * Copyright (c) 2023 Gerson Fernando Budke.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_sam);

#include "spi_context.h"
#include <errno.h>
#include <zephyr/spinlock.h>
#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <soc.h>

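/* The SAM SPI block provides four NPCS chip select lines. */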
#define SAM_SPI_CHIP_SELECT_COUNT 4

/* Number of bytes in transfer before using DMA if available */
#define SAM_SPI_DMA_THRESHOLD 32
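/* Below this size, programming and starting the DMA channels typically
 * costs more than simply polling the data registers.
 */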

/* Device constant configuration parameters */
struct spi_sam_config {
	Spi *regs;
	const struct atmel_sam_pmc_config clock_cfg;
	const struct pinctrl_dev_config *pcfg;
	bool loopback;

#ifdef CONFIG_SPI_SAM_DMA
	const struct device *dma_dev;
	const uint32_t dma_tx_channel;
	const uint32_t dma_tx_perid;
	const uint32_t dma_rx_channel;
	const uint32_t dma_rx_perid;
#endif /* CONFIG_SPI_SAM_DMA */
};

/* Device run time data */
struct spi_sam_data {
	struct spi_context ctx;
	struct k_spinlock lock;

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx;
#endif

#ifdef CONFIG_SPI_SAM_DMA
	struct k_sem dma_sem;
#endif /* CONFIG_SPI_SAM_DMA */
};

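/* The polled fast paths bang registers directly; a spinlock keeps
 * concurrent callers from interleaving their register accesses.
 */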
static inline k_spinlock_key_t spi_spin_lock(const struct device *dev)
{
	struct spi_sam_data *data = dev->data;

	return k_spin_lock(&data->lock);
}

static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key)
{
	struct spi_sam_data *data = dev->data;

	k_spin_unlock(&data->lock, key);
}

static int spi_slave_to_mr_pcs(int slave)
{
	int pcs[SAM_SPI_CHIP_SELECT_COUNT] = {0x0, 0x1, 0x3, 0x7};

	/* The SPI operates in fixed peripheral mode (SPI_MR.PS = 0) with
	 * chip select decoding disabled (SPI_MR.PCSDEC = 0). Per the
	 * Atmel | SMART ARM-based Flash MCU datasheet, 40.8.2 SPI Mode
	 * Register:
	 * PCS = xxx0    NPCS[3:0] = 1110
	 * PCS = xx01    NPCS[3:0] = 1101
	 * PCS = x011    NPCS[3:0] = 1011
	 * PCS = 0111    NPCS[3:0] = 0111
	 */
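	/* For example, slave 2 maps to PCS = 0x3, i.e. NPCS[3:0] = 1011
	 * (only NPCS2 driven low).
	 */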

	return pcs[slave];
}

static int spi_sam_configure(const struct device *dev,
			     const struct spi_config *config)
{
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;
	Spi *regs = cfg->regs;
	uint32_t spi_mr = 0U, spi_csr = 0U;
	uint16_t spi_csr_idx = spi_cs_is_gpio(config) ? 0 : config->slave;
	int div;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		/* Slave mode is not implemented. */
		return -ENOTSUP;
	}

	if (config->slave > (SAM_SPI_CHIP_SELECT_COUNT - 1)) {
		LOG_ERR("Slave %d is greater than %d",
			config->slave, SAM_SPI_CHIP_SELECT_COUNT - 1);
		return -EINVAL;
	}

	/* Set master mode, disable mode fault detection, set fixed peripheral
	 * select mode.
	 */
	spi_mr |= (SPI_MR_MSTR | SPI_MR_MODFDIS);
	spi_mr |= SPI_MR_PCS(spi_slave_to_mr_pcs(spi_csr_idx));

	if (cfg->loopback) {
		spi_mr |= SPI_MR_LLB;
	}

	if ((config->operation & SPI_MODE_CPOL) != 0U) {
		spi_csr |= SPI_CSR_CPOL;
	}

	if ((config->operation & SPI_MODE_CPHA) == 0U) {
		spi_csr |= SPI_CSR_NCPHA;
	}

	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
		return -ENOTSUP;
	} else {
		spi_csr |= SPI_CSR_BITS(SPI_CSR_BITS_8_BIT);
	}

	/* Use the requested or next highest possible frequency */
	div = SOC_ATMEL_SAM_MCK_FREQ_HZ / config->frequency;
	div = CLAMP(div, 1, UINT8_MAX);
	spi_csr |= SPI_CSR_SCBR(div);
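
	/* SCK = MCK / SCBR: e.g. with MCK = 120 MHz, a request for 7 MHz
	 * gives div = 17 and an actual clock of ~7.06 MHz (illustrative
	 * values; MCK is SoC-specific).
	 */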

	regs->SPI_CR = SPI_CR_SPIDIS; /* Disable SPI */
	regs->SPI_MR = spi_mr;
	regs->SPI_CSR[spi_csr_idx] = spi_csr;
	regs->SPI_CR = SPI_CR_SPIEN; /* Enable SPI */

	data->ctx.config = config;

	return 0;
}

/* Finish any ongoing writes and drop any remaining read data */
static void spi_sam_finish(Spi *regs)
{
	while ((regs->SPI_SR & SPI_SR_TXEMPTY) == 0) {
	}

	while (regs->SPI_SR & SPI_SR_RDRF) {
		(void)regs->SPI_RDR;
	}
}

/* Fast path that transmits a buf */
static void spi_sam_fast_tx(Spi *regs, const uint8_t *tx_buf, const uint32_t tx_buf_len)
{
	const uint8_t *p = tx_buf;
	const uint8_t *pend = (uint8_t *)tx_buf + tx_buf_len;
	uint8_t ch;

	while (p != pend) {
		ch = *p++;

		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		regs->SPI_TDR = SPI_TDR_TD(ch);
	}
}

/* Fast path that reads into a buf */
static void spi_sam_fast_rx(Spi *regs, uint8_t *rx_buf, const uint32_t rx_buf_len)
{
	uint8_t *rx = rx_buf;
	int len = rx_buf_len;

	if (len <= 0) {
		return;
	}

	/* Write the first byte */
	regs->SPI_TDR = SPI_TDR_TD(0);
	len--;

	while (len) {
		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		/* Read byte N+0 from the receive register */
		while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
		}

		*rx = (uint8_t)regs->SPI_RDR;
		rx++;

		/* Load byte N+1 into the transmit register */
		regs->SPI_TDR = SPI_TDR_TD(0);
		len--;
	}

	/* Read the final incoming byte */
	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	*rx = (uint8_t)regs->SPI_RDR;
}

/* Fast path that writes and reads bufs of the same length */
static void spi_sam_fast_txrx(Spi *regs,
			      const uint8_t *tx_buf,
			      const uint8_t *rx_buf,
			      const uint32_t len)
{
	const uint8_t *tx = tx_buf;
	const uint8_t *txend = tx_buf + len;
	uint8_t *rx = (uint8_t *)rx_buf;

	if (len == 0) {
		return;
	}

	/*
	 * The code below interleaves the transmit writes with the
	 * receive reads to keep the bus fully utilised. The code is
	 * equivalent to:
	 *
	 * Transmit byte 0
	 * Loop:
	 * - Transmit byte n+1
	 * - Receive byte n
	 * Receive the final byte
	 */

	/* Write the first byte */
	regs->SPI_TDR = SPI_TDR_TD(*tx++);

	while (tx != txend) {
		while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
		}

		/* Load byte N+1 into the transmit register. TX is
		 * single buffered and we have at most one byte in
		 * flight so skip the DRE check.
		 */
		regs->SPI_TDR = SPI_TDR_TD(*tx++);

		/* Read byte N+0 from the receive register */
		while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
		}

		*rx++ = (uint8_t)regs->SPI_RDR;
	}

	/* Read the final incoming byte */
	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	*rx = (uint8_t)regs->SPI_RDR;
}

#ifdef CONFIG_SPI_SAM_DMA

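/* Scratch words substituted when one transfer direction has no buffer;
 * the matching DMA address-adjust is set to NO_CHANGE so the same word
 * is reused for every byte.
 */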
static __aligned(4) uint32_t tx_dummy;
static __aligned(4) uint32_t rx_dummy;

#ifdef CONFIG_SPI_RTIO
static void spi_sam_iodev_complete(const struct device *dev, int status);
#endif

static void dma_callback(const struct device *dma_dev, void *user_data,
			 uint32_t channel, int status)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(channel);
	ARG_UNUSED(status);

	const struct device *dev = user_data;
	struct spi_sam_data *drv_data = dev->data;

#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = drv_data->rtio_ctx;

	if (rtio_ctx->txn_head != NULL) {
		spi_sam_iodev_complete(dev, status);
		return;
	}
#endif
	k_sem_give(&drv_data->dma_sem);
}

/* DMA transceive path */
static int spi_sam_dma_txrx(const struct device *dev,
			    Spi *regs,
			    const uint8_t *tx_buf,
			    const uint8_t *rx_buf,
			    const uint32_t len)
{
	const struct spi_sam_config *drv_cfg = dev->config;
	struct spi_sam_data *drv_data = dev->data;
#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = drv_data->rtio_ctx;
	bool blocking = rtio_ctx->txn_head == NULL;
#else
	bool blocking = true;
#endif

	int res = 0;

	__ASSERT_NO_MSG(rx_buf != NULL || tx_buf != NULL);

	struct dma_config rx_dma_cfg = {
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 1,
		.dma_slot = drv_cfg->dma_rx_perid,
		.channel_direction = PERIPHERAL_TO_MEMORY,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.complete_callback_en = true,
		.dma_callback = NULL,
		.user_data = (void *)dev,
	};
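
	/* Completion is signalled from the TX channel's callback below, so
	 * the RX channel registers none.
	 */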

	uint32_t dest_address, dest_addr_adjust;

	if (rx_buf != NULL) {
		dest_address = (uint32_t)rx_buf;
		dest_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
	} else {
		dest_address = (uint32_t)&rx_dummy;
		dest_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
	}

	struct dma_block_config rx_block_cfg = {
		.dest_addr_adj = dest_addr_adjust,
		.block_size = len,
		.source_address = (uint32_t)&regs->SPI_RDR,
		.dest_address = dest_address
	};

	rx_dma_cfg.head_block = &rx_block_cfg;

	struct dma_config tx_dma_cfg = {
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 1,
		.dma_slot = drv_cfg->dma_tx_perid,
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.source_burst_length = 1,
		.dest_burst_length = 1,
		.complete_callback_en = true,
		.dma_callback = dma_callback,
		.user_data = (void *)dev,
	};

	uint32_t source_address, source_addr_adjust;

	if (tx_buf != NULL) {
		source_address = (uint32_t)tx_buf;
		source_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
	} else {
		source_address = (uint32_t)&tx_dummy;
		source_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
	}

	struct dma_block_config tx_block_cfg = {
		.source_addr_adj = source_addr_adjust,
		.block_size = len,
		.source_address = source_address,
		.dest_address = (uint32_t)&regs->SPI_TDR
	};

	tx_dma_cfg.head_block = &tx_block_cfg;

	res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_rx_channel, &rx_dma_cfg);
	if (res != 0) {
		LOG_ERR("failed to configure SPI DMA RX");
		goto out;
	}

	res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_tx_channel, &tx_dma_cfg);
	if (res != 0) {
		LOG_ERR("failed to configure SPI DMA TX");
		goto out;
	}

	/* Clocking begins on tx, so start rx first */
	res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
	if (res != 0) {
		LOG_ERR("failed to start SPI DMA RX");
		goto out;
	}

	res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_tx_channel);
	if (res != 0) {
		LOG_ERR("failed to start SPI DMA TX");
		dma_stop(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
		goto out;
	}

	/* When blocking, wait for the DMA callback before draining the
	 * shifter; otherwise completion arrives via the callback later.
	 */
	if (blocking) {
		k_sem_take(&drv_data->dma_sem, K_FOREVER);
		spi_sam_finish(regs);
	} else {
		res = -EWOULDBLOCK;
	}

out:
	return res;
}

#endif /* CONFIG_SPI_SAM_DMA */

static inline int spi_sam_rx(const struct device *dev,
			     Spi *regs,
			     uint8_t *rx_buf,
			     uint32_t rx_buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if ((rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
		key = spi_spin_lock(dev);
		spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
	} else {
		/* RTIO transfers always take this path */
		return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
#endif
	spi_sam_finish(regs);

	spi_spin_unlock(dev, key);
	return 0;
}

static inline int spi_sam_tx(const struct device *dev,
			     Spi *regs,
			     const uint8_t *tx_buf,
			     uint32_t tx_buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if ((tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
		key = spi_spin_lock(dev);
		spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
	} else {
		/* RTIO transfers always take this path */
		return spi_sam_dma_txrx(dev, regs, tx_buf, NULL, tx_buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
#endif
	spi_sam_finish(regs);
	spi_spin_unlock(dev, key);
	return 0;
}

static inline int spi_sam_txrx(const struct device *dev,
			       Spi *regs,
			       const uint8_t *tx_buf,
			       const uint8_t *rx_buf,
			       uint32_t buf_len)
{
	k_spinlock_key_t key;

#ifdef CONFIG_SPI_SAM_DMA
	const struct spi_sam_config *cfg = dev->config;

	if ((buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
		key = spi_spin_lock(dev);
		spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
	} else {
		/* RTIO transfers always take this path */
		return spi_sam_dma_txrx(dev, regs, tx_buf, rx_buf, buf_len);
	}
#else
	key = spi_spin_lock(dev);
	spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
#endif
	spi_sam_finish(regs);
	spi_spin_unlock(dev, key);
	return 0;
}

#ifndef CONFIG_SPI_RTIO

/* Fast path where every overlapping tx and rx buffer is the same length */
static void spi_sam_fast_transceive(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam_config *cfg = dev->config;
	size_t tx_count = 0;
	size_t rx_count = 0;
	Spi *regs = cfg->regs;
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->buf == NULL) {
			spi_sam_rx(dev, regs, rx->buf, rx->len);
		} else if (rx->buf == NULL) {
			spi_sam_tx(dev, regs, tx->buf, tx->len);
		} else if (rx->len == tx->len) {
			spi_sam_txrx(dev, regs, tx->buf, rx->buf, rx->len);
		} else {
			__ASSERT(false, "Invalid fast transceive configuration");
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	for (; tx_count != 0; tx_count--) {
		spi_sam_tx(dev, regs, tx->buf, tx->len);
		tx++;
	}

	for (; rx_count != 0; rx_count--) {
		spi_sam_rx(dev, regs, rx->buf, rx->len);
		rx++;
	}
}

static bool spi_sam_transfer_ongoing(struct spi_sam_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

static void spi_sam_shift_master(Spi *regs, struct spi_sam_data *data)
{
	uint8_t tx;
	uint8_t rx;

	if (spi_context_tx_buf_on(&data->ctx)) {
		tx = *(uint8_t *)(data->ctx.tx_buf);
	} else {
		tx = 0U;
	}

	while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
	}

	regs->SPI_TDR = SPI_TDR_TD(tx);
	spi_context_update_tx(&data->ctx, 1, 1);

	while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
	}

	rx = (uint8_t)regs->SPI_RDR;

	if (spi_context_rx_buf_on(&data->ctx)) {
		*data->ctx.rx_buf = rx;
	}
	spi_context_update_rx(&data->ctx, 1, 1);
}

/* Returns true if the request is suitable for the fast
 * path. Specifically, the bufs are a sequence of:
 *
 * - Zero or more RX and TX buf pairs where each is the same length.
 * - Zero or more trailing RX only bufs
 * - Zero or more trailing TX only bufs
 */
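/* For example, tx lengths {4, 8} with rx lengths {4, 8, 2} are regular
 * (matched pairs plus a trailing RX-only buf), while tx {4} with rx {8}
 * is not.
 */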
static bool spi_sam_is_regular(const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;
	size_t tx_count = 0;
	size_t rx_count = 0;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	if (!tx || !rx) {
		return true;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->len != rx->len) {
			return false;
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	return true;
}

#else

static void spi_sam_iodev_complete(const struct device *dev, int status);

static void spi_sam_iodev_start(const struct device *dev)
{
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
	int ret = 0;

	switch (sqe->op) {
	case RTIO_OP_RX:
		ret = spi_sam_rx(dev, cfg->regs, sqe->rx.buf, sqe->rx.buf_len);
		break;
	case RTIO_OP_TX:
		ret = spi_sam_tx(dev, cfg->regs, sqe->tx.buf, sqe->tx.buf_len);
		break;
	case RTIO_OP_TINY_TX:
		ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_tx.buf, sqe->tiny_tx.buf_len);
		break;
	case RTIO_OP_TXRX:
		ret = spi_sam_txrx(dev, cfg->regs, sqe->txrx.tx_buf, sqe->txrx.rx_buf,
				   sqe->txrx.buf_len);
		break;
	default:
678 LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
		spi_sam_iodev_complete(dev, -EINVAL);
		return;
	}

	/* Completion of the RTIO transfer should come through the DMA
	 * callback when successful, otherwise complete it here as an error.
	 */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		spi_sam_iodev_complete(dev, ret);
	}
}

static inline void spi_sam_iodev_prepare_start(const struct device *dev)
{
	struct spi_sam_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct spi_dt_spec *spi_dt_spec = rtio_ctx->txn_curr->sqe.iodev->data;
	struct spi_config *spi_config = &spi_dt_spec->config;
	int err;

	err = spi_sam_configure(dev, spi_config);
	__ASSERT(!err, "%d", err);

	spi_context_cs_control(&data->ctx, true);
}

static void spi_sam_iodev_complete(const struct device *dev, int status)
{
	struct spi_sam_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
		spi_sam_iodev_start(dev);
	} else {
		/* De-assert the CS line to separate this transaction from the next */
		spi_context_cs_control(&data->ctx, false);

		if (spi_rtio_complete(rtio_ctx, status)) {
			spi_sam_iodev_prepare_start(dev);
			spi_sam_iodev_start(dev);
		}
	}
}

static void spi_sam_iodev_submit(const struct device *dev,
				 struct rtio_iodev_sqe *iodev_sqe)
{
	struct spi_sam_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
		spi_sam_iodev_prepare_start(dev);
		spi_sam_iodev_start(dev);
	}
}
#endif

static int spi_sam_transceive(const struct device *dev,
			      const struct spi_config *config,
			      const struct spi_buf_set *tx_bufs,
			      const struct spi_buf_set *rx_bufs)
{
	struct spi_sam_data *data = dev->data;
	int err = 0;

	spi_context_lock(&data->ctx, false, NULL, NULL, config);

#if CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx = data->rtio_ctx;

	err = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
#else
	const struct spi_sam_config *cfg = dev->config;

	err = spi_sam_configure(dev, config);
	if (err != 0) {
		goto done;
	}

	spi_context_cs_control(&data->ctx, true);

	if (spi_sam_is_regular(tx_bufs, rx_bufs)) {
		spi_sam_fast_transceive(dev, config, tx_bufs, rx_bufs);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

		do {
			spi_sam_shift_master(cfg->regs, data);
		} while (spi_sam_transfer_ongoing(data));
	}

	spi_context_cs_control(&data->ctx, false);
done:
#endif
	spi_context_release(&data->ctx, err);
	return err;
}

static int spi_sam_transceive_sync(const struct device *dev,
				   const struct spi_config *config,
				   const struct spi_buf_set *tx_bufs,
				   const struct spi_buf_set *rx_bufs)
{
	return spi_sam_transceive(dev, config, tx_bufs, rx_bufs);
}

#ifdef CONFIG_SPI_ASYNC
static int spi_sam_transceive_async(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs,
				    spi_callback_t cb,
				    void *userdata)
{
	/* TODO: implement async transceive */
	return -ENOTSUP;
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_sam_release(const struct device *dev,
			   const struct spi_config *config)
{
	struct spi_sam_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_sam_init(const struct device *dev)
{
	int err;
	const struct spi_sam_config *cfg = dev->config;
	struct spi_sam_data *data = dev->data;

	/* Enable SPI clock in PMC */
	(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
			       (clock_control_subsys_t)&cfg->clock_cfg);

	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

#ifdef CONFIG_SPI_SAM_DMA
	k_sem_init(&data->dma_sem, 0, K_SEM_MAX_LIMIT);
#endif

#ifdef CONFIG_SPI_RTIO
	spi_rtio_init(data->rtio_ctx, dev);
#endif

	spi_context_unlock_unconditionally(&data->ctx);

	/* The device will be configured and enabled when transceive
	 * is called.
	 */

	return 0;
}

static DEVICE_API(spi, spi_sam_driver_api) = {
	.transceive = spi_sam_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_sam_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_sam_iodev_submit,
#endif
	.release = spi_sam_release,
};

#define SPI_DMA_INIT(n)							\
	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)),	\
	.dma_tx_channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel),	\
	.dma_tx_perid = DT_INST_DMAS_CELL_BY_NAME(n, tx, perid),	\
	.dma_rx_channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel),	\
	.dma_rx_perid = DT_INST_DMAS_CELL_BY_NAME(n, rx, perid),

#ifdef CONFIG_SPI_SAM_DMA
#define SPI_SAM_USE_DMA(n) DT_INST_DMAS_HAS_NAME(n, tx)
#else
#define SPI_SAM_USE_DMA(n) 0
#endif

#define SPI_SAM_DEFINE_CONFIG(n)					\
	static const struct spi_sam_config spi_sam_config_##n = {	\
		.regs = (Spi *)DT_INST_REG_ADDR(n),			\
		.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n),		\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.loopback = DT_INST_PROP(n, loopback),			\
		COND_CODE_1(SPI_SAM_USE_DMA(n), (SPI_DMA_INIT(n)), ())	\
	}

#define SPI_SAM_RTIO_DEFINE(n) SPI_RTIO_DEFINE(spi_sam_rtio_##n,	\
					       CONFIG_SPI_SAM_RTIO_SQ_SIZE, \
					       CONFIG_SPI_SAM_RTIO_SQ_SIZE)

#define SPI_SAM_DEVICE_INIT(n)						\
	PINCTRL_DT_INST_DEFINE(n);					\
	SPI_SAM_DEFINE_CONFIG(n);					\
	COND_CODE_1(CONFIG_SPI_RTIO, (SPI_SAM_RTIO_DEFINE(n)), ());	\
	static struct spi_sam_data spi_sam_dev_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_sam_dev_data_##n, ctx),	\
		SPI_CONTEXT_INIT_SYNC(spi_sam_dev_data_##n, ctx),	\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
		IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &spi_sam_rtio_##n)) \
	};								\
	SPI_DEVICE_DT_INST_DEFINE(n, &spi_sam_init, NULL,		\
				  &spi_sam_dev_data_##n,		\
				  &spi_sam_config_##n, POST_KERNEL,	\
				  CONFIG_SPI_INIT_PRIORITY, &spi_sam_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_SAM_DEVICE_INIT)