/*
 * Copyright (c) 2017 Google LLC.
 * Copyright (c) 2024 Gerson Fernando Budke <nandojve@gmail.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define DT_DRV_COMPAT atmel_sam0_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_sam0);

/* clang-format off */

#include "spi_context.h"
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>

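/* Fallback for SoC header variants that do not provide the _Val form of
 * the SPI master mode define (an assumption about header coverage; the
 * value matches CTRLA.MODE for SPI master on SAM0 parts).
 */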
#ifndef SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val
#define SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val (0x3)
#endif

/* Device constant configuration parameters */
struct spi_sam0_config {
	SercomSpi *regs;
	uint32_t pads;
	const struct pinctrl_dev_config *pcfg;

	volatile uint32_t *mclk;
	uint32_t mclk_mask;
	uint32_t gclk_gen;
	uint16_t gclk_id;

#ifdef CONFIG_SPI_ASYNC
	const struct device *dma_dev;
	uint8_t tx_dma_request;
	uint8_t tx_dma_channel;
	uint8_t rx_dma_request;
	uint8_t rx_dma_channel;
#endif
};

/* Device run time data */
struct spi_sam0_data {
	struct spi_context ctx;
#ifdef CONFIG_SPI_ASYNC
	const struct device *dev;
	uint32_t dma_segment_len;
#endif
};
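
/*
 * Applications reach this driver through the generic Zephyr SPI API.
 * A minimal sketch (the "sercom0" node label and the 1 MHz clock are
 * illustrative assumptions, not requirements of this driver):
 *
 *	const struct device *spi = DEVICE_DT_GET(DT_NODELABEL(sercom0));
 *	struct spi_config spi_cfg = {
 *		.frequency = 1000000,
 *		.operation = SPI_OP_MODE_MASTER | SPI_WORD_SIZE_SET(8),
 *	};
 *	uint8_t cmd[2] = { 0x9F, 0x00 };
 *	const struct spi_buf buf = { .buf = cmd, .len = sizeof(cmd) };
 *	const struct spi_buf_set tx = { .buffers = &buf, .count = 1 };
 *	int ret = spi_transceive(spi, &spi_cfg, &tx, NULL);
 */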

static void wait_synchronization(SercomSpi *regs)
{
#if defined(SERCOM_SPI_SYNCBUSY_MASK)
	/* SYNCBUSY is a register */
	while ((regs->SYNCBUSY.reg & SERCOM_SPI_SYNCBUSY_MASK) != 0) {
	}
#elif defined(SERCOM_SPI_STATUS_SYNCBUSY)
	/* SYNCBUSY is a bit */
	while ((regs->STATUS.reg & SERCOM_SPI_STATUS_SYNCBUSY) != 0) {
	}
#else
#error Unsupported device
#endif
}

static int spi_sam0_configure(const struct device *dev,
			      const struct spi_config *config)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	SERCOM_SPI_CTRLA_Type ctrla = {.reg = 0};
	SERCOM_SPI_CTRLB_Type ctrlb = {.reg = 0};
	int div;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		/* Slave mode is not implemented. */
		return -ENOTSUP;
	}

	ctrla.bit.MODE = SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val;

	if ((config->operation & SPI_TRANSFER_LSB) != 0U) {
		ctrla.bit.DORD = 1;
	}

	if ((config->operation & SPI_MODE_CPOL) != 0U) {
		ctrla.bit.CPOL = 1;
	}

	if ((config->operation & SPI_MODE_CPHA) != 0U) {
		ctrla.bit.CPHA = 1;
	}

	ctrla.reg |= cfg->pads;

	if ((config->operation & SPI_MODE_LOOP) != 0U) {
		/* Put MISO and MOSI on the same pad */
		ctrla.bit.DOPO = 0;
		ctrla.bit.DIPO = 0;
	}

	ctrla.bit.ENABLE = 1;
	ctrlb.bit.RXEN = 1;

	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
		return -ENOTSUP;
	}

	/* 8 bits per transfer */
	ctrlb.bit.CHSIZE = 0;

	/* Use the requested or next highest possible frequency */
	div = (SOC_ATMEL_SAM0_GCLK0_FREQ_HZ / config->frequency) / 2U - 1;
	div = CLAMP(div, 0, UINT8_MAX);
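
	/*
	 * In master mode the SERCOM generates SCK = f_GCLK / (2 * (BAUD + 1)).
	 * For example, assuming a 48 MHz generator: a 1 MHz request gives
	 * div = (48000000 / 1000000) / 2 - 1 = 23, i.e. exactly 1 MHz,
	 * while a 7 MHz request truncates to div = 2, i.e. 8 MHz, the
	 * next highest achievable rate.
	 */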

	/* Update the configuration only if it has changed */
	if (regs->CTRLA.reg != ctrla.reg || regs->CTRLB.reg != ctrlb.reg ||
	    regs->BAUD.reg != div) {
		regs->CTRLA.bit.ENABLE = 0;
		wait_synchronization(regs);

		regs->CTRLB = ctrlb;
		wait_synchronization(regs);
		regs->BAUD.reg = div;
		wait_synchronization(regs);
		regs->CTRLA = ctrla;
		wait_synchronization(regs);
	}

	data->ctx.config = config;

	return 0;
}

static bool spi_sam0_transfer_ongoing(struct spi_sam0_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

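/* Poll-mode transfer of a single byte: wait for the data register to
 * empty, write the next TX byte, then wait for and store the RX byte.
 */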
static void spi_sam0_shift_master(SercomSpi *regs, struct spi_sam0_data *data)
{
	uint8_t tx;
	uint8_t rx;

	if (spi_context_tx_buf_on(&data->ctx)) {
		tx = *(uint8_t *)(data->ctx.tx_buf);
	} else {
		tx = 0U;
	}

	while (!regs->INTFLAG.bit.DRE) {
	}

	regs->DATA.reg = tx;
	spi_context_update_tx(&data->ctx, 1, 1);

	while (!regs->INTFLAG.bit.RXC) {
	}

	rx = regs->DATA.reg;

	if (spi_context_rx_buf_on(&data->ctx)) {
		*data->ctx.rx_buf = rx;
	}
	spi_context_update_rx(&data->ctx, 1, 1);
}

/* Finish any ongoing writes and drop any remaining read data */
static void spi_sam0_finish(SercomSpi *regs)
{
	/* TXC sets once the last byte has fully shifted out */
	while (!regs->INTFLAG.bit.TXC) {
	}

	/* Drain any stale bytes from the receiver */
	while (regs->INTFLAG.bit.RXC) {
		(void)regs->DATA.reg;
	}
}

/* Fast path that transmits a buf */
static void spi_sam0_fast_tx(SercomSpi *regs, const struct spi_buf *tx_buf)
{
	const uint8_t *p = tx_buf->buf;
	const uint8_t *pend = (uint8_t *)tx_buf->buf + tx_buf->len;
	uint8_t ch;

	while (p != pend) {
		ch = *p++;

		while (!regs->INTFLAG.bit.DRE) {
		}

		regs->DATA.reg = ch;
	}

	spi_sam0_finish(regs);
}

/* Fast path that reads into a buf */
static void spi_sam0_fast_rx(SercomSpi *regs, const struct spi_buf *rx_buf)
{
	uint8_t *rx = rx_buf->buf;
	int len = rx_buf->len;

	if (len <= 0) {
		return;
	}

	while (len) {
		/* Send the next byte */
		regs->DATA.reg = 0;
		len--;

		/* Wait for completion, and read */
		while (!regs->INTFLAG.bit.RXC) {
		}
		*rx++ = regs->DATA.reg;
	}

	spi_sam0_finish(regs);
}

/* Fast path that writes and reads bufs of the same length */
static void spi_sam0_fast_txrx(SercomSpi *regs,
			       const struct spi_buf *tx_buf,
			       const struct spi_buf *rx_buf)
{
	const uint8_t *tx = tx_buf->buf;
	const uint8_t *txend = (uint8_t *)tx_buf->buf + tx_buf->len;
	uint8_t *rx = rx_buf->buf;
	size_t len = rx_buf->len;

	if (len == 0) {
		return;
	}

	while (tx != txend) {
		/* Send the next byte */
		regs->DATA.reg = *tx++;

		/* Wait for completion, and read */
		while (!regs->INTFLAG.bit.RXC) {
		}
		*rx++ = regs->DATA.reg;
	}

	spi_sam0_finish(regs);
}

/* Fast path where every overlapping tx and rx buffer is the same length */
static void spi_sam0_fast_transceive(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam0_config *cfg = dev->config;
	size_t tx_count = 0;
	size_t rx_count = 0;
	SercomSpi *regs = cfg->regs;
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->buf == NULL) {
			spi_sam0_fast_rx(regs, rx);
		} else if (rx->buf == NULL) {
			spi_sam0_fast_tx(regs, tx);
		} else {
			spi_sam0_fast_txrx(regs, tx, rx);
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	for (; tx_count != 0; tx_count--) {
		spi_sam0_fast_tx(regs, tx++);
	}

	for (; rx_count != 0; rx_count--) {
		spi_sam0_fast_rx(regs, rx++);
	}
}

/* Returns true if the request is suitable for the fast
 * path. Specifically, the bufs are a sequence of:
 *
 * - Zero or more RX and TX buf pairs where each is the same length.
 * - Zero or more trailing RX only bufs
 * - Zero or more trailing TX only bufs
 */
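/*
 * For example, a typical command/response transaction is regular
 * (lengths here are illustrative):
 *
 *	tx bufs: { cmd (len 1), NULL (len 4) }
 *	rx bufs: { NULL (len 1), rsp (len 4) }
 */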
static bool spi_sam0_is_regular(const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs)
{
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;
	size_t tx_count = 0;
	size_t rx_count = 0;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->len != rx->len) {
			return false;
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	return true;
}

static int spi_sam0_transceive(const struct device *dev,
			       const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	int err;

	spi_context_lock(&data->ctx, false, NULL, NULL, config);

	err = spi_sam0_configure(dev, config);
	if (err != 0) {
		goto done;
	}

	spi_context_cs_control(&data->ctx, true);

	/* This driver special cases the common send only, receive
	 * only, and transmit then receive operations. This special
	 * casing is 4x faster than the spi_context() routines
	 * and allows the transmit and receive to be interleaved.
	 */
	if (spi_sam0_is_regular(tx_bufs, rx_bufs)) {
		spi_sam0_fast_transceive(dev, config, tx_bufs, rx_bufs);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

		do {
			spi_sam0_shift_master(regs, data);
		} while (spi_sam0_transfer_ongoing(data));
	}

	spi_context_cs_control(&data->ctx, false);

done:
	spi_context_release(&data->ctx, err);
	return err;
}

static int spi_sam0_transceive_sync(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	return spi_sam0_transceive(dev, config, tx_bufs, rx_bufs);
}

#ifdef CONFIG_SPI_ASYNC

static void spi_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				 uint32_t id, int error_code);

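/* Configure and start the RX DMA channel for one segment. When the
 * caller passes a NULL buf, received bytes are written to a static
 * dummy byte with no address increment, so the segment still clocks
 * its full length.
 */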
static int spi_sam0_dma_rx_load(const struct device *dev, uint8_t *buf,
				size_t len)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	struct dma_config dma_cfg = { 0 };
	struct dma_block_config dma_blk = { 0 };
	int retval;

	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.source_data_size = 1;
	dma_cfg.dest_data_size = 1;
	dma_cfg.user_data = data;
	dma_cfg.dma_callback = spi_sam0_dma_rx_done;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_blk;
	dma_cfg.dma_slot = cfg->rx_dma_request;

	dma_blk.block_size = len;

	if (buf != NULL) {
		dma_blk.dest_address = (uint32_t)buf;
	} else {
		static uint8_t dummy;

		dma_blk.dest_address = (uint32_t)&dummy;
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	dma_blk.source_address = (uint32_t)(&(regs->DATA.reg));
	dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	retval = dma_config(cfg->dma_dev, cfg->rx_dma_channel,
			    &dma_cfg);
	if (retval != 0) {
		return retval;
	}

	return dma_start(cfg->dma_dev, cfg->rx_dma_channel);
}

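/* Configure and start the TX DMA channel for one segment. A NULL buf
 * transmits a constant dummy byte, which is what clocks an RX-only
 * segment.
 */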
static int spi_sam0_dma_tx_load(const struct device *dev, const uint8_t *buf,
				size_t len)
{
	const struct spi_sam0_config *cfg = dev->config;
	SercomSpi *regs = cfg->regs;
	struct dma_config dma_cfg = { 0 };
	struct dma_block_config dma_blk = { 0 };
	int retval;

	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	dma_cfg.source_data_size = 1;
	dma_cfg.dest_data_size = 1;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_blk;
	dma_cfg.dma_slot = cfg->tx_dma_request;

	dma_blk.block_size = len;

	if (buf != NULL) {
		dma_blk.source_address = (uint32_t)buf;
	} else {
		static const uint8_t dummy;

		dma_blk.source_address = (uint32_t)&dummy;
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	dma_blk.dest_address = (uint32_t)(&(regs->DATA.reg));
	dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	retval = dma_config(cfg->dma_dev, cfg->tx_dma_channel,
			    &dma_cfg);

	if (retval != 0) {
		return retval;
	}

	return dma_start(cfg->dma_dev, cfg->tx_dma_channel);
}

static bool spi_sam0_dma_advance_segment(const struct device *dev)
{
	struct spi_sam0_data *data = dev->data;
	uint32_t segment_len;

	/* Pick the shorter buffer of ones that have an actual length */
	if (data->ctx.rx_len != 0) {
		segment_len = data->ctx.rx_len;
		if (data->ctx.tx_len != 0) {
			segment_len = MIN(segment_len, data->ctx.tx_len);
		}
	} else {
		segment_len = data->ctx.tx_len;
	}

	if (segment_len == 0) {
		return false;
	}

	/* A single DMA block transfer carries at most a 16-bit count */
	segment_len = MIN(segment_len, 65535);

	data->dma_segment_len = segment_len;
	return true;
}

static int spi_sam0_dma_advance_buffers(const struct device *dev)
{
	struct spi_sam0_data *data = dev->data;
	int retval;

	if (data->dma_segment_len == 0) {
		return -EINVAL;
	}

	/* Load receive first, so it can accept transmit data */
	if (data->ctx.rx_len) {
		retval = spi_sam0_dma_rx_load(dev, data->ctx.rx_buf,
					      data->dma_segment_len);
	} else {
		retval = spi_sam0_dma_rx_load(dev, NULL, data->dma_segment_len);
	}

	if (retval != 0) {
		return retval;
	}

	/* Now load the transmit, which starts the actual bus clocking */
	if (data->ctx.tx_len) {
		retval = spi_sam0_dma_tx_load(dev, data->ctx.tx_buf,
					      data->dma_segment_len);
	} else {
		retval = spi_sam0_dma_tx_load(dev, NULL, data->dma_segment_len);
	}

	if (retval != 0) {
		return retval;
	}

	return 0;
}

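/* RX DMA completion callback. Since the last RX byte arrives after the
 * last TX byte has been clocked out, RX completion marks the end of the
 * segment: either advance to the next one or finish the transfer.
 */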
static void spi_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				 uint32_t id, int error_code)
{
	struct spi_sam0_data *data = arg;
	const struct device *dev = data->dev;
	const struct spi_sam0_config *cfg = dev->config;
	int retval;

	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	spi_context_update_tx(&data->ctx, 1, data->dma_segment_len);
	spi_context_update_rx(&data->ctx, 1, data->dma_segment_len);

	if (!spi_sam0_dma_advance_segment(dev)) {
		/* Done */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	retval = spi_sam0_dma_advance_buffers(dev);
	if (retval != 0) {
		dma_stop(cfg->dma_dev, cfg->tx_dma_channel);
		dma_stop(cfg->dma_dev, cfg->rx_dma_channel);
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, retval);
		return;
	}
}


static int spi_sam0_transceive_async(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	int retval;

	/*
	 * Transmit clocks the output and we use receive to determine when
	 * the transmit is done, so we always need both
	 */
	if (cfg->tx_dma_channel == 0xFF || cfg->rx_dma_channel == 0xFF) {
		return -ENOTSUP;
	}

	spi_context_lock(&data->ctx, true, cb, userdata, config);

	retval = spi_sam0_configure(dev, config);
	if (retval != 0) {
		goto err_unlock;
	}

	spi_context_cs_control(&data->ctx, true);

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_sam0_dma_advance_segment(dev);
	retval = spi_sam0_dma_advance_buffers(dev);
	if (retval != 0) {
		goto err_cs;
	}

	return 0;

err_cs:
	dma_stop(cfg->dma_dev, cfg->tx_dma_channel);
	dma_stop(cfg->dma_dev, cfg->rx_dma_channel);

	spi_context_cs_control(&data->ctx, false);

err_unlock:
	spi_context_release(&data->ctx, retval);
	return retval;
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_sam0_release(const struct device *dev,
			    const struct spi_config *config)
{
	struct spi_sam0_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

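/* One-time driver init: enable the SERCOM bus and generic clocks and
 * apply the default pinctrl state. The SERCOM itself is configured and
 * enabled lazily by the first transceive call.
 */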
static int spi_sam0_init(const struct device *dev)
{
	int err;
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;

	/* Enable the SERCOM clock in MCLK (or PM on older parts) */
	*cfg->mclk |= cfg->mclk_mask;

#ifdef MCLK
	/* Parts with an MCLK route the generic clock via PCHCTRL */
	GCLK->PCHCTRL[cfg->gclk_id].reg = GCLK_PCHCTRL_CHEN
					| GCLK_PCHCTRL_GEN(cfg->gclk_gen);
#else
	/* Older parts route the generic clock via CLKCTRL */
	GCLK->CLKCTRL.reg = GCLK_CLKCTRL_CLKEN
			  | GCLK_CLKCTRL_GEN(cfg->gclk_gen)
			  | GCLK_CLKCTRL_ID(cfg->gclk_id);
#endif

	/* Disable all SPI interrupts */
	regs->INTENCLR.reg = SERCOM_SPI_INTENCLR_MASK;
	wait_synchronization(regs);

	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

#ifdef CONFIG_SPI_ASYNC
	if (!device_is_ready(cfg->dma_dev)) {
		return -ENODEV;
	}
	data->dev = dev;
#endif

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	/* The device will be configured and enabled when transceive
	 * is called.
	 */

	return 0;
}

static DEVICE_API(spi, spi_sam0_driver_api) = {
	.transceive = spi_sam0_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_sam0_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_sam0_release,
};

#if CONFIG_SPI_ASYNC
#define SPI_SAM0_DMA_CHANNELS(n)					\
	.dma_dev = DEVICE_DT_GET(ATMEL_SAM0_DT_INST_DMA_CTLR(n, tx)),	\
	.tx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, tx),	\
	.tx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, tx),	\
	.rx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, rx),	\
	.rx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, rx),
#else
#define SPI_SAM0_DMA_CHANNELS(n)
#endif

#define SPI_SAM0_SERCOM_PADS(n)						\
	SERCOM_SPI_CTRLA_DIPO(DT_INST_PROP(n, dipo)) |			\
	SERCOM_SPI_CTRLA_DOPO(DT_INST_PROP(n, dopo))

#define ASSIGNED_CLOCKS_CELL_BY_NAME					\
	ATMEL_SAM0_DT_INST_ASSIGNED_CLOCKS_CELL_BY_NAME

#define SPI_SAM0_DEFINE_CONFIG(n)					\
static const struct spi_sam0_config spi_sam0_config_##n = {		\
	.regs = (SercomSpi *)DT_INST_REG_ADDR(n),			\
	.gclk_gen = ASSIGNED_CLOCKS_CELL_BY_NAME(n, gclk, gen),		\
	.gclk_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, id),		\
	.mclk = ATMEL_SAM0_DT_INST_MCLK_PM_REG_ADDR_OFFSET(n),		\
	.mclk_mask = ATMEL_SAM0_DT_INST_MCLK_PM_PERIPH_MASK(n, bit),	\
	.pads = SPI_SAM0_SERCOM_PADS(n),				\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	SPI_SAM0_DMA_CHANNELS(n)					\
}

#define SPI_SAM0_DEVICE_INIT(n)						\
	PINCTRL_DT_INST_DEFINE(n);					\
	SPI_SAM0_DEFINE_CONFIG(n);					\
	static struct spi_sam0_data spi_sam0_dev_data_##n = {		\
		SPI_CONTEXT_INIT_LOCK(spi_sam0_dev_data_##n, ctx),	\
		SPI_CONTEXT_INIT_SYNC(spi_sam0_dev_data_##n, ctx),	\
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
	};								\
	SPI_DEVICE_DT_INST_DEFINE(n, spi_sam0_init, NULL,		\
				  &spi_sam0_dev_data_##n,		\
				  &spi_sam0_config_##n, POST_KERNEL,	\
				  CONFIG_SPI_INIT_PRIORITY,		\
				  &spi_sam0_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_SAM0_DEVICE_INIT)

/* clang-format on */