/*
 * Copyright (c) 2017 Google LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define DT_DRV_COMPAT atmel_sam0_spi

#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_sam0);

#include "spi_context.h"
#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
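
/*
 * Fallback for SAM0 headers that do not provide the _Val macro: 0x3 is the
 * SERCOM CTRLA.MODE encoding for SPI master (host) mode on this family.
 */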
#ifndef SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val
#define SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val (0x3)
#endif

/* Device constant configuration parameters */
struct spi_sam0_config {
	SercomSpi *regs;
	uint32_t pads;
	const struct pinctrl_dev_config *pcfg;
#ifdef MCLK
	volatile uint32_t *mclk;
	uint32_t mclk_mask;
	uint16_t gclk_core_id;
#else
	uint32_t pm_apbcmask;
	uint16_t gclk_clkctrl_id;
#endif
#ifdef CONFIG_SPI_ASYNC
	const struct device *dma_dev;
	uint8_t tx_dma_request;
	uint8_t tx_dma_channel;
	uint8_t rx_dma_request;
	uint8_t rx_dma_channel;
#endif
};

/* Device run time data */
struct spi_sam0_data {
	struct spi_context ctx;
#ifdef CONFIG_SPI_ASYNC
	const struct device *dev;
	uint32_t dma_segment_len;
#endif
};

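/*
 * Several SERCOM registers are synchronized into a separate clock domain;
 * after writing one of them, the hardware keeps SYNCBUSY asserted until the
 * write has taken effect, so poll it before touching the registers again.
 */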
static void wait_synchronization(SercomSpi *regs)
{
#if defined(SERCOM_SPI_SYNCBUSY_MASK)
	/* SYNCBUSY is a register */
	while ((regs->SYNCBUSY.reg & SERCOM_SPI_SYNCBUSY_MASK) != 0) {
	}
#elif defined(SERCOM_SPI_STATUS_SYNCBUSY)
	/* SYNCBUSY is a bit */
	while ((regs->STATUS.reg & SERCOM_SPI_STATUS_SYNCBUSY) != 0) {
	}
#else
#error Unsupported device
#endif
}

static int spi_sam0_configure(const struct device *dev,
			      const struct spi_config *config)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	SERCOM_SPI_CTRLA_Type ctrla = {.reg = 0};
	SERCOM_SPI_CTRLB_Type ctrlb = {.reg = 0};
	int div;

	if (spi_context_configured(&data->ctx, config)) {
		return 0;
	}

	if (config->operation & SPI_HALF_DUPLEX) {
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}

	if (SPI_OP_MODE_GET(config->operation) != SPI_OP_MODE_MASTER) {
		/* Slave mode is not implemented. */
		return -ENOTSUP;
	}

	ctrla.bit.MODE = SERCOM_SPI_CTRLA_MODE_SPI_MASTER_Val;

	if ((config->operation & SPI_TRANSFER_LSB) != 0U) {
		ctrla.bit.DORD = 1;
	}

	if ((config->operation & SPI_MODE_CPOL) != 0U) {
		ctrla.bit.CPOL = 1;
	}

	if ((config->operation & SPI_MODE_CPHA) != 0U) {
		ctrla.bit.CPHA = 1;
	}

	ctrla.reg |= cfg->pads;

	if ((config->operation & SPI_MODE_LOOP) != 0U) {
		/* Put MISO and MOSI on the same pad */
		ctrla.bit.DOPO = 0;
		ctrla.bit.DIPO = 0;
	}

	ctrla.bit.ENABLE = 1;
	ctrlb.bit.RXEN = 1;

	if (SPI_WORD_SIZE_GET(config->operation) != 8) {
		return -ENOTSUP;
	}

	/* 8 bits per transfer */
	ctrlb.bit.CHSIZE = 0;

	/* Use the requested or next highest possible frequency */
	div = (SOC_ATMEL_SAM0_GCLK0_FREQ_HZ / config->frequency) / 2U - 1;
	div = CLAMP(div, 0, UINT8_MAX);
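
	/*
	 * The SERCOM SPI baud formula is f_sck = f_gclk / (2 * (BAUD + 1)),
	 * so BAUD = f_gclk / (2 * f_sck) - 1. Truncating the division only
	 * ever lowers BAUD, i.e. rounds the bus clock up. For example, with
	 * a 48 MHz GCLK and a 7 MHz request: 48000000 / 7000000 = 6,
	 * 6 / 2 - 1 = 2, and 48 MHz / (2 * 3) = 8 MHz >= the request.
	 */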

	/* Update the configuration only if it has changed */
	if (regs->CTRLA.reg != ctrla.reg || regs->CTRLB.reg != ctrlb.reg ||
	    regs->BAUD.reg != div) {
		regs->CTRLA.bit.ENABLE = 0;
		wait_synchronization(regs);

		regs->CTRLB = ctrlb;
		wait_synchronization(regs);
		regs->BAUD.reg = div;
		wait_synchronization(regs);
		regs->CTRLA = ctrla;
		wait_synchronization(regs);
	}

	data->ctx.config = config;

	return 0;
}

static bool spi_sam0_transfer_ongoing(struct spi_sam0_data *data)
{
	return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}

static void spi_sam0_shift_master(SercomSpi *regs, struct spi_sam0_data *data)
{
	uint8_t tx;
	uint8_t rx;

	if (spi_context_tx_buf_on(&data->ctx)) {
		tx = *(uint8_t *)(data->ctx.tx_buf);
	} else {
		tx = 0U;
	}

	while (!regs->INTFLAG.bit.DRE) {
	}

	regs->DATA.reg = tx;
	spi_context_update_tx(&data->ctx, 1, 1);

	while (!regs->INTFLAG.bit.RXC) {
	}

	rx = regs->DATA.reg;

	if (spi_context_rx_buf_on(&data->ctx)) {
		*data->ctx.rx_buf = rx;
	}
	spi_context_update_rx(&data->ctx, 1, 1);
}

/* Finish any ongoing writes and drop any remaining read data */
static void spi_sam0_finish(SercomSpi *regs)
{
	while (!regs->INTFLAG.bit.TXC) {
	}

	while (regs->INTFLAG.bit.RXC) {
		(void)regs->DATA.reg;
	}
}
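
/*
 * The fast paths below poll INTFLAG directly. spi_sam0_fast_tx() only waits
 * for DRE, keeping the transmit register full, while the paths that read
 * write a byte and then wait for RXC before writing the next, so at most
 * one byte is in flight and received data can never be overrun.
 */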

/* Fast path that transmits a buf */
static void spi_sam0_fast_tx(SercomSpi *regs, const struct spi_buf *tx_buf)
{
	const uint8_t *p = tx_buf->buf;
	const uint8_t *pend = (uint8_t *)tx_buf->buf + tx_buf->len;
	uint8_t ch;

	while (p != pend) {
		ch = *p++;

		while (!regs->INTFLAG.bit.DRE) {
		}

		regs->DATA.reg = ch;
	}

	spi_sam0_finish(regs);
}

/* Fast path that reads into a buf */
static void spi_sam0_fast_rx(SercomSpi *regs, const struct spi_buf *rx_buf)
{
	uint8_t *rx = rx_buf->buf;
	int len = rx_buf->len;

	if (len <= 0) {
		return;
	}

	while (len) {
		/* Send the next byte */
		regs->DATA.reg = 0;
		len--;

		/* Wait for completion, and read */
		while (!regs->INTFLAG.bit.RXC) {
		}
		*rx++ = regs->DATA.reg;
	}

	spi_sam0_finish(regs);
}

/* Fast path that writes and reads bufs of the same length */
static void spi_sam0_fast_txrx(SercomSpi *regs,
			       const struct spi_buf *tx_buf,
			       const struct spi_buf *rx_buf)
{
	const uint8_t *tx = tx_buf->buf;
	const uint8_t *txend = (uint8_t *)tx_buf->buf + tx_buf->len;
	uint8_t *rx = rx_buf->buf;
	size_t len = rx_buf->len;

	if (len == 0) {
		return;
	}

	while (tx != txend) {
		/* Send the next byte */
		regs->DATA.reg = *tx++;

		/* Wait for completion, and read */
		while (!regs->INTFLAG.bit.RXC) {
		}
		*rx++ = regs->DATA.reg;
	}

	spi_sam0_finish(regs);
}

/* Fast path where every overlapping tx and rx buffer is the same length */
static void spi_sam0_fast_transceive(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam0_config *cfg = dev->config;
	size_t tx_count = 0;
	size_t rx_count = 0;
	SercomSpi *regs = cfg->regs;
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->buf == NULL) {
			spi_sam0_fast_rx(regs, rx);
		} else if (rx->buf == NULL) {
			spi_sam0_fast_tx(regs, tx);
		} else {
			spi_sam0_fast_txrx(regs, tx, rx);
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	for (; tx_count != 0; tx_count--) {
		spi_sam0_fast_tx(regs, tx++);
	}

	for (; rx_count != 0; rx_count--) {
		spi_sam0_fast_rx(regs, rx++);
	}
}

/* Returns true if the request is suitable for the fast
 * path. Specifically, the bufs are a sequence of:
 *
 * - Zero or more RX and TX buf pairs where each is the same length.
 * - Zero or more trailing RX only bufs
 * - Zero or more trailing TX only bufs
 */
static bool spi_sam0_is_regular(const struct spi_buf_set *tx_bufs,
				const struct spi_buf_set *rx_bufs)
{
	const struct spi_buf *tx = NULL;
	const struct spi_buf *rx = NULL;
	size_t tx_count = 0;
	size_t rx_count = 0;

	if (tx_bufs) {
		tx = tx_bufs->buffers;
		tx_count = tx_bufs->count;
	}

	if (rx_bufs) {
		rx = rx_bufs->buffers;
		rx_count = rx_bufs->count;
	}

	while (tx_count != 0 && rx_count != 0) {
		if (tx->len != rx->len) {
			return false;
		}

		tx++;
		tx_count--;
		rx++;
		rx_count--;
	}

	return true;
}
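
/*
 * For example (illustrative, hypothetical buffers), a one-byte command
 * followed by a two-byte response qualifies for the fast path:
 *
 *   tx_bufs: { { .buf = cmd,  .len = 1 } }
 *   rx_bufs: { { .buf = NULL, .len = 1 }, { .buf = resp, .len = 2 } }
 *
 * The overlapping pair has matching lengths, and the unmatched trailing RX
 * buf is then handled by spi_sam0_fast_rx().
 */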

static int spi_sam0_transceive(const struct device *dev,
			       const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	int err;

	spi_context_lock(&data->ctx, false, NULL, NULL, config);

	err = spi_sam0_configure(dev, config);
	if (err != 0) {
		goto done;
	}

	spi_context_cs_control(&data->ctx, true);

	/* This driver special cases the common send only, receive
	 * only, and transmit then receive operations. This special
	 * casing is 4x faster than the spi_context() routines
	 * and allows the transmit and receive to be interleaved.
	 */
	if (spi_sam0_is_regular(tx_bufs, rx_bufs)) {
		spi_sam0_fast_transceive(dev, config, tx_bufs, rx_bufs);
	} else {
		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

		do {
			spi_sam0_shift_master(regs, data);
		} while (spi_sam0_transfer_ongoing(data));
	}

	spi_context_cs_control(&data->ctx, false);

done:
	spi_context_release(&data->ctx, err);
	return err;
}

static int spi_sam0_transceive_sync(const struct device *dev,
				    const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs)
{
	return spi_sam0_transceive(dev, config, tx_bufs, rx_bufs);
}
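
/*
 * Usage sketch from application code (illustrative only; the node label
 * "mydev" and the buffers are hypothetical):
 *
 *   static const struct spi_dt_spec bus = SPI_DT_SPEC_GET(
 *           DT_NODELABEL(mydev), SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);
 *
 *   uint8_t cmd[2] = { 0x0f, 0x00 };
 *   const struct spi_buf buf = { .buf = cmd, .len = sizeof(cmd) };
 *   const struct spi_buf_set tx = { .buffers = &buf, .count = 1 };
 *
 *   int ret = spi_write_dt(&bus, &tx);
 */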

#ifdef CONFIG_SPI_ASYNC

static void spi_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				 uint32_t id, int error_code);

static int spi_sam0_dma_rx_load(const struct device *dev, uint8_t *buf,
				size_t len)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;
	struct dma_config dma_cfg = { 0 };
	struct dma_block_config dma_blk = { 0 };
	int retval;

	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.source_data_size = 1;
	dma_cfg.dest_data_size = 1;
	dma_cfg.user_data = data;
	dma_cfg.dma_callback = spi_sam0_dma_rx_done;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_blk;
	dma_cfg.dma_slot = cfg->rx_dma_request;

	dma_blk.block_size = len;

	if (buf != NULL) {
		dma_blk.dest_address = (uint32_t)buf;
	} else {
		/* Discard the received data into a single dummy byte */
		static uint8_t dummy;

		dma_blk.dest_address = (uint32_t)&dummy;
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	dma_blk.source_address = (uint32_t)(&(regs->DATA.reg));
	dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	retval = dma_config(cfg->dma_dev, cfg->rx_dma_channel,
			    &dma_cfg);
	if (retval != 0) {
		return retval;
	}

	return dma_start(cfg->dma_dev, cfg->rx_dma_channel);
}

static int spi_sam0_dma_tx_load(const struct device *dev, const uint8_t *buf,
				size_t len)
{
	const struct spi_sam0_config *cfg = dev->config;
	SercomSpi *regs = cfg->regs;
	struct dma_config dma_cfg = { 0 };
	struct dma_block_config dma_blk = { 0 };
	int retval;

	/* Memory to peripheral: the SERCOM DATA register is the sink */
	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	dma_cfg.source_data_size = 1;
	dma_cfg.dest_data_size = 1;
	dma_cfg.block_count = 1;
	dma_cfg.head_block = &dma_blk;
	dma_cfg.dma_slot = cfg->tx_dma_request;

	dma_blk.block_size = len;

	if (buf != NULL) {
		dma_blk.source_address = (uint32_t)buf;
	} else {
		/* Clock the bus with zeros when there is no TX data */
		static const uint8_t dummy;

		dma_blk.source_address = (uint32_t)&dummy;
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
	}

	dma_blk.dest_address = (uint32_t)(&(regs->DATA.reg));
	dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	retval = dma_config(cfg->dma_dev, cfg->tx_dma_channel,
			    &dma_cfg);

	if (retval != 0) {
		return retval;
	}

	return dma_start(cfg->dma_dev, cfg->tx_dma_channel);
}

static bool spi_sam0_dma_advance_segment(const struct device *dev)
{
	struct spi_sam0_data *data = dev->data;
	uint32_t segment_len;

	/* Pick the shorter buffer of ones that have an actual length */
	if (data->ctx.rx_len != 0) {
		segment_len = data->ctx.rx_len;
		if (data->ctx.tx_len != 0) {
			segment_len = MIN(segment_len, data->ctx.tx_len);
		}
	} else {
		segment_len = data->ctx.tx_len;
	}

	if (segment_len == 0) {
		return false;
	}

	/* The DMAC block transfer count is a 16-bit field */
	segment_len = MIN(segment_len, 65535);

	data->dma_segment_len = segment_len;
	return true;
}

static int spi_sam0_dma_advance_buffers(const struct device *dev)
{
	struct spi_sam0_data *data = dev->data;
	int retval;

	if (data->dma_segment_len == 0) {
		return -EINVAL;
	}

	/* Load receive first, so it can accept transmit data */
	if (data->ctx.rx_len) {
		retval = spi_sam0_dma_rx_load(dev, data->ctx.rx_buf,
					      data->dma_segment_len);
	} else {
		retval = spi_sam0_dma_rx_load(dev, NULL, data->dma_segment_len);
	}

	if (retval != 0) {
		return retval;
	}

	/* Now load the transmit, which starts the actual bus clocking */
	if (data->ctx.tx_len) {
		retval = spi_sam0_dma_tx_load(dev, data->ctx.tx_buf,
					      data->dma_segment_len);
	} else {
		retval = spi_sam0_dma_tx_load(dev, NULL, data->dma_segment_len);
	}

	if (retval != 0) {
		return retval;
	}

	return 0;
}

static void spi_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				 uint32_t id, int error_code)
{
	struct spi_sam0_data *data = arg;
	const struct device *dev = data->dev;
	const struct spi_sam0_config *cfg = dev->config;
	int retval;

	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	spi_context_update_tx(&data->ctx, 1, data->dma_segment_len);
	spi_context_update_rx(&data->ctx, 1, data->dma_segment_len);

	if (!spi_sam0_dma_advance_segment(dev)) {
		/* Done */
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
		return;
	}

	retval = spi_sam0_dma_advance_buffers(dev);
	if (retval != 0) {
		dma_stop(cfg->dma_dev, cfg->tx_dma_channel);
		dma_stop(cfg->dma_dev, cfg->rx_dma_channel);
		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, retval);
		return;
	}
}
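
/*
 * The RX-done callback above drives the whole async transfer: each completed
 * RX block advances the spi_context bookkeeping and chains the next segment,
 * and the transfer completes (or aborts) from there. With Zephyr's DMA API
 * it is typically invoked from the DMA controller's interrupt context, so
 * only non-blocking calls are made here.
 */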

static int spi_sam0_transceive_async(const struct device *dev,
				     const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs,
				     spi_callback_t cb,
				     void *userdata)
{
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	int retval;

	/*
	 * Transmit clocks the output and we use receive to determine when
	 * the transmit is done, so we always need both
	 */
	if (cfg->tx_dma_channel == 0xFF || cfg->rx_dma_channel == 0xFF) {
		return -ENOTSUP;
	}

	spi_context_lock(&data->ctx, true, cb, userdata, config);

	retval = spi_sam0_configure(dev, config);
	if (retval != 0) {
		goto err_unlock;
	}

	spi_context_cs_control(&data->ctx, true);

	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

	spi_sam0_dma_advance_segment(dev);
	retval = spi_sam0_dma_advance_buffers(dev);
	if (retval != 0) {
		goto err_cs;
	}

	return 0;

err_cs:
	dma_stop(cfg->dma_dev, cfg->tx_dma_channel);
	dma_stop(cfg->dma_dev, cfg->rx_dma_channel);

	spi_context_cs_control(&data->ctx, false);

err_unlock:
	spi_context_release(&data->ctx, retval);
	return retval;
}
#endif /* CONFIG_SPI_ASYNC */

static int spi_sam0_release(const struct device *dev,
			    const struct spi_config *config)
{
	struct spi_sam0_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}

static int spi_sam0_init(const struct device *dev)
{
	int err;
	const struct spi_sam0_config *cfg = dev->config;
	struct spi_sam0_data *data = dev->data;
	SercomSpi *regs = cfg->regs;

#ifdef MCLK
	/* Enable the GCLK */
	GCLK->PCHCTRL[cfg->gclk_core_id].reg = GCLK_PCHCTRL_GEN_GCLK0 |
					       GCLK_PCHCTRL_CHEN;

	/* Enable the MCLK */
	*cfg->mclk |= cfg->mclk_mask;
#else
	/* Enable the GCLK */
	GCLK->CLKCTRL.reg = cfg->gclk_clkctrl_id | GCLK_CLKCTRL_GEN_GCLK0 |
			    GCLK_CLKCTRL_CLKEN;

	/* Enable SERCOM clock in PM */
	PM->APBCMASK.reg |= cfg->pm_apbcmask;
#endif

	/* Disable all SPI interrupts */
	regs->INTENCLR.reg = SERCOM_SPI_INTENCLR_MASK;
	wait_synchronization(regs);

	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

#ifdef CONFIG_SPI_ASYNC
	if (!device_is_ready(cfg->dma_dev)) {
		return -ENODEV;
	}
	data->dev = dev;
#endif

	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	/* The device will be configured and enabled when transceive
	 * is called.
	 */

	return 0;
}

static DEVICE_API(spi, spi_sam0_driver_api) = {
	.transceive = spi_sam0_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_sam0_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
	.iodev_submit = spi_rtio_iodev_default_submit,
#endif
	.release = spi_sam0_release,
};

#if CONFIG_SPI_ASYNC
#define SPI_SAM0_DMA_CHANNELS(n)					\
	.dma_dev = DEVICE_DT_GET(ATMEL_SAM0_DT_INST_DMA_CTLR(n, tx)),	\
	.tx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, tx),	\
	.tx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, tx),	\
	.rx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, rx),	\
	.rx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, rx),
#else
#define SPI_SAM0_DMA_CHANNELS(n)
#endif

#define SPI_SAM0_SERCOM_PADS(n)						\
	SERCOM_SPI_CTRLA_DIPO(DT_INST_PROP(n, dipo)) |			\
	SERCOM_SPI_CTRLA_DOPO(DT_INST_PROP(n, dopo))

#ifdef MCLK
#define SPI_SAM0_DEFINE_CONFIG(n)					\
static const struct spi_sam0_config spi_sam0_config_##n = {		\
	.regs = (SercomSpi *)DT_INST_REG_ADDR(n),			\
	.mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(n),	\
	.mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, mclk, bit)),	\
	.gclk_core_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, periph_ch),\
	.pads = SPI_SAM0_SERCOM_PADS(n),				\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	SPI_SAM0_DMA_CHANNELS(n)					\
}
#else
#define SPI_SAM0_DEFINE_CONFIG(n)					\
static const struct spi_sam0_config spi_sam0_config_##n = {		\
	.regs = (SercomSpi *)DT_INST_REG_ADDR(n),			\
	.pm_apbcmask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, pm, bit)),	\
	.gclk_clkctrl_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, clkctrl_id),\
	.pads = SPI_SAM0_SERCOM_PADS(n),				\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	SPI_SAM0_DMA_CHANNELS(n)					\
}
#endif /* MCLK */

#define SPI_SAM0_DEVICE_INIT(n)						\
PINCTRL_DT_INST_DEFINE(n);						\
SPI_SAM0_DEFINE_CONFIG(n);						\
static struct spi_sam0_data spi_sam0_dev_data_##n = {			\
	SPI_CONTEXT_INIT_LOCK(spi_sam0_dev_data_##n, ctx),		\
	SPI_CONTEXT_INIT_SYNC(spi_sam0_dev_data_##n, ctx),		\
	SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)		\
};									\
SPI_DEVICE_DT_INST_DEFINE(n, spi_sam0_init, NULL,			\
			  &spi_sam0_dev_data_##n,			\
			  &spi_sam0_config_##n, POST_KERNEL,		\
			  CONFIG_SPI_INIT_PRIORITY,			\
			  &spi_sam0_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_SAM0_DEVICE_INIT)