/*
 * Copyright (c) 2017 Google LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam0_uart

#include <zephyr/device.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <soc.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <string.h>
#include <zephyr/irq.h>

#ifndef SERCOM_USART_CTRLA_MODE_USART_INT_CLK
#define SERCOM_USART_CTRLA_MODE_USART_INT_CLK SERCOM_USART_CTRLA_MODE(0x1)
#endif

/*
 * Interrupt error flag is only supported in devices with
 * SERCOM revision 0x500
 */
#if defined(SERCOM_U2201) && (REV_SERCOM == 0x500)
#define SERCOM_REV500
#endif

/* Device constant configuration parameters */
struct uart_sam0_dev_cfg {
	SercomUsart *regs;
	uint32_t baudrate;
	uint32_t pads;
	bool collision_detect;
#ifdef MCLK
	volatile uint32_t *mclk;
	uint32_t mclk_mask;
	uint16_t gclk_core_id;
#else
	uint32_t pm_apbcmask;
	uint16_t gclk_clkctrl_id;
#endif
#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_SAM0_ASYNC
	void (*irq_config_func)(const struct device *dev);
#endif
#if CONFIG_UART_SAM0_ASYNC
	const struct device *dma_dev;
	uint8_t tx_dma_request;
	uint8_t tx_dma_channel;
	uint8_t rx_dma_request;
	uint8_t rx_dma_channel;
#endif
	const struct pinctrl_dev_config *pcfg;
};

/* Device run time data */
struct uart_sam0_dev_data {
	struct uart_config config_cache;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t cb;
	void *cb_data;
	uint8_t txc_cache;
#endif
#if CONFIG_UART_SAM0_ASYNC
	const struct device *dev;
	const struct uart_sam0_dev_cfg *cfg;

	uart_callback_t async_cb;
	void *async_cb_data;

	struct k_work_delayable tx_timeout_work;
	const uint8_t *tx_buf;
	size_t tx_len;

	struct k_work_delayable rx_timeout_work;
	size_t rx_timeout_time;
	size_t rx_timeout_chunk;
	uint32_t rx_timeout_start;
	uint8_t *rx_buf;
	size_t rx_len;
	size_t rx_processed_len;
	uint8_t *rx_next_buf;
	size_t rx_next_len;
	bool rx_waiting_for_irq;
	bool rx_timeout_from_isr;
#endif
};

static void wait_synchronization(SercomUsart *const usart)
{
#if defined(SERCOM_USART_SYNCBUSY_MASK)
	/* SYNCBUSY is a register */
	while ((usart->SYNCBUSY.reg & SERCOM_USART_SYNCBUSY_MASK) != 0) {
	}
#elif defined(SERCOM_USART_STATUS_SYNCBUSY)
	/* SYNCBUSY is a bit */
	while ((usart->STATUS.reg & SERCOM_USART_STATUS_SYNCBUSY) != 0) {
	}
#else
#error Unsupported device
#endif
}

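/*
 * Program BAUD for the arithmetic (16x oversampling) baud generator,
 * where BAUD = 65536 * (1 - 16 * f_baud / f_ref). The shift by 20
 * below is the combined factor 65536 * 16, and adding half the clock
 * frequency before dividing rounds to nearest instead of truncating.
 */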
static int uart_sam0_set_baudrate(SercomUsart *const usart, uint32_t baudrate,
				  uint32_t clk_freq_hz)
{
	uint64_t tmp;
	uint16_t baud;

	tmp = (uint64_t)baudrate << 20;
	tmp = (tmp + (clk_freq_hz >> 1)) / clk_freq_hz;

	/* Verify that the calculated result is within range */
	if (tmp < 1 || tmp > UINT16_MAX) {
		return -ERANGE;
	}

	baud = 65536 - (uint16_t)tmp;
	usart->BAUD.reg = baud;
	wait_synchronization(usart);

	return 0;
}


#if CONFIG_UART_SAM0_ASYNC

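/*
 * Async (UART_ASYNC_API) implementation notes: TX and RX are serviced
 * by single-block DMA transfers into and out of DATA. TX completion is
 * reported from the ISR once the TXC flag fires; the DMA callback below
 * only re-enables that interrupt. RX timeouts are approximated in
 * software by a delayable work item that samples DMA progress, with the
 * RXC interrupt used to detect the first byte of a transfer so the
 * timeout clock starts when data actually arrives.
 */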
static void uart_sam0_dma_tx_done(const struct device *dma_dev, void *arg,
				  uint32_t id, int error_code)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	struct uart_sam0_dev_data *const dev_data =
		(struct uart_sam0_dev_data *const)arg;
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;

	SercomUsart * const regs = cfg->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_TXC;
}

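/*
 * Abort any in-flight TX: stop the DMA channel and report
 * UART_TX_ABORTED with the number of bytes that actually left the
 * buffer, computed from the DMA engine's remaining (pending) count.
 */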
static int uart_sam0_tx_halt(struct uart_sam0_dev_data *dev_data)
{
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	unsigned int key = irq_lock();
	size_t tx_active = dev_data->tx_len;
	struct dma_status st;

	struct uart_event evt = {
		.type = UART_TX_ABORTED,
		.data.tx = {
			.buf = dev_data->tx_buf,
			.len = 0U,
		},
	};

	dev_data->tx_buf = NULL;
	dev_data->tx_len = 0U;

	dma_stop(cfg->dma_dev, cfg->tx_dma_channel);

	irq_unlock(key);

	if (dma_get_status(cfg->dma_dev, cfg->tx_dma_channel, &st) == 0) {
		evt.data.tx.len = tx_active - st.pending_length;
	}

	if (tx_active) {
		if (dev_data->async_cb) {
			dev_data->async_cb(dev_data->dev,
					   &evt, dev_data->async_cb_data);
		}
	} else {
		return -EINVAL;
	}

	return 0;
}

static void uart_sam0_tx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_sam0_dev_data *dev_data = CONTAINER_OF(dwork,
			struct uart_sam0_dev_data, tx_timeout_work);

	uart_sam0_tx_halt(dev_data);
}

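/*
 * Emit UART_RX_RDY for any bytes received since the last notification,
 * tracking the high-water mark in rx_processed_len.
 */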
static void uart_sam0_notify_rx_processed(struct uart_sam0_dev_data *dev_data,
					  size_t processed)
{
	if (!dev_data->async_cb) {
		return;
	}

	if (dev_data->rx_processed_len == processed) {
		return;
	}

	struct uart_event evt = {
		.type = UART_RX_RDY,
		.data.rx = {
			.buf = dev_data->rx_buf,
			.offset = dev_data->rx_processed_len,
			.len = processed - dev_data->rx_processed_len,
		},
	};

	dev_data->rx_processed_len = processed;

	dev_data->async_cb(dev_data->dev,
			   &evt, dev_data->async_cb_data);
}

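/*
 * RX DMA completion callback: the current buffer is full. Notify the
 * application, release the buffer, and either chain into the next
 * buffer supplied via uart_sam0_rx_buf_rsp() or end reception with
 * UART_RX_DISABLED.
 */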
static void uart_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				  uint32_t id, int error_code)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	struct uart_sam0_dev_data *const dev_data =
		(struct uart_sam0_dev_data *const)arg;
	const struct device *dev = dev_data->dev;
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	SercomUsart * const regs = cfg->regs;
	unsigned int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return;
	}

	uart_sam0_notify_rx_processed(dev_data, dev_data->rx_len);

	if (dev_data->async_cb) {
		struct uart_event evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf = {
				.buf = dev_data->rx_buf,
			},
		};

		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	/* No next buffer, so end the transfer */
	if (!dev_data->rx_next_len) {
		dev_data->rx_buf = NULL;
		dev_data->rx_len = 0U;

		if (dev_data->async_cb) {
			struct uart_event evt = {
				.type = UART_RX_DISABLED,
			};

			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}

		irq_unlock(key);
		return;
	}

	dev_data->rx_buf = dev_data->rx_next_buf;
	dev_data->rx_len = dev_data->rx_next_len;
	dev_data->rx_next_buf = NULL;
	dev_data->rx_next_len = 0U;
	dev_data->rx_processed_len = 0U;

	dma_reload(cfg->dma_dev, cfg->rx_dma_channel,
		   (uint32_t)(&(regs->DATA.reg)),
		   (uint32_t)dev_data->rx_buf, dev_data->rx_len);

	/*
	 * If there should be a timeout, handle starting the DMA in the
	 * ISR, since reception resets it and DMA completion implies
	 * reception. This also catches the case of DMA completion during
	 * timeout handling.
	 */
	if (dev_data->rx_timeout_time != SYS_FOREVER_US) {
		dev_data->rx_waiting_for_irq = true;
		regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;
		irq_unlock(key);
		return;
	}

	/* Otherwise, start the transfer immediately. */
	dma_start(cfg->dma_dev, cfg->rx_dma_channel);

	if (dev_data->async_cb) {
		struct uart_event evt = {
			.type = UART_RX_BUF_REQUEST,
		};

		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	irq_unlock(key);
}

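/*
 * RX timeout work handler. The requested timeout is split into chunks
 * (rx_timeout_chunk) so that data arriving mid-timeout defers the
 * notification rather than losing it; progress is sampled by stopping
 * the DMA transfer and checking how much of the buffer remains.
 */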
static void uart_sam0_rx_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_sam0_dev_data *dev_data = CONTAINER_OF(dwork,
			struct uart_sam0_dev_data, rx_timeout_work);
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	SercomUsart * const regs = cfg->regs;
	struct dma_status st;
	unsigned int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return;
	}

	/*
	 * Stop the DMA transfer and restart the interrupt read
	 * component (so the timeout restarts if there's still data).
	 * However, if the transfer has completed (nothing pending),
	 * the DMA ISR is already pending, so just let it handle
	 * things when we re-enable IRQs.
	 */
	dma_stop(cfg->dma_dev, cfg->rx_dma_channel);
	if (dma_get_status(cfg->dma_dev, cfg->rx_dma_channel,
			   &st) == 0 && st.pending_length == 0U) {
		irq_unlock(key);
		return;
	}

	uint8_t *rx_dma_start = dev_data->rx_buf + dev_data->rx_len -
				st.pending_length;
	size_t rx_processed = rx_dma_start - dev_data->rx_buf;

	/*
	 * We know we still have space, since the above will catch the
	 * empty buffer, so always restart the transfer.
	 */
	dma_reload(cfg->dma_dev, cfg->rx_dma_channel,
		   (uint32_t)(&(regs->DATA.reg)),
		   (uint32_t)rx_dma_start,
		   dev_data->rx_len - rx_processed);

	dev_data->rx_waiting_for_irq = true;
	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;

	/*
	 * Never do a notify on a timeout started from the ISR: timing
	 * granularity means the first timeout can land in the middle
	 * of reception while the total elapsed time is nonetheless
	 * exhausted. So we require a timeout chunk with no data seen
	 * at all (i.e. no ISR entry).
	 */
	if (dev_data->rx_timeout_from_isr) {
		dev_data->rx_timeout_from_isr = false;
		k_work_reschedule(&dev_data->rx_timeout_work,
				  K_USEC(dev_data->rx_timeout_chunk));
		irq_unlock(key);
		return;
	}

	uint32_t now = k_uptime_get_32();
	uint32_t elapsed = now - dev_data->rx_timeout_start;

	if (elapsed >= dev_data->rx_timeout_time) {
		/*
		 * No time left, so call the handler, and let the ISR
		 * restart the timeout when it sees data.
		 */
		uart_sam0_notify_rx_processed(dev_data, rx_processed);
	} else {
		/*
		 * Still have time left, so start another timeout.
		 */
		uint32_t remaining = MIN(dev_data->rx_timeout_time - elapsed,
					 dev_data->rx_timeout_chunk);

		k_work_reschedule(&dev_data->rx_timeout_work,
				  K_USEC(remaining));
	}

	irq_unlock(key);
}

#endif

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uart_sam0_configure(const struct device *dev,
			       const struct uart_config *new_cfg)
{
	int retval;

	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	struct uart_sam0_dev_data *const dev_data = dev->data;
	SercomUsart * const usart = cfg->regs;

	wait_synchronization(usart);

	usart->CTRLA.bit.ENABLE = 0;
	wait_synchronization(usart);

	if (new_cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) {
		/* Flow control is not yet supported, though it is in
		 * principle possible on this SoC family.
		 */
		return -ENOTSUP;
	}

	dev_data->config_cache.flow_ctrl = new_cfg->flow_ctrl;

	SERCOM_USART_CTRLA_Type CTRLA_temp = usart->CTRLA;
	SERCOM_USART_CTRLB_Type CTRLB_temp = usart->CTRLB;

	switch (new_cfg->parity) {
	case UART_CFG_PARITY_NONE:
		CTRLA_temp.bit.FORM = 0x0;
		break;
	case UART_CFG_PARITY_ODD:
		CTRLA_temp.bit.FORM = 0x1;
		CTRLB_temp.bit.PMODE = 1;
		break;
	case UART_CFG_PARITY_EVEN:
		CTRLA_temp.bit.FORM = 0x1;
		CTRLB_temp.bit.PMODE = 0;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.parity = new_cfg->parity;

	switch (new_cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		CTRLB_temp.bit.SBMODE = 0;
		break;
	case UART_CFG_STOP_BITS_2:
		CTRLB_temp.bit.SBMODE = 1;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.stop_bits = new_cfg->stop_bits;

	switch (new_cfg->data_bits) {
	case UART_CFG_DATA_BITS_5:
		CTRLB_temp.bit.CHSIZE = 0x5;
		break;
	case UART_CFG_DATA_BITS_6:
		CTRLB_temp.bit.CHSIZE = 0x6;
		break;
	case UART_CFG_DATA_BITS_7:
		CTRLB_temp.bit.CHSIZE = 0x7;
		break;
	case UART_CFG_DATA_BITS_8:
		CTRLB_temp.bit.CHSIZE = 0x0;
		break;
	case UART_CFG_DATA_BITS_9:
		CTRLB_temp.bit.CHSIZE = 0x1;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.data_bits = new_cfg->data_bits;

#if defined(SERCOM_REV500)
	CTRLB_temp.bit.COLDEN = cfg->collision_detect;
#endif

	usart->CTRLA = CTRLA_temp;
	wait_synchronization(usart);

	usart->CTRLB = CTRLB_temp;
	wait_synchronization(usart);

	retval = uart_sam0_set_baudrate(usart, new_cfg->baudrate,
					SOC_ATMEL_SAM0_GCLK0_FREQ_HZ);
	if (retval != 0) {
		return retval;
	}

	dev_data->config_cache.baudrate = new_cfg->baudrate;

	usart->CTRLA.bit.ENABLE = 1;
	wait_synchronization(usart);

	return 0;
}

static int uart_sam0_config_get(const struct device *dev,
				struct uart_config *out_cfg)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;

	memcpy(out_cfg, &(dev_data->config_cache),
	       sizeof(dev_data->config_cache));

	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

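/*
 * One-time driver init: enable the SERCOM clocks (MCLK-based SoCs
 * versus the older PM/GCLK scheme), configure 8N1 with the internal
 * clock, apply pinctrl, program the devicetree baud rate, and set up
 * the optional interrupt and DMA plumbing before enabling the USART.
 */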
static int uart_sam0_init(const struct device *dev)
{
	int retval;
	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	struct uart_sam0_dev_data *const dev_data = dev->data;

	SercomUsart * const usart = cfg->regs;

#ifdef MCLK
	/* Enable the GCLK */
	GCLK->PCHCTRL[cfg->gclk_core_id].reg = GCLK_PCHCTRL_GEN_GCLK0 |
					       GCLK_PCHCTRL_CHEN;

	/* Enable SERCOM clock in MCLK */
	*cfg->mclk |= cfg->mclk_mask;
#else
	/* Enable the GCLK */
	GCLK->CLKCTRL.reg = cfg->gclk_clkctrl_id | GCLK_CLKCTRL_GEN_GCLK0 |
			    GCLK_CLKCTRL_CLKEN;

	/* Enable SERCOM clock in PM */
	PM->APBCMASK.reg |= cfg->pm_apbcmask;
#endif

	/* Disable all USART interrupts */
	usart->INTENCLR.reg = SERCOM_USART_INTENCLR_MASK;
	wait_synchronization(usart);

	/* 8 bits of data, no parity, 1 stop bit in normal mode */
	usart->CTRLA.reg =
		cfg->pads
		/* Internal clock */
		| SERCOM_USART_CTRLA_MODE_USART_INT_CLK
#if defined(SERCOM_USART_CTRLA_SAMPR)
		/* 16x oversampling with arithmetic baud rate generation */
		| SERCOM_USART_CTRLA_SAMPR(0)
#endif
		| SERCOM_USART_CTRLA_FORM(0) |
		SERCOM_USART_CTRLA_CPOL | SERCOM_USART_CTRLA_DORD;
	wait_synchronization(usart);

	/* Enable PINMUX based on PINCTRL */
	retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (retval < 0) {
		return retval;
	}

	dev_data->config_cache.flow_ctrl = UART_CFG_FLOW_CTRL_NONE;
	dev_data->config_cache.parity = UART_CFG_PARITY_NONE;
	dev_data->config_cache.stop_bits = UART_CFG_STOP_BITS_1;
	dev_data->config_cache.data_bits = UART_CFG_DATA_BITS_8;

	/* Enable receiver and transmitter */
	usart->CTRLB.reg = SERCOM_USART_CTRLB_CHSIZE(0) |
			   SERCOM_USART_CTRLB_RXEN | SERCOM_USART_CTRLB_TXEN;
	wait_synchronization(usart);

	retval = uart_sam0_set_baudrate(usart, cfg->baudrate,
					SOC_ATMEL_SAM0_GCLK0_FREQ_HZ);
	if (retval != 0) {
		return retval;
	}
	dev_data->config_cache.baudrate = cfg->baudrate;

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_SAM0_ASYNC
	cfg->irq_config_func(dev);
#endif

#ifdef CONFIG_UART_SAM0_ASYNC
	dev_data->dev = dev;
	dev_data->cfg = cfg;
	if (!device_is_ready(cfg->dma_dev)) {
		return -ENODEV;
	}

	k_work_init_delayable(&dev_data->tx_timeout_work, uart_sam0_tx_timeout);
	k_work_init_delayable(&dev_data->rx_timeout_work, uart_sam0_rx_timeout);

	if (cfg->tx_dma_channel != 0xFFU) {
		struct dma_config dma_cfg = { 0 };
		struct dma_block_config dma_blk = { 0 };

		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_cfg.source_data_size = 1;
		dma_cfg.dest_data_size = 1;
		dma_cfg.user_data = dev_data;
		dma_cfg.dma_callback = uart_sam0_dma_tx_done;
		dma_cfg.block_count = 1;
		dma_cfg.head_block = &dma_blk;
		dma_cfg.dma_slot = cfg->tx_dma_request;

		dma_blk.block_size = 1;
		dma_blk.dest_address = (uint32_t)(&(usart->DATA.reg));
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		retval = dma_config(cfg->dma_dev, cfg->tx_dma_channel,
				    &dma_cfg);
		if (retval != 0) {
			return retval;
		}
	}

	if (cfg->rx_dma_channel != 0xFFU) {
		struct dma_config dma_cfg = { 0 };
		struct dma_block_config dma_blk = { 0 };

		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_cfg.source_data_size = 1;
		dma_cfg.dest_data_size = 1;
		dma_cfg.user_data = dev_data;
		dma_cfg.dma_callback = uart_sam0_dma_rx_done;
		dma_cfg.block_count = 1;
		dma_cfg.head_block = &dma_blk;
		dma_cfg.dma_slot = cfg->rx_dma_request;

		dma_blk.block_size = 1;
		dma_blk.source_address = (uint32_t)(&(usart->DATA.reg));
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		retval = dma_config(cfg->dma_dev, cfg->rx_dma_channel,
				    &dma_cfg);
		if (retval != 0) {
			return retval;
		}
	}

#endif

	usart->CTRLA.bit.ENABLE = 1;
	wait_synchronization(usart);

	return 0;
}

static int uart_sam0_poll_in(const struct device *dev, unsigned char *c)
{
	const struct uart_sam0_dev_cfg *config = dev->config;

	SercomUsart * const usart = config->regs;

	if (!usart->INTFLAG.bit.RXC) {
		return -EBUSY;
	}

	*c = (unsigned char)usart->DATA.reg;
	return 0;
}

static void uart_sam0_poll_out(const struct device *dev, unsigned char c)
{
	const struct uart_sam0_dev_cfg *config = dev->config;

	SercomUsart * const usart = config->regs;

	while (!usart->INTFLAG.bit.DRE) {
	}

	/* send a character */
	usart->DATA.reg = c;
}

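/*
 * Collect the accumulated error flags from STATUS and then clear them;
 * the SERCOM STATUS error bits are write-one-to-clear.
 */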
static int uart_sam0_err_check(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;

	SercomUsart * const regs = config->regs;
	uint32_t err = 0U;

	if (regs->STATUS.reg & SERCOM_USART_STATUS_BUFOVF) {
		err |= UART_ERROR_OVERRUN;
	}

	if (regs->STATUS.reg & SERCOM_USART_STATUS_FERR) {
		err |= UART_ERROR_FRAMING;
	}

	if (regs->STATUS.reg & SERCOM_USART_STATUS_PERR) {
		err |= UART_ERROR_PARITY;
	}

#if defined(SERCOM_REV500)
	if (regs->STATUS.reg & SERCOM_USART_STATUS_ISF) {
		err |= UART_BREAK;
	}

	if (regs->STATUS.reg & SERCOM_USART_STATUS_COLL) {
		err |= UART_ERROR_COLLISION;
	}

	regs->STATUS.reg |= SERCOM_USART_STATUS_BUFOVF
			    | SERCOM_USART_STATUS_FERR
			    | SERCOM_USART_STATUS_PERR
			    | SERCOM_USART_STATUS_COLL
			    | SERCOM_USART_STATUS_ISF;
#else
	regs->STATUS.reg |= SERCOM_USART_STATUS_BUFOVF
			    | SERCOM_USART_STATUS_FERR
			    | SERCOM_USART_STATUS_PERR;
#endif

	wait_synchronization(regs);
	return err;
}

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_SAM0_ASYNC

static void uart_sam0_isr(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;

#if CONFIG_UART_INTERRUPT_DRIVEN
	if (dev_data->cb) {
		dev_data->cb(dev, dev_data->cb_data);
	}
#endif

#if CONFIG_UART_SAM0_ASYNC
	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	SercomUsart * const regs = cfg->regs;

	if (dev_data->tx_len && regs->INTFLAG.bit.TXC) {
		regs->INTENCLR.reg = SERCOM_USART_INTENCLR_TXC;

		k_work_cancel_delayable(&dev_data->tx_timeout_work);

		unsigned int key = irq_lock();

		struct uart_event evt = {
			.type = UART_TX_DONE,
			.data.tx = {
				.buf = dev_data->tx_buf,
				.len = dev_data->tx_len,
			},
		};

		dev_data->tx_buf = NULL;
		dev_data->tx_len = 0U;

		if (evt.data.tx.len != 0U && dev_data->async_cb) {
			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}

		irq_unlock(key);
	}

	if (dev_data->rx_len && regs->INTFLAG.bit.RXC &&
	    dev_data->rx_waiting_for_irq) {
		dev_data->rx_waiting_for_irq = false;
		regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;

		/* Receive started, so request the next buffer */
		if (dev_data->rx_next_len == 0U && dev_data->async_cb) {
			struct uart_event evt = {
				.type = UART_RX_BUF_REQUEST,
			};

			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}

		/*
		 * If we have a timeout, restart the time remaining whenever
		 * we see data.
		 */
		if (dev_data->rx_timeout_time != SYS_FOREVER_US) {
			dev_data->rx_timeout_from_isr = true;
			dev_data->rx_timeout_start = k_uptime_get_32();
			k_work_reschedule(&dev_data->rx_timeout_work,
					  K_USEC(dev_data->rx_timeout_chunk));
		}

		/* DMA will read the currently ready byte out */
		dma_start(cfg->dma_dev, cfg->rx_dma_channel);
	}
#endif
}

#endif

#if CONFIG_UART_INTERRUPT_DRIVEN

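/*
 * The driver treats the TX data register as a one-byte FIFO: at most a
 * single byte is accepted per call, and only while DRE signals that the
 * transmit holding register is empty.
 */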
static int uart_sam0_fifo_fill(const struct device *dev,
			       const uint8_t *tx_data, int len)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart *regs = config->regs;

	if (regs->INTFLAG.bit.DRE && len >= 1) {
		regs->DATA.reg = tx_data[0];
		return 1;
	} else {
		return 0;
	}
}

static void uart_sam0_irq_tx_enable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_DRE
			     | SERCOM_USART_INTENSET_TXC;
}

static void uart_sam0_irq_tx_disable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_DRE
			     | SERCOM_USART_INTENCLR_TXC;
}

static int uart_sam0_irq_tx_ready(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	return (regs->INTFLAG.bit.DRE != 0) && (regs->INTENSET.bit.DRE != 0);
}

static int uart_sam0_irq_tx_complete(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	struct uart_sam0_dev_data *const dev_data = dev->data;
	SercomUsart * const regs = config->regs;

	return (dev_data->txc_cache != 0) && (regs->INTENSET.bit.TXC != 0);
}

static void uart_sam0_irq_rx_enable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;
}

static void uart_sam0_irq_rx_disable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;
}

static int uart_sam0_irq_rx_ready(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	return regs->INTFLAG.bit.RXC != 0;
}

static int uart_sam0_fifo_read(const struct device *dev, uint8_t *rx_data,
			       const int size)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	if (regs->INTFLAG.bit.RXC) {
		uint8_t ch = regs->DATA.reg;

		if (size >= 1) {
			*rx_data = ch;
			return 1;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}

static int uart_sam0_irq_is_pending(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	return (regs->INTENSET.reg & regs->INTFLAG.reg) != 0;
}

#if defined(SERCOM_REV500)
static void uart_sam0_irq_err_enable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_ERROR;
	wait_synchronization(regs);
}

static void uart_sam0_irq_err_disable(const struct device *dev)
{
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_ERROR;
	wait_synchronization(regs);
}
#endif

static int uart_sam0_irq_update(const struct device *dev)
{
	/* Clear sticky interrupts */
	const struct uart_sam0_dev_cfg *config = dev->config;
	SercomUsart * const regs = config->regs;

#if defined(SERCOM_REV500)
	/*
	 * Cache the TXC flag and clear the interrupt using the cached
	 * value. If the live flag were used for the clear instead, TXC
	 * could assert between the cache read and the write, and that
	 * assertion would be cleared without ever having been cached.
	 */
	struct uart_sam0_dev_data *const dev_data = dev->data;

	dev_data->txc_cache = regs->INTFLAG.bit.TXC;
	regs->INTFLAG.reg = SERCOM_USART_INTFLAG_ERROR
			    | SERCOM_USART_INTFLAG_RXBRK
			    | SERCOM_USART_INTFLAG_CTSIC
			    | SERCOM_USART_INTFLAG_RXS
			    | (dev_data->txc_cache << SERCOM_USART_INTFLAG_TXC_Pos);
#else
	regs->INTFLAG.reg = SERCOM_USART_INTFLAG_RXS;
#endif
	return 1;
}

static void uart_sam0_irq_callback_set(const struct device *dev,
				       uart_irq_callback_user_data_t cb,
				       void *cb_data)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;

	dev_data->cb = cb;
	dev_data->cb_data = cb_data;

#if defined(CONFIG_UART_SAM0_ASYNC) && defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	dev_data->async_cb = NULL;
	dev_data->async_cb_data = NULL;
#endif
}
#endif

#ifdef CONFIG_UART_SAM0_ASYNC

static int uart_sam0_callback_set(const struct device *dev,
				  uart_callback_t callback,
				  void *user_data)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;

	dev_data->async_cb = callback;
	dev_data->async_cb_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	dev_data->cb = NULL;
	dev_data->cb_data = NULL;
#endif

	return 0;
}

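/*
 * Start an async TX: hand the whole buffer to the TX DMA channel
 * (memory-to-peripheral into DATA) and, unless the caller passed
 * SYS_FOREVER_US, arm a watchdog work item that aborts the transfer
 * if it does not complete in time.
 */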
static int uart_sam0_tx(const struct device *dev, const uint8_t *buf,
			size_t len,
			int32_t timeout)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;
	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	SercomUsart *regs = cfg->regs;
	int retval;

	if (cfg->tx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	unsigned int key = irq_lock();

	if (dev_data->tx_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	dev_data->tx_buf = buf;
	dev_data->tx_len = len;

	irq_unlock(key);

	retval = dma_reload(cfg->dma_dev, cfg->tx_dma_channel, (uint32_t)buf,
			    (uint32_t)(&(regs->DATA.reg)), len);
	if (retval != 0) {
		return retval;
	}

	if (timeout != SYS_FOREVER_US) {
		k_work_reschedule(&dev_data->tx_timeout_work,
				  K_USEC(timeout));
	}

	return dma_start(cfg->dma_dev, cfg->tx_dma_channel);
err:
	irq_unlock(key);
	return retval;
}

static int uart_sam0_tx_abort(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;
	const struct uart_sam0_dev_cfg *const cfg = dev->config;

	if (cfg->tx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	k_work_cancel_delayable(&dev_data->tx_timeout_work);

	return uart_sam0_tx_halt(dev_data);
}

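/*
 * Arm async RX: flush any stale byte, point the RX DMA channel at the
 * buffer, and enable RXC so the ISR can start the DMA (and the timeout
 * clock) when the first byte actually arrives.
 */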
static int uart_sam0_rx_enable(const struct device *dev, uint8_t *buf,
			       size_t len,
			       int32_t timeout)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;
	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	SercomUsart *regs = cfg->regs;
	int retval;

	if (cfg->rx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	unsigned int key = irq_lock();

	if (dev_data->rx_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	/* Read off anything that was already there */
	while (regs->INTFLAG.bit.RXC) {
		char discard = regs->DATA.reg;

		(void)discard;
	}

	retval = dma_reload(cfg->dma_dev, cfg->rx_dma_channel,
			    (uint32_t)(&(regs->DATA.reg)),
			    (uint32_t)buf, len);
	if (retval != 0) {
		goto err;
	}

	dev_data->rx_buf = buf;
	dev_data->rx_len = len;
	dev_data->rx_processed_len = 0U;
	dev_data->rx_waiting_for_irq = true;
	dev_data->rx_timeout_from_isr = true;
	dev_data->rx_timeout_time = timeout;
	dev_data->rx_timeout_chunk = MAX(timeout / 4U, 1);

	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;

	irq_unlock(key);
	return 0;

err:
	irq_unlock(key);
	return retval;
}

static int uart_sam0_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				size_t len)
{
	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	struct uart_sam0_dev_data *const dev_data = dev->data;
	unsigned int key = irq_lock();
	int retval = 0;

	if (dev_data->rx_len == 0U) {
		retval = -EACCES;
		goto err;
	}

	if (dev_data->rx_next_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	dev_data->rx_next_buf = buf;
	dev_data->rx_next_len = len;

	irq_unlock(key);
	return 0;

err:
	irq_unlock(key);
	return retval;
}

static int uart_sam0_rx_disable(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = dev->data;
	const struct uart_sam0_dev_cfg *const cfg = dev->config;
	SercomUsart * const regs = cfg->regs;
	struct dma_status st;

	k_work_cancel_delayable(&dev_data->rx_timeout_work);

	unsigned int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return -EINVAL;
	}

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;
	dma_stop(cfg->dma_dev, cfg->rx_dma_channel);

	if (dma_get_status(cfg->dma_dev, cfg->rx_dma_channel,
			   &st) == 0 && st.pending_length != 0U) {
		size_t rx_processed = dev_data->rx_len - st.pending_length;

		uart_sam0_notify_rx_processed(dev_data, rx_processed);
	}

	struct uart_event evt = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf = {
			.buf = dev_data->rx_buf,
		},
	};

	dev_data->rx_buf = NULL;
	dev_data->rx_len = 0U;

	if (dev_data->async_cb) {
		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	if (dev_data->rx_next_len) {
		struct uart_event next_evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf = {
				.buf = dev_data->rx_next_buf,
			},
		};

		dev_data->rx_next_buf = NULL;
		dev_data->rx_next_len = 0U;

		if (dev_data->async_cb) {
			dev_data->async_cb(dev, &next_evt, dev_data->async_cb_data);
		}
	}

	evt.type = UART_RX_DISABLED;
	if (dev_data->async_cb) {
		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	irq_unlock(key);

	return 0;
}

#endif

static DEVICE_API(uart, uart_sam0_driver_api) = {
	.poll_in = uart_sam0_poll_in,
	.poll_out = uart_sam0_poll_out,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = uart_sam0_configure,
	.config_get = uart_sam0_config_get,
#endif
	.err_check = uart_sam0_err_check,
#if CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_sam0_fifo_fill,
	.fifo_read = uart_sam0_fifo_read,
	.irq_tx_enable = uart_sam0_irq_tx_enable,
	.irq_tx_disable = uart_sam0_irq_tx_disable,
	.irq_tx_ready = uart_sam0_irq_tx_ready,
	.irq_tx_complete = uart_sam0_irq_tx_complete,
	.irq_rx_enable = uart_sam0_irq_rx_enable,
	.irq_rx_disable = uart_sam0_irq_rx_disable,
	.irq_rx_ready = uart_sam0_irq_rx_ready,
	.irq_is_pending = uart_sam0_irq_is_pending,
#if defined(SERCOM_REV500)
	.irq_err_enable = uart_sam0_irq_err_enable,
	.irq_err_disable = uart_sam0_irq_err_disable,
#endif
	.irq_update = uart_sam0_irq_update,
	.irq_callback_set = uart_sam0_irq_callback_set,
#endif
#if CONFIG_UART_SAM0_ASYNC
	.callback_set = uart_sam0_callback_set,
	.tx = uart_sam0_tx,
	.tx_abort = uart_sam0_tx_abort,
	.rx_enable = uart_sam0_rx_enable,
	.rx_buf_rsp = uart_sam0_rx_buf_rsp,
	.rx_disable = uart_sam0_rx_disable,
#endif
};

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_SAM0_ASYNC

#define SAM0_UART_IRQ_CONNECT(n, m)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, m, irq),		\
			    DT_INST_IRQ_BY_IDX(n, m, priority),		\
			    uart_sam0_isr,				\
			    DEVICE_DT_INST_GET(n), 0);			\
		irq_enable(DT_INST_IRQ_BY_IDX(n, m, irq));		\
	} while (false)

#define UART_SAM0_IRQ_HANDLER_DECL(n)					\
	static void uart_sam0_irq_config_##n(const struct device *dev)
#define UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	.irq_config_func = uart_sam0_irq_config_##n,

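/*
 * Some SoCs (e.g. the SAM D5x/E5x family) route each SERCOM to four
 * NVIC lines rather than one; connect whichever set of interrupts the
 * devicetree describes (checked against instance 0).
 */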
#if DT_INST_IRQ_HAS_IDX(0, 3)
#define UART_SAM0_IRQ_HANDLER(n)					\
static void uart_sam0_irq_config_##n(const struct device *dev)		\
{									\
	SAM0_UART_IRQ_CONNECT(n, 0);					\
	SAM0_UART_IRQ_CONNECT(n, 1);					\
	SAM0_UART_IRQ_CONNECT(n, 2);					\
	SAM0_UART_IRQ_CONNECT(n, 3);					\
}
#else
#define UART_SAM0_IRQ_HANDLER(n)					\
static void uart_sam0_irq_config_##n(const struct device *dev)		\
{									\
	SAM0_UART_IRQ_CONNECT(n, 0);					\
}
#endif
#else
#define UART_SAM0_IRQ_HANDLER_DECL(n)
#define UART_SAM0_IRQ_HANDLER_FUNC(n)
#define UART_SAM0_IRQ_HANDLER(n)
#endif

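/*
 * Resolve the DMA controller, trigger sources, and channel numbers from
 * the devicetree "dmas" property. A channel value of 0xFF is treated by
 * the driver as "no DMA channel configured".
 */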
#if CONFIG_UART_SAM0_ASYNC
#define UART_SAM0_DMA_CHANNELS(n)					\
	.dma_dev = DEVICE_DT_GET(ATMEL_SAM0_DT_INST_DMA_CTLR(n, tx)),	\
	.tx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, tx),	\
	.tx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, tx),	\
	.rx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, rx),	\
	.rx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, rx),
#else
#define UART_SAM0_DMA_CHANNELS(n)
#endif

#define UART_SAM0_SERCOM_PADS(n) \
	(DT_INST_PROP(n, rxpo) << SERCOM_USART_CTRLA_RXPO_Pos) |	\
	(DT_INST_PROP(n, txpo) << SERCOM_USART_CTRLA_TXPO_Pos)

#define UART_SAM0_SERCOM_COLLISION_DETECT(n) \
	(DT_INST_PROP(n, collision_detection))

#ifdef MCLK
#define UART_SAM0_CONFIG_DEFN(n)					\
static const struct uart_sam0_dev_cfg uart_sam0_config_##n = {		\
	.regs = (SercomUsart *)DT_INST_REG_ADDR(n),			\
	.baudrate = DT_INST_PROP(n, current_speed),			\
	.mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(n),	\
	.mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, mclk, bit)),	\
	.gclk_core_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, periph_ch),\
	.pads = UART_SAM0_SERCOM_PADS(n),				\
	.collision_detect = UART_SAM0_SERCOM_COLLISION_DETECT(n),	\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	UART_SAM0_DMA_CHANNELS(n)					\
}
#else
#define UART_SAM0_CONFIG_DEFN(n)					\
static const struct uart_sam0_dev_cfg uart_sam0_config_##n = {		\
	.regs = (SercomUsart *)DT_INST_REG_ADDR(n),			\
	.baudrate = DT_INST_PROP(n, current_speed),			\
	.pm_apbcmask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, pm, bit)),	\
	.gclk_clkctrl_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, clkctrl_id),\
	.pads = UART_SAM0_SERCOM_PADS(n),				\
	.collision_detect = UART_SAM0_SERCOM_COLLISION_DETECT(n),	\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
	UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	UART_SAM0_DMA_CHANNELS(n)					\
}
#endif

#define UART_SAM0_DEVICE_INIT(n)					\
PINCTRL_DT_INST_DEFINE(n);						\
static struct uart_sam0_dev_data uart_sam0_data_##n;			\
UART_SAM0_IRQ_HANDLER_DECL(n);						\
UART_SAM0_CONFIG_DEFN(n);						\
DEVICE_DT_INST_DEFINE(n, uart_sam0_init, NULL,				\
		    &uart_sam0_data_##n,				\
		    &uart_sam0_config_##n, PRE_KERNEL_1,		\
		    CONFIG_SERIAL_INIT_PRIORITY,			\
		    &uart_sam0_driver_api);				\
UART_SAM0_IRQ_HANDLER(n)

DT_INST_FOREACH_STATUS_OKAY(UART_SAM0_DEVICE_INIT)