/*
 * Copyright (c) 2016-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Driver for Nordic Semiconductor nRF5X UART
 */

#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/pm/device.h>
#include <zephyr/irq.h>
#include <soc.h>
#include <hal/nrf_uart.h>

/*
 * Extract information from devicetree.
 *
 * This driver only supports one instance of this IP block, so the
 * instance number is always 0.
 */
#define DT_DRV_COMPAT nordic_nrf_uart

#define PROP(prop) DT_INST_PROP(0, prop)
#define HAS_PROP(prop) DT_INST_NODE_HAS_PROP(0, prop)

#define BAUDRATE PROP(current_speed)

#define DISABLE_RX PROP(disable_rx)
#define HW_FLOW_CONTROL_AVAILABLE PROP(hw_flow_control)

#define IRQN DT_INST_IRQN(0)
#define IRQ_PRIO DT_INST_IRQ(0, priority)

static NRF_UART_Type *const uart0_addr = (NRF_UART_Type *)DT_INST_REG_ADDR(0);

struct uart_nrfx_config {
	const struct pinctrl_dev_config *pcfg;
};

/* Device data structure */
struct uart_nrfx_data {
	struct uart_config uart_config;
};

#ifdef CONFIG_UART_0_ASYNC
static struct {
	uart_callback_t callback;
	void *user_data;

	uint8_t *rx_buffer;
	uint8_t *rx_secondary_buffer;
	size_t rx_buffer_length;
	size_t rx_secondary_buffer_length;
	volatile size_t rx_counter;
	volatile size_t rx_offset;
	int32_t rx_timeout;
	struct k_timer rx_timeout_timer;
	bool rx_enabled;

	bool tx_abort;
	const uint8_t *volatile tx_buffer;
	/* note: this is aliased with atomic_t in uart_nrfx_poll_out() */
	unsigned long tx_buffer_length;
	volatile size_t tx_counter;
#if HW_FLOW_CONTROL_AVAILABLE
	int32_t tx_timeout;
	struct k_timer tx_timeout_timer;
#endif
} uart0_cb;
#endif /* CONFIG_UART_0_ASYNC */

#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN

static uart_irq_callback_user_data_t irq_callback; /**< Callback function pointer */
static void *irq_cb_data; /**< Callback function arg */

/* Variable used to override the state of the TXDRDY event in the initial state
 * of the driver. This event is not set by the hardware until the first byte is
 * sent, and we want to use it as an indication of whether the transmitter is
 * ready to accept a new byte.
 */
static volatile uint8_t uart_sw_event_txdrdy;
static volatile bool disable_tx_irq;

#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */

static bool event_txdrdy_check(void)
{
	return (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
		|| uart_sw_event_txdrdy
#endif
	       );
}

static void event_txdrdy_clear(void)
{
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	uart_sw_event_txdrdy = 0U;
#endif
}


/**
 * @brief Set the baud rate
 *
 * This routine sets the given baud rate for the UART.
 *
 * @param dev UART device struct
 * @param baudrate Baud rate
 *
 * @retval 0 on success.
 * @retval -EINVAL for an invalid baudrate.
 */

static int baudrate_set(const struct device *dev, uint32_t baudrate)
{
	nrf_uart_baudrate_t nrf_baudrate; /* calculated baudrate divisor */

	switch (baudrate) {
	case 300:
		/* value not supported by Nordic HAL */
		nrf_baudrate = 0x00014000;
		break;
	case 600:
		/* value not supported by Nordic HAL */
		nrf_baudrate = 0x00027000;
		break;
	case 1200:
		nrf_baudrate = NRF_UART_BAUDRATE_1200;
		break;
	case 2400:
		nrf_baudrate = NRF_UART_BAUDRATE_2400;
		break;
	case 4800:
		nrf_baudrate = NRF_UART_BAUDRATE_4800;
		break;
	case 9600:
		nrf_baudrate = NRF_UART_BAUDRATE_9600;
		break;
	case 14400:
		nrf_baudrate = NRF_UART_BAUDRATE_14400;
		break;
	case 19200:
		nrf_baudrate = NRF_UART_BAUDRATE_19200;
		break;
	case 28800:
		nrf_baudrate = NRF_UART_BAUDRATE_28800;
		break;
#if defined(UART_BAUDRATE_BAUDRATE_Baud31250)
	case 31250:
		nrf_baudrate = NRF_UART_BAUDRATE_31250;
		break;
#endif
	case 38400:
		nrf_baudrate = NRF_UART_BAUDRATE_38400;
		break;
#if defined(UART_BAUDRATE_BAUDRATE_Baud56000)
	case 56000:
		nrf_baudrate = NRF_UART_BAUDRATE_56000;
		break;
#endif
	case 57600:
		nrf_baudrate = NRF_UART_BAUDRATE_57600;
		break;
	case 76800:
		nrf_baudrate = NRF_UART_BAUDRATE_76800;
		break;
	case 115200:
		nrf_baudrate = NRF_UART_BAUDRATE_115200;
		break;
	case 230400:
		nrf_baudrate = NRF_UART_BAUDRATE_230400;
		break;
	case 250000:
		nrf_baudrate = NRF_UART_BAUDRATE_250000;
		break;
	case 460800:
		nrf_baudrate = NRF_UART_BAUDRATE_460800;
		break;
	case 921600:
		nrf_baudrate = NRF_UART_BAUDRATE_921600;
		break;
	case 1000000:
		nrf_baudrate = NRF_UART_BAUDRATE_1000000;
		break;
	default:
		return -EINVAL;
	}

	nrf_uart_baudrate_set(uart0_addr, nrf_baudrate);

	return 0;
}

/**
 * @brief Poll the device for input.
 *
 * @param dev UART device struct
 * @param c Pointer to character
 *
 * @return 0 if a character arrived, -1 if the input buffer is empty.
 */

static int uart_nrfx_poll_in(const struct device *dev, unsigned char *c)
{
	if (!nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
		return -1;
	}

	/* Clear the interrupt */
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

	/* got a character */
	*c = nrf_uart_rxd_get(uart0_addr);

	return 0;
}

#ifdef CONFIG_UART_0_ASYNC
static void uart_nrfx_isr(const struct device *dev);
#endif

/**
 * @brief Output a character in polled mode.
 *
 * @param dev UART device struct
 * @param c Character to send
 */
static void uart_nrfx_poll_out(const struct device *dev, unsigned char c)
{
	atomic_t *lock;
#ifdef CONFIG_UART_0_ASYNC
	while (uart0_cb.tx_buffer) {
		/* If there is an ongoing asynchronous transmission and we are
		 * in an ISR, call the UART interrupt routine; otherwise busy
		 * wait until the transmission is finished.
		 */
		if (k_is_in_isr()) {
			uart_nrfx_isr(dev);
		}
	}
	/* Use tx_buffer_length as lock, this way uart_nrfx_tx will
	 * return -EBUSY during poll_out.
	 */
	lock = &uart0_cb.tx_buffer_length;
#else
	static atomic_val_t poll_out_lock;

	lock = &poll_out_lock;
#endif

	if (!k_is_in_isr()) {
		uint8_t safety_cnt = 100;

		while (atomic_cas((atomic_t *) lock,
				  (atomic_val_t) 0,
				  (atomic_val_t) 1) == false) {
			if (IS_ENABLED(CONFIG_MULTITHREADING)) {
				/* k_sleep allows other threads to execute and finish
				 * their transactions.
				 */
				k_msleep(1);
			} else {
				k_busy_wait(1000);
			}
			if (--safety_cnt == 0) {
				break;
			}
		}
	} else {
		*lock = 1;
	}
	/* Reset the transmitter ready state. */
	event_txdrdy_clear();

	/* Activate the transmitter. */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);

	/* Send the provided character. */
	nrf_uart_txd_set(uart0_addr, (uint8_t)c);

	/* Wait until the transmitter is ready, i.e. the character is sent. */
	bool res;

	NRFX_WAIT_FOR(event_txdrdy_check(), 10000, 1, res);

	/* Deactivate the transmitter so that it does not needlessly
	 * consume power.
	 */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

	/* Release the lock. */
	*lock = 0;
}

/** Get and clear the pending UART error flags. */
static int uart_nrfx_err_check(const struct device *dev)
{
	/* register bitfields map to the defines in uart.h */
	return nrf_uart_errorsrc_get_and_clear(uart0_addr);
}

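/*
 * Apply a runtime configuration (baud rate, parity, stop bits, flow control)
 * to the peripheral. Only 8 data bits are supported, and RTS/CTS flow control
 * is accepted only when the hw-flow-control devicetree property is set.
 *
 * A sketch of how an application might reach this through the generic UART
 * API (assuming a devicetree node labeled "uart0" and
 * CONFIG_UART_USE_RUNTIME_CONFIGURE enabled):
 *
 *	const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *	struct uart_config cfg = {
 *		.baudrate  = 115200,
 *		.parity    = UART_CFG_PARITY_NONE,
 *		.stop_bits = UART_CFG_STOP_BITS_1,
 *		.data_bits = UART_CFG_DATA_BITS_8,
 *		.flow_ctrl = UART_CFG_FLOW_CTRL_NONE,
 *	};
 *	uart_configure(uart, &cfg); // dispatches to this driver's .configure
 */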
static int uart_nrfx_configure(const struct device *dev,
			       const struct uart_config *cfg)
{
	struct uart_nrfx_data *data = dev->data;
	nrf_uart_config_t uart_cfg;

#if defined(UART_CONFIG_STOP_Msk)
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uart_cfg.stop = NRF_UART_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uart_cfg.stop = NRF_UART_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uart_cfg.hwfc = NRF_UART_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		if (HW_FLOW_CONTROL_AVAILABLE) {
			uart_cfg.hwfc = NRF_UART_HWFC_ENABLED;
		} else {
			return -ENOTSUP;
		}
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UART_CONFIG_PARITYTYPE_Msk)
	uart_cfg.paritytype = NRF_UART_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uart_cfg.parity = NRF_UART_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uart_cfg.parity = NRF_UART_PARITY_INCLUDED;
		break;
#if defined(UART_CONFIG_PARITYTYPE_Msk)
	case UART_CFG_PARITY_ODD:
		uart_cfg.parity = NRF_UART_PARITY_INCLUDED;
		uart_cfg.paritytype = NRF_UART_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(dev, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}

	nrf_uart_configure(uart0_addr, &uart_cfg);

	data->uart_config = *cfg;

	return 0;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uart_nrfx_config_get(const struct device *dev,
				struct uart_config *cfg)
{
	struct uart_nrfx_data *data = dev->data;

	*cfg = data->uart_config;
	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

#ifdef CONFIG_UART_0_ASYNC

static void user_callback(const struct device *dev, struct uart_event *event)
{
	if (uart0_cb.callback) {
		uart0_cb.callback(dev, event, uart0_cb.user_data);
	}
}

static int uart_nrfx_callback_set(const struct device *dev,
				  uart_callback_t callback,
				  void *user_data)
{
	uart0_cb.callback = callback;
	uart0_cb.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) && defined(CONFIG_UART_0_INTERRUPT_DRIVEN)
	irq_callback = NULL;
	irq_cb_data = NULL;
#endif

	return 0;
}

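/* Asynchronous TX: claim the single TX transfer slot by atomically setting
 * tx_buffer_length, start the transmitter and push the first byte; the
 * remaining bytes are fed from tx_isr() on each TXDRDY event.
 */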
static int uart_nrfx_tx(const struct device *dev, const uint8_t *buf,
			size_t len,
			int32_t timeout)
{
	if (atomic_cas((atomic_t *) &uart0_cb.tx_buffer_length,
		       (atomic_val_t) 0,
		       (atomic_val_t) len) == false) {
		return -EBUSY;
	}

	uart0_cb.tx_buffer = buf;
#if HW_FLOW_CONTROL_AVAILABLE
	uart0_cb.tx_timeout = timeout;
#endif
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

	uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter];

	nrf_uart_txd_set(uart0_addr, txd);

	return 0;
}

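/* Abort an ongoing asynchronous transmission: stop the transmitter, reset the
 * TX bookkeeping and report UART_TX_ABORTED with the number of bytes already
 * handed to the hardware.
 */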
static int uart_nrfx_tx_abort(const struct device *dev)
{
	if (uart0_cb.tx_buffer_length == 0) {
		return -EINVAL;
	}
#if HW_FLOW_CONTROL_AVAILABLE
	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.tx_timeout_timer);
	}
#endif
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

	struct uart_event evt = {
		.type = UART_TX_ABORTED,
		.data.tx.buf = uart0_cb.tx_buffer,
		.data.tx.len = uart0_cb.tx_counter
	};

	uart0_cb.tx_buffer_length = 0;
	uart0_cb.tx_counter = 0;

	user_callback(dev, &evt);

	return 0;
}

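/* Start asynchronous reception into the provided buffer. Bytes are collected
 * from the RXDRDY interrupt; the timeout (in microseconds) is restarted on
 * every received byte and generates UART_RX_RDY when it expires.
 */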
static int uart_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
			       size_t len,
			       int32_t timeout)
{
	if (DISABLE_RX) {
		__ASSERT(false, "TX only UART instance");
		return -ENOTSUP;
	}

	if (uart0_cb.rx_buffer_length != 0) {
		return -EBUSY;
	}

	uart0_cb.rx_enabled = 1;
	uart0_cb.rx_buffer = buf;
	uart0_cb.rx_buffer_length = len;
	uart0_cb.rx_counter = 0;
	uart0_cb.rx_secondary_buffer_length = 0;
	uart0_cb.rx_timeout = timeout;

	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO);
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX);
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY |
				       NRF_UART_INT_MASK_ERROR |
				       NRF_UART_INT_MASK_RXTO);

	return 0;
}

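/* Provide the next RX buffer in response to UART_RX_BUF_REQUEST; it is stored
 * as the secondary buffer and taken into use once the current one fills up.
 */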
static int uart_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				size_t len)
{
	int err;
	unsigned int key = irq_lock();

	if (!uart0_cb.rx_enabled) {
		err = -EACCES;
	} else if (uart0_cb.rx_secondary_buffer_length != 0) {
		err = -EBUSY;
	} else {
		uart0_cb.rx_secondary_buffer = buf;
		uart0_cb.rx_secondary_buffer_length = len;
		err = 0;
	}

	irq_unlock(key);

	return err;
}

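/* Stop reception; completion is signalled from rxto_isr() once RXTO fires. */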
static int uart_nrfx_rx_disable(const struct device *dev)
{
	if (uart0_cb.rx_buffer_length == 0) {
		return -EFAULT;
	}

	uart0_cb.rx_enabled = 0;
	if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.rx_timeout_timer);
	}
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);

	return 0;
}

static void rx_rdy_evt(const struct device *dev)
{
	struct uart_event event;
	size_t rx_cnt = uart0_cb.rx_counter;

	event.type = UART_RX_RDY;
	event.data.rx.buf = uart0_cb.rx_buffer;
	event.data.rx.len = rx_cnt - uart0_cb.rx_offset;
	event.data.rx.offset = uart0_cb.rx_offset;

	uart0_cb.rx_offset = rx_cnt;

	user_callback(dev, &event);
}

static void buf_released_evt(const struct device *dev)
{
	struct uart_event event = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = uart0_cb.rx_buffer
	};
	user_callback(dev, &event);
}

static void rx_disabled_evt(const struct device *dev)
{
	struct uart_event event = {
		.type = UART_RX_DISABLED
	};
	user_callback(dev, &event);
}

static void rx_reset_state(void)
{
	nrf_uart_int_disable(uart0_addr,
			     NRF_UART_INT_MASK_RXDRDY |
			     NRF_UART_INT_MASK_ERROR |
			     NRF_UART_INT_MASK_RXTO);
	uart0_cb.rx_buffer_length = 0;
	uart0_cb.rx_enabled = 0;
	uart0_cb.rx_counter = 0;
	uart0_cb.rx_offset = 0;
	uart0_cb.rx_secondary_buffer_length = 0;
}

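/* RXDRDY handler: store the received byte, (re)start the RX timeout and, when
 * the buffer is full, either switch to the secondary buffer or stop reception.
 */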
static void rx_isr(const struct device *dev)
{
	struct uart_event event;

	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

	if (!uart0_cb.rx_buffer_length || !uart0_cb.rx_enabled) {
		/* Byte received when receiving is disabled - data lost. */
		nrf_uart_rxd_get(uart0_addr);
	} else {
		if (uart0_cb.rx_counter == 0 &&
		    uart0_cb.rx_secondary_buffer_length == 0) {
			event.type = UART_RX_BUF_REQUEST;
			user_callback(dev, &event);
		}
		uart0_cb.rx_buffer[uart0_cb.rx_counter] =
			nrf_uart_rxd_get(uart0_addr);
		uart0_cb.rx_counter++;
		if (uart0_cb.rx_timeout == 0) {
			rx_rdy_evt(dev);
		} else if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
			k_timer_start(&uart0_cb.rx_timeout_timer,
				      K_USEC(uart0_cb.rx_timeout),
				      K_NO_WAIT);
		}
	}

	if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) {
		if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
			k_timer_stop(&uart0_cb.rx_timeout_timer);
		}
		rx_rdy_evt(dev);

		unsigned int key = irq_lock();

		if (uart0_cb.rx_secondary_buffer_length == 0) {
			uart0_cb.rx_enabled = 0;
		}
		irq_unlock(key);

		if (uart0_cb.rx_secondary_buffer_length) {
			buf_released_evt(dev);
			/* Switch to secondary buffer. */
			uart0_cb.rx_buffer_length =
				uart0_cb.rx_secondary_buffer_length;
			uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer;
			uart0_cb.rx_secondary_buffer_length = 0;
			uart0_cb.rx_counter = 0;
			uart0_cb.rx_offset = 0;

			event.type = UART_RX_BUF_REQUEST;
			user_callback(dev, &event);
		} else {
			uart_nrfx_rx_disable(dev);
		}
	}
}

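/* TXDRDY handler: send the next byte of the asynchronous transfer, or finish
 * it and report UART_TX_DONE once all bytes have been sent or an abort was
 * requested.
 */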
static void tx_isr(const struct device *dev)
{
	uart0_cb.tx_counter++;
	if (uart0_cb.tx_counter < uart0_cb.tx_buffer_length &&
	    !uart0_cb.tx_abort) {
#if HW_FLOW_CONTROL_AVAILABLE
		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
			k_timer_start(&uart0_cb.tx_timeout_timer,
				      K_USEC(uart0_cb.tx_timeout),
				      K_NO_WAIT);
		}
#endif
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);

		uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter];

		nrf_uart_txd_set(uart0_addr, txd);
	} else {
#if HW_FLOW_CONTROL_AVAILABLE

		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
			k_timer_stop(&uart0_cb.tx_timeout_timer);
		}
#endif
		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
		struct uart_event event = {
			.type = UART_TX_DONE,
			.data.tx.buf = uart0_cb.tx_buffer,
			.data.tx.len = uart0_cb.tx_counter
		};
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
		uart0_cb.tx_buffer_length = 0;
		uart0_cb.tx_counter = 0;
		uart0_cb.tx_buffer = NULL;

		nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);
		user_callback(dev, &event);
	}
}

#define UART_ERROR_FROM_MASK(mask) \
	(mask & NRF_UART_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN \
	 : mask & NRF_UART_ERROR_PARITY_MASK ? UART_ERROR_PARITY \
	 : mask & NRF_UART_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING \
	 : mask & NRF_UART_ERROR_BREAK_MASK ? UART_BREAK \
	 : 0)

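/* ERROR handler: translate the ERRORSRC bits into a uart_rx_stop_reason,
 * report UART_RX_STOPPED with the data received so far and abort reception.
 */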
static void error_isr(const struct device *dev)
{
	if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.rx_timeout_timer);
	}
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);

	if (!uart0_cb.rx_enabled) {
		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);
	}
	struct uart_event event = {
		.type = UART_RX_STOPPED,
		.data.rx_stop.reason =
			UART_ERROR_FROM_MASK(
				nrf_uart_errorsrc_get_and_clear(uart0_addr)),
		.data.rx_stop.data.len = uart0_cb.rx_counter
					 - uart0_cb.rx_offset,
		.data.rx_stop.data.offset = uart0_cb.rx_offset,
		.data.rx_stop.data.buf = uart0_cb.rx_buffer
	};

	user_callback(dev, &event);
	/* Abort transfer. */
	uart_nrfx_rx_disable(dev);
}

/*
 * In nRF hardware the RX timeout (RXTO) event can occur only after the
 * peripheral has been stopped, so it is used as a sign that the peripheral
 * has finished its operation and is disabled.
 */
static void rxto_isr(const struct device *dev)
{
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO);

	/* Send rxrdy if there is any data pending. */
	if (uart0_cb.rx_counter - uart0_cb.rx_offset) {
		rx_rdy_evt(dev);
	}

	buf_released_evt(dev);
	if (uart0_cb.rx_secondary_buffer_length) {
		uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer;
		buf_released_evt(dev);
	}

	rx_reset_state();
	rx_disabled_evt(dev);
}

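/* Top-level ISR for the asynchronous API: dispatch to the individual event
 * handlers above.
 */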
void uart_nrfx_isr(const struct device *uart)
{
	if (nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_ERROR) &&
	    nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) {
		error_isr(uart);
	} else if (nrf_uart_int_enable_check(uart0_addr,
					     NRF_UART_INT_MASK_RXDRDY) &&
		   nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
		rx_isr(uart);
	}

	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)
	    && nrf_uart_int_enable_check(uart0_addr,
					 NRF_UART_INT_MASK_TXDRDY)) {
		tx_isr(uart);
	}

	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXTO)) {
		rxto_isr(uart);
	}
}

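/* RX timeout expired (no new byte within rx_timeout): report the data
 * received so far.
 */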
static void rx_timeout(struct k_timer *timer)
{
	rx_rdy_evt(DEVICE_DT_INST_GET(0));
}

#if HW_FLOW_CONTROL_AVAILABLE
static void tx_timeout(struct k_timer *timer)
{
	struct uart_event evt;

	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.tx_timeout_timer);
	}
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
	evt.type = UART_TX_ABORTED;
	evt.data.tx.buf = uart0_cb.tx_buffer;
	evt.data.tx.len = uart0_cb.tx_buffer_length;
	uart0_cb.tx_buffer_length = 0;
	uart0_cb.tx_counter = 0;
	user_callback(DEVICE_DT_INST_GET(0), &evt);
}
#endif

#endif /* CONFIG_UART_0_ASYNC */


#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN

/** Interrupt driven FIFO fill function */
static int uart_nrfx_fifo_fill(const struct device *dev,
			       const uint8_t *tx_data,
			       int len)
{
	int num_tx = 0U;

	while ((len - num_tx > 0) &&
	       event_txdrdy_check()) {

		/* Clear the interrupt */
		event_txdrdy_clear();

		/* Send a character */
		nrf_uart_txd_set(uart0_addr, (uint8_t)tx_data[num_tx++]);
	}

	return (int)num_tx;
}

/** Interrupt driven FIFO read function */
static int uart_nrfx_fifo_read(const struct device *dev,
			       uint8_t *rx_data,
			       const int size)
{
	int num_rx = 0U;

	while ((size - num_rx > 0) &&
	       nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
		/* Clear the interrupt */
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

		/* Receive a character */
		rx_data[num_rx++] = (uint8_t)nrf_uart_rxd_get(uart0_addr);
	}

	return num_rx;
}

/** Interrupt driven transfer enabling function */
static void uart_nrfx_irq_tx_enable(const struct device *dev)
{
	uint32_t key;

	disable_tx_irq = false;

	/* Indicate that this device started a transaction that should not be
	 * interrupted by putting the SoC into the deep sleep mode.
	 */
	pm_device_busy_set(dev);

	/* Activate the transmitter. */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);

	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

	/* A critical section is used to avoid a UART-related interrupt that
	 * could occur between the check below and the call that forces the
	 * interrupt.
	 */
	key = irq_lock();
	if (uart_sw_event_txdrdy) {
		/* Due to a HW limitation, the first TXDRDY interrupt has to
		 * be triggered by software.
		 */
		NVIC_SetPendingIRQ(IRQN);
	}
	irq_unlock(key);
}

/** Interrupt driven transfer disabling function */
static void uart_nrfx_irq_tx_disable(const struct device *dev)
{
	/* Disable TX interrupt in uart_nrfx_isr() when transmission is done. */
	disable_tx_irq = true;
}

/** Interrupt driven receiver enabling function */
static void uart_nrfx_irq_rx_enable(const struct device *dev)
{
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY);
}

/** Interrupt driven receiver disabling function */
static void uart_nrfx_irq_rx_disable(const struct device *dev)
{
	nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_RXDRDY);
}

/** Interrupt driven transfer empty function */
static int uart_nrfx_irq_tx_ready_complete(const struct device *dev)
{
	/* Signal TX readiness only when the TX interrupt is enabled and there
	 * is no pending request to disable it. Note that this function may get
	 * called after the TX interrupt is requested to be disabled but before
	 * the disabling is actually performed (in the IRQ handler).
	 */
	bool ready = nrf_uart_int_enable_check(uart0_addr,
					       NRF_UART_INT_MASK_TXDRDY) &&
		     !disable_tx_irq &&
		     event_txdrdy_check();
	return ready ? 1 : 0;
}

/** Interrupt driven receiver ready function */
static int uart_nrfx_irq_rx_ready(const struct device *dev)
{
	return nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY);
}

/** Interrupt driven error enabling function */
static void uart_nrfx_irq_err_enable(const struct device *dev)
{
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_ERROR);
}

/** Interrupt driven error disabling function */
static void uart_nrfx_irq_err_disable(const struct device *dev)
{
	nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_ERROR);
}

/** Interrupt driven pending status function */
static int uart_nrfx_irq_is_pending(const struct device *dev)
{
	return ((nrf_uart_int_enable_check(uart0_addr,
					   NRF_UART_INT_MASK_TXDRDY) &&
		 uart_nrfx_irq_tx_ready_complete(dev))
		||
		(nrf_uart_int_enable_check(uart0_addr,
					   NRF_UART_INT_MASK_RXDRDY) &&
		 uart_nrfx_irq_rx_ready(dev)));
}

/** Interrupt driven interrupt update function */
static int uart_nrfx_irq_update(const struct device *dev)
{
	return 1;
}

/** Set the callback function */
static void uart_nrfx_irq_callback_set(const struct device *dev,
				       uart_irq_callback_user_data_t cb,
				       void *cb_data)
{
	(void)dev;
	irq_callback = cb;
	irq_cb_data = cb_data;

#if defined(CONFIG_UART_0_ASYNC) && defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	uart0_cb.callback = NULL;
	uart0_cb.user_data = NULL;
#endif
}

/**
 * @brief Interrupt service routine.
 *
 * This simply calls the callback function, if one exists.
 *
 * @param dev UART device struct
 */
static void uart_nrfx_isr(const struct device *dev)
{
	if (disable_tx_irq &&
	    nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)) {
		nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

		/* Deactivate the transmitter so that it does not needlessly
		 * consume power.
		 */
		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

		/* The transaction is over. It is okay to enter the deep sleep
		 * mode if needed.
		 */
		pm_device_busy_clear(dev);

		disable_tx_irq = false;

		return;
	}

	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) {
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
	}

	if (irq_callback) {
		irq_callback(dev, irq_cb_data);
	}
}
#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */

/**
 * @brief Initialize UART channel
 *
 * This routine is called to reset the peripheral to a quiescent state.
 * It is assumed that this function is called only once per UART.
 *
 * @param dev UART device struct
 *
 * @return 0 on success
 */
static int uart_nrfx_init(const struct device *dev)
{
	const struct uart_nrfx_config *config = dev->config;
	struct uart_nrfx_data *data = dev->data;
	int err;

	nrf_uart_disable(uart0_addr);

	err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	/* Set initial configuration */
	err = uart_nrfx_configure(dev, &data->uart_config);
	if (err) {
		return err;
	}

	/* Enable the UART and activate its receiver. With the current API
	 * the receiver needs to be active all the time. The transmitter
	 * will be activated when there is something to send.
	 */
	nrf_uart_enable(uart0_addr);

	if (!DISABLE_RX) {
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX);
	}

#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	/* Simulate that the TXDRDY event is set, so that the transmitter status
	 * is indicated correctly.
	 */
	uart_sw_event_txdrdy = 1U;
#endif

#if defined(CONFIG_UART_0_ASYNC) || defined(CONFIG_UART_0_INTERRUPT_DRIVEN)

	IRQ_CONNECT(IRQN,
		    IRQ_PRIO,
		    uart_nrfx_isr,
		    DEVICE_DT_INST_GET(0),
		    0);
	irq_enable(IRQN);
#endif

#ifdef CONFIG_UART_0_ASYNC
	k_timer_init(&uart0_cb.rx_timeout_timer, rx_timeout, NULL);
#if HW_FLOW_CONTROL_AVAILABLE
	k_timer_init(&uart0_cb.tx_timeout_timer, tx_timeout, NULL);
#endif
#endif
	return 0;
}

/* Common function: uart_nrfx_irq_tx_ready_complete is used for two API entries
 * because Nordic hardware does not distinguish between them.
 */
static DEVICE_API(uart, uart_nrfx_uart_driver_api) = {
#ifdef CONFIG_UART_0_ASYNC
	.callback_set = uart_nrfx_callback_set,
	.tx = uart_nrfx_tx,
	.tx_abort = uart_nrfx_tx_abort,
	.rx_enable = uart_nrfx_rx_enable,
	.rx_buf_rsp = uart_nrfx_rx_buf_rsp,
	.rx_disable = uart_nrfx_rx_disable,
#endif /* CONFIG_UART_0_ASYNC */
	.poll_in = uart_nrfx_poll_in,
	.poll_out = uart_nrfx_poll_out,
	.err_check = uart_nrfx_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = uart_nrfx_configure,
	.config_get = uart_nrfx_config_get,
#endif
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	.fifo_fill = uart_nrfx_fifo_fill,
	.fifo_read = uart_nrfx_fifo_read,
	.irq_tx_enable = uart_nrfx_irq_tx_enable,
	.irq_tx_disable = uart_nrfx_irq_tx_disable,
	.irq_tx_ready = uart_nrfx_irq_tx_ready_complete,
	.irq_rx_enable = uart_nrfx_irq_rx_enable,
	.irq_rx_disable = uart_nrfx_irq_rx_disable,
	.irq_tx_complete = uart_nrfx_irq_tx_ready_complete,
	.irq_rx_ready = uart_nrfx_irq_rx_ready,
	.irq_err_enable = uart_nrfx_irq_err_enable,
	.irq_err_disable = uart_nrfx_irq_err_disable,
	.irq_is_pending = uart_nrfx_irq_is_pending,
	.irq_update = uart_nrfx_irq_update,
	.irq_callback_set = uart_nrfx_irq_callback_set,
#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */
};

#ifdef CONFIG_PM_DEVICE
static int uart_nrfx_pm_action(const struct device *dev,
			       enum pm_device_action action)
{
	const struct uart_nrfx_config *config = dev->config;
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
		if (ret < 0) {
			return ret;
		}

		nrf_uart_enable(uart0_addr);
		if (!DISABLE_RX) {
			nrf_uart_task_trigger(uart0_addr,
					      NRF_UART_TASK_STARTRX);
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		nrf_uart_disable(uart0_addr);
		ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
		if (ret < 0) {
			return ret;
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* CONFIG_PM_DEVICE */

PINCTRL_DT_INST_DEFINE(0);

NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(DT_DRV_INST(0));

static const struct uart_nrfx_config uart_nrfx_uart0_config = {
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};

static struct uart_nrfx_data uart_nrfx_uart0_data = {
	.uart_config = {
		.stop_bits = UART_CFG_STOP_BITS_1,
		.data_bits = UART_CFG_DATA_BITS_8,
		.baudrate = BAUDRATE,
#ifdef CONFIG_UART_0_NRF_PARITY_BIT
		.parity = UART_CFG_PARITY_EVEN,
#else
		.parity = UART_CFG_PARITY_NONE,
#endif /* CONFIG_UART_0_NRF_PARITY_BIT */
		.flow_ctrl = PROP(hw_flow_control) ?
			     UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE,
	}
};

PM_DEVICE_DT_INST_DEFINE(0, uart_nrfx_pm_action);

DEVICE_DT_INST_DEFINE(0,
		      uart_nrfx_init,
		      PM_DEVICE_DT_INST_GET(0),
		      &uart_nrfx_uart0_data,
		      &uart_nrfx_uart0_config,
		      /* Initialize UART device before UART console. */
		      PRE_KERNEL_1,
		      CONFIG_SERIAL_INIT_PRIORITY,
		      &uart_nrfx_uart_driver_api);