/*
 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Driver for Nordic Semiconductor nRF UARTE
 */

#include <zephyr/drivers/uart.h>
#include <zephyr/pm/device.h>
#include <hal/nrf_uarte.h>
#include <nrfx_timer.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <helpers/nrfx_gppi.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/irq.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL);

#include <zephyr/drivers/pinctrl.h>

/* Generalize PPI or DPPI channel management */
#if defined(PPI_PRESENT)
#include <nrfx_ppi.h>
#define gppi_channel_t nrf_ppi_channel_t
#define gppi_channel_alloc nrfx_ppi_channel_alloc
#define gppi_channel_enable nrfx_ppi_channel_enable
#elif defined(DPPI_PRESENT)
#include <nrfx_dppi.h>
#define gppi_channel_t uint8_t
#define gppi_channel_alloc nrfx_dppi_channel_alloc
#define gppi_channel_enable nrfx_dppi_channel_enable
#else
#error "No PPI or DPPI"
#endif

/* Execute macro f(x) for all instances. */
#define UARTE_FOR_EACH_INSTANCE(f, sep, off_code) \
	NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, _)

/* Determine if any instance is using interrupt driven API. */
#define IS_INT_DRIVEN(unused, prefix, i, _) \
	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
	 IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN))

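/* For example, on a device where only UARTE0 and UARTE1 are present, the
 * check below expands roughly to:
 *   (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE0) &&
 *    IS_ENABLED(CONFIG_UART_0_INTERRUPT_DRIVEN)) ||
 *   (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE1) &&
 *    IS_ENABLED(CONFIG_UART_1_INTERRUPT_DRIVEN)) || (0)
 */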
#if UARTE_FOR_EACH_INSTANCE(IS_INT_DRIVEN, (||), (0))
	#define UARTE_INTERRUPT_DRIVEN	1
#endif

/* Determine if any instance is not using asynchronous API. */
#define IS_NOT_ASYNC(unused, prefix, i, _) \
	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
	 !IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))

#if UARTE_FOR_EACH_INSTANCE(IS_NOT_ASYNC, (||), (0))
#define UARTE_ANY_NONE_ASYNC 1
#endif

/* Determine if any instance is using asynchronous API. */
#define IS_ASYNC(unused, prefix, i, _) \
	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
	 IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))

#if UARTE_FOR_EACH_INSTANCE(IS_ASYNC, (||), (0))
#define UARTE_ANY_ASYNC 1
#endif

/* Determine if any instance is using asynchronous API with HW byte counting. */
#define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC)

#if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0))
#define UARTE_HW_ASYNC 1
#endif

/* Determine if any instance is using enhanced poll_out feature. */
#define IS_ENHANCED_POLL_OUT(unused, prefix, i, _) \
	IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT)

#if UARTE_FOR_EACH_INSTANCE(IS_ENHANCED_POLL_OUT, (||), (0))
#define UARTE_ENHANCED_POLL_OUT 1
#endif

/*
 * The RX timeout is divided into time slabs; this define tells how many
 * divisions should be made. More divisions give higher timeout accuracy at
 * the cost of higher processor usage.
 */
#define RX_TIMEOUT_DIV 5
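
/* For example, a user timeout of 100 us is checked in 20 us slabs; see the
 * rx_timeout() handler below for the countdown logic.
 */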

/* Size of hardware fifo in RX path. */
#define UARTE_HW_RX_FIFO_SIZE 5

#ifdef UARTE_ANY_ASYNC
struct uarte_async_cb {
	uart_callback_t user_callback;
	void *user_data;

	const uint8_t *tx_buf;
	volatile size_t tx_size;
	const uint8_t *xfer_buf;
	size_t xfer_len;

	uint8_t *tx_cache;
	size_t tx_cache_offset;

	struct k_timer tx_timeout_timer;

	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_offset;
	uint8_t *rx_next_buf;
	size_t rx_next_buf_len;
	uint32_t rx_total_byte_cnt; /* Total number of bytes received */
	uint32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */
	int32_t rx_timeout; /* Timeout set by user */
	int32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
	int32_t rx_timeout_left; /* Current time left until user callback */
	struct k_timer rx_timeout_timer;
	union {
		gppi_channel_t ppi;
		uint32_t cnt;
	} rx_cnt;
	volatile int tx_amount;

	atomic_t low_power_mask;
	uint8_t rx_flush_buffer[UARTE_HW_RX_FIFO_SIZE];
	uint8_t rx_flush_cnt;
	volatile bool rx_enabled;
	volatile bool discard_rx_fifo;
	bool hw_rx_counting;
	bool pending_tx;
	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
	volatile bool is_in_irq;
};
#endif /* UARTE_ANY_ASYNC */

#ifdef UARTE_INTERRUPT_DRIVEN
struct uarte_nrfx_int_driven {
	uart_irq_callback_user_data_t cb; /**< Callback function pointer */
	void *cb_data; /**< Callback function arg */
	uint8_t *tx_buffer;
	uint16_t tx_buff_size;
	volatile bool disable_tx_irq;
#ifdef CONFIG_PM_DEVICE
	bool rx_irq_enabled;
#endif
	atomic_t fifo_fill_lock;
};
#endif

/* Device data structure */
struct uarte_nrfx_data {
	const struct device *dev;
	struct uart_config uart_config;
#ifdef UARTE_INTERRUPT_DRIVEN
	struct uarte_nrfx_int_driven *int_driven;
#endif
#ifdef UARTE_ANY_ASYNC
	struct uarte_async_cb *async;
#endif
	atomic_val_t poll_out_lock;
	uint8_t *char_out;
	uint8_t *rx_data;
	gppi_channel_t ppi_ch_endtx;
};

#define UARTE_LOW_POWER_TX BIT(0)
#define UARTE_LOW_POWER_RX BIT(1)

/* If enabled, pins are managed when going to low power mode. */
#define UARTE_CFG_FLAG_GPIO_MGMT   BIT(0)

/* If enabled, the ENDTX event is connected via (D)PPI to the STOPTX task. */
#define UARTE_CFG_FLAG_PPI_ENDTX   BIT(1)

/* If enabled, the UARTE peripheral is disabled when not used, which allows
 * achieving the lowest power consumption when idle.
 */
#define UARTE_CFG_FLAG_LOW_POWER   BIT(4)

/**
 * @brief Structure for UARTE configuration.
 */
struct uarte_nrfx_config {
	NRF_UARTE_Type *uarte_regs; /* Instance address */
	uint32_t flags;
	bool disable_rx;
	const struct pinctrl_dev_config *pcfg;
#ifdef UARTE_ANY_ASYNC
	nrfx_timer_t timer;
#endif
};

static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;

	return config->uarte_regs;
}

static void endtx_isr(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	unsigned int key = irq_lock();

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
	}

	irq_unlock(key);
}

#ifdef UARTE_ANY_NONE_ASYNC
/**
 * @brief Interrupt service routine.
 *
 * This simply calls the callback function, if one exists.
 *
 * @param arg Argument to ISR.
 */
static void uarte_nrfx_isr_int(const void *arg)
{
	const struct device *dev = arg;
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* If both the interrupt driven and asynchronous APIs are disabled,
	 * the UART interrupt is still used to stop TX, unless that is done
	 * using PPI.
	 */
	if (nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK) &&
		nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
		endtx_isr(dev);
	}

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		unsigned int key = irq_lock();

		if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
			nrf_uarte_disable(uarte);
		}

#ifdef UARTE_INTERRUPT_DRIVEN
		struct uarte_nrfx_data *data = dev->data;

		if (!data->int_driven || data->int_driven->fifo_fill_lock == 0)
#endif
		{
			nrf_uarte_int_disable(uarte,
					      NRF_UARTE_INT_TXSTOPPED_MASK);
		}

		irq_unlock(key);
	}

#ifdef UARTE_INTERRUPT_DRIVEN
	struct uarte_nrfx_data *data = dev->data;

	if (!data->int_driven) {
		return;
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
		data->int_driven->fifo_fill_lock = 0;
		if (data->int_driven->disable_tx_irq) {
			nrf_uarte_int_disable(uarte,
					      NRF_UARTE_INT_TXSTOPPED_MASK);
			data->int_driven->disable_tx_irq = false;
			return;
		}
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
	}

	if (data->int_driven->cb) {
		data->int_driven->cb(dev, data->int_driven->cb_data);
	}
#endif /* UARTE_INTERRUPT_DRIVEN */
}
#endif /* UARTE_ANY_NONE_ASYNC */

/**
 * @brief Set the baud rate
 *
 * This routine sets the given baud rate for the UARTE.
 *
 * @param dev UARTE device struct
 * @param baudrate Baud rate
 *
 * @return 0 on success or error code
 */
static int baudrate_set(const struct device *dev, uint32_t baudrate)
{
	nrf_uarte_baudrate_t nrf_baudrate; /* calculated baudrate divisor */
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	switch (baudrate) {
	case 300:
		/* value not supported by Nordic HAL */
		nrf_baudrate = 0x00014000;
		break;
	case 600:
		/* value not supported by Nordic HAL */
		nrf_baudrate = 0x00027000;
		break;
	case 1200:
		nrf_baudrate = NRF_UARTE_BAUDRATE_1200;
		break;
	case 2400:
		nrf_baudrate = NRF_UARTE_BAUDRATE_2400;
		break;
	case 4800:
		nrf_baudrate = NRF_UARTE_BAUDRATE_4800;
		break;
	case 9600:
		nrf_baudrate = NRF_UARTE_BAUDRATE_9600;
		break;
	case 14400:
		nrf_baudrate = NRF_UARTE_BAUDRATE_14400;
		break;
	case 19200:
		nrf_baudrate = NRF_UARTE_BAUDRATE_19200;
		break;
	case 28800:
		nrf_baudrate = NRF_UARTE_BAUDRATE_28800;
		break;
#if defined(UARTE_BAUDRATE_BAUDRATE_Baud31250)
	case 31250:
		nrf_baudrate = NRF_UARTE_BAUDRATE_31250;
		break;
#endif
	case 38400:
		nrf_baudrate = NRF_UARTE_BAUDRATE_38400;
		break;
#if defined(UARTE_BAUDRATE_BAUDRATE_Baud56000)
	case 56000:
		nrf_baudrate = NRF_UARTE_BAUDRATE_56000;
		break;
#endif
	case 57600:
		nrf_baudrate = NRF_UARTE_BAUDRATE_57600;
		break;
	case 76800:
		nrf_baudrate = NRF_UARTE_BAUDRATE_76800;
		break;
	case 115200:
		nrf_baudrate = NRF_UARTE_BAUDRATE_115200;
		break;
	case 230400:
		nrf_baudrate = NRF_UARTE_BAUDRATE_230400;
		break;
	case 250000:
		nrf_baudrate = NRF_UARTE_BAUDRATE_250000;
		break;
	case 460800:
		nrf_baudrate = NRF_UARTE_BAUDRATE_460800;
		break;
	case 921600:
		nrf_baudrate = NRF_UARTE_BAUDRATE_921600;
		break;
	case 1000000:
		nrf_baudrate = NRF_UARTE_BAUDRATE_1000000;
		break;
	default:
		return -EINVAL;
	}

	nrf_uarte_baudrate_set(uarte, nrf_baudrate);

	return 0;
}

static int uarte_nrfx_configure(const struct device *dev,
				const struct uart_config *cfg)
{
	struct uarte_nrfx_data *data = dev->data;
	nrf_uarte_config_t uarte_cfg;

#if NRF_UARTE_HAS_FRAME_TIMEOUT
	uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_DIS;
#endif

#if defined(UARTE_CONFIG_STOP_Msk)
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uarte_cfg.stop = NRF_UARTE_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uarte_cfg.stop = NRF_UARTE_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		break;
#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	case UART_CFG_PARITY_ODD:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(dev, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}

	nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg);

	data->uart_config = *cfg;

	return 0;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uarte_nrfx_config_get(const struct device *dev,
				 struct uart_config *cfg)
{
	struct uarte_nrfx_data *data = dev->data;

	*cfg = data->uart_config;
	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

static int uarte_nrfx_err_check(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* The register bitfields map to the defines in uart.h. */
	return nrf_uarte_errorsrc_get_and_clear(uarte);
}

/* Function returns true if a new transfer can be started. Since TXSTOPPED
 * (and ENDTX) is cleared before triggering a new transfer, TX is ready for a
 * new transfer if either event is set.
 */
static bool is_tx_ready(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX;

	return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
		(!ppi_endtx ?
		       nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) : 0);
}

/* Wait until the transmitter is in the idle state. When this function returns,
 * IRQs are locked with the returned key.
 */
static int wait_tx_ready(const struct device *dev)
{
	unsigned int key;

	do {
		/* Wait an arbitrary time before backing off. */
		bool res;

#if defined(CONFIG_ARCH_POSIX)
		NRFX_WAIT_FOR(is_tx_ready(dev), 33, 3, res);
#else
		NRFX_WAIT_FOR(is_tx_ready(dev), 100, 1, res);
#endif

		if (res) {
			key = irq_lock();
			if (is_tx_ready(dev)) {
				break;
			}

			irq_unlock(key);
		}
		if (IS_ENABLED(CONFIG_MULTITHREADING)) {
			k_msleep(1);
		}
	} while (1);

	return key;
}

#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE)
static int pins_state_change(const struct device *dev, bool on)
{
	const struct uarte_nrfx_config *config = dev->config;

	if (config->flags & UARTE_CFG_FLAG_GPIO_MGMT) {
		return pinctrl_apply_state(config->pcfg,
				on ? PINCTRL_STATE_DEFAULT : PINCTRL_STATE_SLEEP);
	}

	return 0;
}
#endif

#ifdef UARTE_ANY_ASYNC

/* A macro is used instead of a static inline function to handle the
 * NO_OPTIMIZATIONS case, where static inline fails at link time.
 */
#define HW_RX_COUNTING_ENABLED(data) \
	(IS_ENABLED(UARTE_HW_ASYNC) ? data->async->hw_rx_counting : false)

#endif /* UARTE_ANY_ASYNC */

static int uarte_enable(const struct device *dev, uint32_t mask)
{
#ifdef UARTE_ANY_ASYNC
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;

	if (data->async) {
		bool disabled = data->async->low_power_mask == 0;
		int ret;

		data->async->low_power_mask |= mask;
		ret = pins_state_change(dev, true);
		if (ret < 0) {
			return ret;
		}

		if (HW_RX_COUNTING_ENABLED(data) && disabled) {
			const nrfx_timer_t *timer = &config->timer;

			nrfx_timer_enable(timer);

			for (int i = 0; i < data->async->rx_flush_cnt; i++) {
				nrfx_timer_increment(timer);
			}
		}
	}
#endif
	nrf_uarte_enable(get_uarte_instance(dev));

	return 0;
}

/* At this point IRQs should be locked and any previous transfer completed,
 * so the transfer can be started without waiting for completion.
 */
static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
{
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

#ifdef CONFIG_PM_DEVICE
	enum pm_device_state state;

	(void)pm_device_state_get(dev, &state);
	if (state != PM_DEVICE_STATE_ACTIVE) {
		return;
	}
#endif
	nrf_uarte_tx_buffer_set(uarte, buf, len);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		(void)uarte_enable(dev, UARTE_LOW_POWER_TX);
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
	}

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
}

#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE)
static void uart_disable(const struct device *dev)
{
#ifdef UARTE_ANY_ASYNC
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;

	if (data->async && HW_RX_COUNTING_ENABLED(data)) {
		nrfx_timer_disable(&config->timer);
		/* Timer/counter value is reset when disabled. */
		data->async->rx_total_byte_cnt = 0;
		data->async->rx_total_user_byte_cnt = 0;
	}
#endif

	nrf_uarte_disable(get_uarte_instance(dev));
}
#endif

#ifdef UARTE_ANY_ASYNC

static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
static void rx_timeout(struct k_timer *timer);
static void tx_timeout(struct k_timer *timer);

static int uarte_nrfx_rx_counting_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *cfg = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int ret;

	if (HW_RX_COUNTING_ENABLED(data)) {
		nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG(
						NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg));

		tmr_config.mode = NRF_TIMER_MODE_COUNTER;
		tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32;
		ret = nrfx_timer_init(&cfg->timer,
				      &tmr_config,
				      timer_handler);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Timer already initialized, "
				"switching to software byte counting.");
			data->async->hw_rx_counting = false;
		} else {
			nrfx_timer_enable(&cfg->timer);
			nrfx_timer_clear(&cfg->timer);
		}

		ret = gppi_channel_alloc(&data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Failed to allocate PPI Channel, "
				"switching to software byte counting.");
			data->async->hw_rx_counting = false;
			nrfx_timer_uninit(&cfg->timer);
		}

#if CONFIG_HAS_HW_NRF_PPI
		ret = nrfx_ppi_channel_assign(
			data->async->rx_cnt.ppi,
			nrf_uarte_event_address_get(uarte,
						    NRF_UARTE_EVENT_RXDRDY),
			nrfx_timer_task_address_get(&cfg->timer,
						    NRF_TIMER_TASK_COUNT));

		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
#else
		nrf_uarte_publish_set(uarte,
				      NRF_UARTE_EVENT_RXDRDY,
				      data->async->rx_cnt.ppi);
		nrf_timer_subscribe_set(cfg->timer.p_reg,
					NRF_TIMER_TASK_COUNT,
					data->async->rx_cnt.ppi);
#endif
		ret = gppi_channel_enable(data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
	} else {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
	}

	return 0;
}

static int uarte_nrfx_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	int ret = uarte_nrfx_rx_counting_init(dev);

	if (ret != 0) {
		return ret;
	}

	data->async->low_power_mask = UARTE_LOW_POWER_TX;
	nrf_uarte_int_enable(uarte,
			     NRF_UARTE_INT_ENDRX_MASK |
			     NRF_UARTE_INT_RXSTARTED_MASK |
			     NRF_UARTE_INT_ERROR_MASK |
			     NRF_UARTE_INT_RXTO_MASK);
	nrf_uarte_enable(uarte);

	/* Stop any currently running RX operations. This can occur when a
	 * bootloader sets up the UART hardware and does not clean it up
	 * before jumping to the next application.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
		while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) &&
		       !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
			/* Busy wait for event to register */
		}
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
	}

	k_timer_init(&data->async->rx_timeout_timer, rx_timeout, NULL);
	k_timer_user_data_set(&data->async->rx_timeout_timer, data);
	k_timer_init(&data->async->tx_timeout_timer, tx_timeout, NULL);
	k_timer_user_data_set(&data->async->tx_timeout_timer, data);

	return 0;
}

/* Attempt to start a TX (asynchronous transfer). If the hardware is not
 * ready, the pending flag is set; the pending transfer is then started once
 * the current poll_out completes.
 * The function must be called with interrupts locked.
 */
static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *data)
{
	if (!is_tx_ready(dev)) {
		/* Active poll_out, postpone until it is completed. */
		data->async->pending_tx = true;
	} else {
		data->async->pending_tx = false;
		data->async->tx_amount = -1;
		tx_start(dev, data->async->xfer_buf, data->async->xfer_len);
	}
}

/* Set up the cache buffer (used for sending data located outside of RAM).
 * During setup, data is copied to the cache buffer and the transfer length
 * is set.
 *
 * @return True if the cache was set, false if there is no more data to cache.
 */
static bool setup_tx_cache(struct uarte_nrfx_data *data)
{
	size_t remaining = data->async->tx_size - data->async->tx_cache_offset;

	if (!remaining) {
		return false;
	}

	size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE);

	data->async->xfer_len = len;
	data->async->xfer_buf = data->async->tx_cache;
	memcpy(data->async->tx_cache, &data->async->tx_buf[data->async->tx_cache_offset], len);

	return true;
}

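/* Illustration (assuming CONFIG_UART_ASYNC_TX_CACHE_SIZE is set to 100):
 * a 260-byte buffer located in flash is sent as chunks of 100, 100 and 60
 * bytes, with tx_cache_offset advanced in txstopped_isr() after each chunk.
 */
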
static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
			 size_t len,
			 int32_t timeout)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	unsigned int key = irq_lock();

	if (data->async->tx_size) {
		irq_unlock(key);
		return -EBUSY;
	}

	data->async->tx_size = len;
	data->async->tx_buf = buf;
	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);

	if (nrfx_is_in_ram(buf)) {
		data->async->xfer_buf = buf;
		data->async->xfer_len = len;
	} else {
		data->async->tx_cache_offset = 0;
		(void)setup_tx_cache(data);
	}

	start_tx_locked(dev, data);

	irq_unlock(key);

	if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS
	    && timeout != SYS_FOREVER_US) {
		k_timer_start(&data->async->tx_timeout_timer, K_USEC(timeout),
			      K_NO_WAIT);
	}
	return 0;
}

static int uarte_nrfx_tx_abort(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	if (data->async->tx_buf == NULL) {
		return -EFAULT;
	}

	data->async->pending_tx = false;
	k_timer_stop(&data->async->tx_timeout_timer);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);

	return 0;
}

static void user_callback(const struct device *dev, struct uart_event *evt)
{
	struct uarte_nrfx_data *data = dev->data;

	if (data->async->user_callback) {
		data->async->user_callback(dev, evt, data->async->user_data);
	}
}

static void notify_uart_rx_rdy(const struct device *dev, size_t len)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uart_event evt = {
		.type = UART_RX_RDY,
		.data.rx.buf = data->async->rx_buf,
		.data.rx.len = len,
		.data.rx.offset = data->async->rx_offset
	};

	user_callback(dev, &evt);
}

static void rx_buf_release(const struct device *dev, uint8_t **buf)
{
	if (*buf) {
		struct uart_event evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = *buf,
		};

		user_callback(dev, &evt);
		*buf = NULL;
	}
}

static void notify_rx_disable(const struct device *dev)
{
	struct uart_event evt = {
		.type = UART_RX_DISABLED,
	};

	user_callback(dev, &evt);
}

static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
				size_t len,
				int32_t timeout)
{
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *cfg = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int ret = 0;

	if (cfg->disable_rx) {
		__ASSERT(false, "TX only UARTE instance");
		return -ENOTSUP;
	}

	/* Signal error if RX is already enabled or if the driver is waiting
	 * for the RXTO event after a call to uart_rx_disable() to discard
	 * data from the UARTE internal RX FIFO.
	 */
	if (data->async->rx_enabled || data->async->discard_rx_fifo) {
		return -EBUSY;
	}

	data->async->rx_timeout = timeout;
	data->async->rx_timeout_slab = timeout / RX_TIMEOUT_DIV;

	data->async->rx_buf = buf;
	data->async->rx_buf_len = len;
	data->async->rx_offset = 0;
	data->async->rx_next_buf = NULL;
	data->async->rx_next_buf_len = 0;

	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
		if (data->async->rx_flush_cnt) {
			int cpy_len = MIN(len, data->async->rx_flush_cnt);

			memcpy(buf, data->async->rx_flush_buffer, cpy_len);
			buf += cpy_len;
			len -= cpy_len;

			/* If the flushed content filled the whole new buffer,
			 * complete the request and indicate that RX is
			 * disabled.
			 */
			if (!len) {
				data->async->rx_flush_cnt -= cpy_len;
				notify_uart_rx_rdy(dev, cpy_len);
				rx_buf_release(dev, &data->async->rx_buf);
				notify_rx_disable(dev);
				return 0;
			}
		}
	}

	nrf_uarte_rx_buffer_set(uarte, buf, len);

	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);

	data->async->rx_enabled = true;
	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
		unsigned int key = irq_lock();

		ret = uarte_enable(dev, UARTE_LOW_POWER_RX);
		irq_unlock(key);
	}

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);

	return 0;
}

static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				 size_t len)
{
	struct uarte_nrfx_data *data = dev->data;
	int err;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	unsigned int key = irq_lock();

	if (data->async->rx_buf == NULL) {
		err = -EACCES;
	} else if (data->async->rx_next_buf == NULL) {
		data->async->rx_next_buf = buf;
		data->async->rx_next_buf_len = len;
		nrf_uarte_rx_buffer_set(uarte, buf, len);
		nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
		err = 0;
	} else {
		err = -EBUSY;
	}

	irq_unlock(key);

	return err;
}

static int uarte_nrfx_callback_set(const struct device *dev,
				   uart_callback_t callback,
				   void *user_data)
{
	struct uarte_nrfx_data *data = dev->data;

	if (!data->async) {
		return -ENOTSUP;
	}

	data->async->user_callback = callback;
	data->async->user_data = user_data;

	return 0;
}

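/* A minimal sketch of how an application drives this async API (the buffer
 * and callback names below are illustrative only):
 *
 *   static uint8_t bufs[2][64];
 *
 *   static void cb(const struct device *dev, struct uart_event *evt, void *ctx)
 *   {
 *       if (evt->type == UART_RX_BUF_REQUEST) {
 *           (void)uart_rx_buf_rsp(dev, bufs[1], sizeof(bufs[1]));
 *       }
 *   }
 *
 *   uart_callback_set(uart_dev, cb, NULL);
 *   uart_rx_enable(uart_dev, bufs[0], sizeof(bufs[0]), 10 * USEC_PER_MSEC);
 *
 * A real application would rotate buffers on UART_RX_BUF_RELEASED and consume
 * data on UART_RX_RDY.
 */
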
static int uarte_nrfx_rx_disable(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	if (data->async->rx_buf == NULL) {
		return -EFAULT;
	}
	if (data->async->rx_next_buf != NULL) {
		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
	}

	k_timer_stop(&data->async->rx_timeout_timer);
	data->async->rx_enabled = false;
	data->async->discard_rx_fifo = true;

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);

	return 0;
}

static void tx_timeout(struct k_timer *timer)
{
	struct uarte_nrfx_data *data = k_timer_user_data_get(timer);

	(void)uarte_nrfx_tx_abort(data->dev);
}

/**
 * The whole timeout is divided by RX_TIMEOUT_DIV into smaller units, and
 * rx_timeout() is executed periodically, every rx_timeout_slab microseconds.
 * If data was received between executions, the countdown restarts from the
 * full timeout; if not, rx_timeout_slab is subtracted from rx_timeout_left.
 * Once rx_timeout_left is less than rx_timeout_slab, receiving has timed out
 * and the user is notified.
 */
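/* Example: with rx_timeout = 100 us and RX_TIMEOUT_DIV = 5, this handler runs
 * every 20 us; after five consecutive runs without new data, rx_timeout_left
 * falls below one slab and the data received so far is reported.
 */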
static void rx_timeout(struct k_timer *timer)
{
	struct uarte_nrfx_data *data = k_timer_user_data_get(timer);
	const struct device *dev = data->dev;
	const struct uarte_nrfx_config *cfg = dev->config;
	uint32_t read;

	if (data->async->is_in_irq) {
		return;
	}

	/* Disable the ENDRX interrupt; if an ENDRX event is generated
	 * meanwhile, it will be handled after this routine completes.
	 */
	nrf_uarte_int_disable(get_uarte_instance(dev),
			      NRF_UARTE_INT_ENDRX_MASK);

	if (HW_RX_COUNTING_ENABLED(data)) {
		read = nrfx_timer_capture(&cfg->timer, 0);
	} else {
		read = data->async->rx_cnt.cnt;
	}

	/* Check if data was received since the last call. */
	if (read != data->async->rx_total_byte_cnt) {
		data->async->rx_total_byte_cnt = read;
		data->async->rx_timeout_left = data->async->rx_timeout;
	}

	/* Check if there is data that was not sent to the user yet.
	 * Note that 'len' is a count of data bytes received, but not
	 * necessarily the amount available in the current buffer.
	 */
	int32_t len = data->async->rx_total_byte_cnt
		    - data->async->rx_total_user_byte_cnt;

	if (!HW_RX_COUNTING_ENABLED(data) &&
	    (len < 0)) {
		/* Prevent too low a value of rx_cnt.cnt, which may occur due
		 * to latencies in handling of the RXRDY interrupt.
		 * At this point, the number of received bytes is at least
		 * equal to what was reported to the user.
		 */
		data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt;
		len = 0;
	}

	/* Check for the current buffer being full.
	 * If the UART receives characters before the ENDRX is handled
	 * and the 'next' buffer is set up, then the SHORT between ENDRX and
	 * STARTRX means that data will be going into the 'next' buffer
	 * until the ENDRX event gets a chance to be handled.
	 */
	bool clipped = false;

	if (len + data->async->rx_offset > data->async->rx_buf_len) {
		len = data->async->rx_buf_len - data->async->rx_offset;
		clipped = true;
	}

	if (len > 0) {
		if (clipped ||
			(data->async->rx_timeout_left
				< data->async->rx_timeout_slab)) {
			/* rx_timeout us elapsed since last receiving */
			if (data->async->rx_buf != NULL) {
				notify_uart_rx_rdy(dev, len);
				data->async->rx_offset += len;
				data->async->rx_total_user_byte_cnt += len;
			}
		} else {
			data->async->rx_timeout_left -=
				data->async->rx_timeout_slab;
		}

		/* If there is nothing left to report until the buffers are
		 * switched, the timer can be stopped.
		 */
		if (clipped) {
			k_timer_stop(&data->async->rx_timeout_timer);
		}
	}

	nrf_uarte_int_enable(get_uarte_instance(dev),
			     NRF_UARTE_INT_ENDRX_MASK);
}

#define UARTE_ERROR_FROM_MASK(mask)					\
	((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	\
	 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY	\
	 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	\
	 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK		\
	 : 0)

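/* Note that only the highest-priority error is reported: e.g. a mask with
 * both OVERRUN and FRAMING set yields UART_ERROR_OVERRUN.
 */
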
static void error_isr(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	uint32_t err = nrf_uarte_errorsrc_get_and_clear(uarte);
	struct uart_event evt = {
		.type = UART_RX_STOPPED,
		.data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err),
	};

	user_callback(dev, &evt);
	(void)uarte_nrfx_rx_disable(dev);
}

static void rxstarted_isr(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uart_event evt = {
		.type = UART_RX_BUF_REQUEST,
	};

	user_callback(dev, &evt);
	if (data->async->rx_timeout != SYS_FOREVER_US) {
		data->async->rx_timeout_left = data->async->rx_timeout;
		k_timer_start(&data->async->rx_timeout_timer,
			      K_USEC(data->async->rx_timeout_slab),
			      K_USEC(data->async->rx_timeout_slab));
	}
}

static void endrx_isr(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	data->async->is_in_irq = true;

	/* Ensure the RX timer is stopped - it will be restarted in the
	 * RXSTARTED handler if needed.
	 */
	k_timer_stop(&data->async->rx_timeout_timer);

	/* This is the amount that the EasyDMA controller has copied into the
	 * buffer.
	 */
	const int rx_amount = nrf_uarte_rx_amount_get(uarte) +
				data->async->rx_flush_cnt;

	data->async->rx_flush_cnt = 0;

	/* The 'rx_offset' can be bigger than 'rx_amount', so the length of
	 * the data we report back to the user may need to be clipped.
	 * This can happen because the 'rx_offset' count derives from RXRDY
	 * events, which can occur already for the next buffer before we are
	 * here to handle this buffer. (The next buffer is now already active
	 * because of the ENDRX_STARTRX shortcut.)
	 */
	int rx_len = rx_amount - data->async->rx_offset;

	if (rx_len < 0) {
		rx_len = 0;
	}

	data->async->rx_total_user_byte_cnt += rx_len;

	/* Only send the RX_RDY event if there is something to send. */
	if (rx_len > 0) {
		notify_uart_rx_rdy(dev, rx_len);
	}

	if (!data->async->rx_enabled) {
		data->async->is_in_irq = false;
		return;
	}

	rx_buf_release(dev, &data->async->rx_buf);

	/* If there is a next buffer, then STARTRX will have already been
	 * invoked by the short (the next buffer will be filling up already)
	 * and here we just do the swap of which buffer the driver is
	 * following; the next rx_timeout() will update the rx_offset.
	 */
	unsigned int key = irq_lock();

	if (data->async->rx_next_buf) {
		data->async->rx_buf = data->async->rx_next_buf;
		data->async->rx_buf_len = data->async->rx_next_buf_len;
		data->async->rx_next_buf = NULL;
		data->async->rx_next_buf_len = 0;

		data->async->rx_offset = 0;
		/* This check is based on the assumption that the ISR handles
		 * ENDRX before RXSTARTED, so if the short was set in time,
		 * the RXSTARTED event will be set.
		 */
		if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
		}
		/* Remove the short until the subsequent next buffer is set up. */
		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
	} else {
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
	}

	irq_unlock(key);

	data->async->is_in_irq = false;
}

/* Function for flushing the internal RX FIFO. It can be called either when
 * the flushed data is to be discarded or when it is valid and needs to be
 * retrieved.
 *
 * However, the UARTE does not update the RXAMOUNT register if the FIFO is
 * empty; the old value remains, which in certain cases makes it impossible
 * to distinguish an empty FIFO from a non-empty one. The function tries to
 * minimize the chance of error with the following measures:
 * - RXAMOUNT is read before flushing and compared against the value after
 *   flushing; if they differ, data was flushed.
 * - The user buffer is dirtied beforehand; if RXAMOUNT did not change, the
 *   buffer is checked for the dirty marker. If the marker is gone, data was
 *   flushed.
 *
 * In all other cases the function indicates that the FIFO was empty. This
 * means that if the number of bytes in the FIFO equals the last RX transfer
 * length and the data equals the dirty marker, the data will be discarded.
 *
 * @param dev Device.
 * @param buf Buffer for flushed data, null indicates that flushed data can be
 *	      dropped.
 * @param len Buffer size, not used if @p buf is null.
 *
 * @return Number of bytes flushed from the FIFO.
 */
static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len)
{
	/* Flushing the RX FIFO requires a buffer bigger than 4 bytes to empty
	 * the FIFO.
	 */
	static const uint8_t dirty;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
	uint8_t tmp_buf[UARTE_HW_RX_FIFO_SIZE];
	uint8_t *flush_buf = buf ? buf : tmp_buf;
	size_t flush_len = buf ? len : sizeof(tmp_buf);

	if (buf) {
		/* Dirty the user buffer so that an unchanged RXAMOUNT can
		 * still be detected (see the description above).
		 */
		memset(buf, dirty, len);
	}

	nrf_uarte_rx_buffer_set(uarte, flush_buf, flush_len);
	/* The final part of handling the RXTO event is in the ENDRX interrupt
	 * handler. ENDRX is generated as a result of the FLUSHRX task.
	 */
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		/* empty */
	}
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);

	if (!buf) {
		return nrf_uarte_rx_amount_get(uarte);
	}

	uint32_t rx_amount = nrf_uarte_rx_amount_get(uarte);

	if (rx_amount != prev_rx_amount) {
		return rx_amount;
	}

	for (size_t i = 0; i < flush_len; i++) {
		if (buf[i] != dirty) {
			return rx_amount;
		}
	}

	return 0;
}

static void async_uart_release(const struct device *dev, uint32_t dir_mask)
{
	struct uarte_nrfx_data *data = dev->data;
	unsigned int key = irq_lock();

	data->async->low_power_mask &= ~dir_mask;
	if (!data->async->low_power_mask) {
		if (dir_mask == UARTE_LOW_POWER_RX) {
			data->async->rx_flush_cnt =
				rx_flush(dev, data->async->rx_flush_buffer,
					 sizeof(data->async->rx_flush_buffer));
		}

		uart_disable(dev);
		int err = pins_state_change(dev, false);

		(void)err;
		__ASSERT_NO_MSG(err == 0);
	}

	irq_unlock(key);
}

/* This handler is called when the receiver is stopped. If RX was aborted,
 * data from the FIFO is flushed.
 */
static void rxto_isr(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;

	rx_buf_release(dev, &data->async->rx_buf);
	rx_buf_release(dev, &data->async->rx_next_buf);

	/* This point can be reached in two cases:
	 * 1. RX is disabled because all provided RX buffers have been filled.
	 * 2. RX was explicitly disabled by a call to uart_rx_disable().
	 * In both cases, the rx_enabled flag is cleared, so that RX can be
	 * enabled again.
	 * In the second case, additionally, data from the UARTE internal RX
	 * FIFO need to be discarded.
	 */
	data->async->rx_enabled = false;
	if (data->async->discard_rx_fifo) {
		data->async->discard_rx_fifo = false;
		(void)rx_flush(dev, NULL, 0);
	}

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		async_uart_release(dev, UARTE_LOW_POWER_RX);
	}

	notify_rx_disable(dev);
}

static void txstopped_isr(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	unsigned int key;

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
		async_uart_release(dev, UARTE_LOW_POWER_TX);

		if (!data->async->tx_size) {
			return;
		}
	}

	if (!data->async->tx_buf) {
		return;
	}

	key = irq_lock();
	size_t amount = (data->async->tx_amount >= 0) ?
			data->async->tx_amount : nrf_uarte_tx_amount_get(uarte);

	irq_unlock(key);

	/* If there is a pending TX request, it means that uart_tx() was
	 * called while a poll_out was in progress. Handling the TXSTOPPED
	 * interrupt means that the poll_out has completed.
	 */
	if (data->async->pending_tx) {
		key = irq_lock();
		start_tx_locked(dev, data);
		irq_unlock(key);
		return;
	}

	/* The cache buffer is used because tx_buf wasn't in RAM. */
	if (data->async->tx_buf != data->async->xfer_buf) {
		/* In that case set up the next chunk. If that was the last
		 * chunk, fall back to reporting TX_DONE.
		 */
		if (amount == data->async->xfer_len) {
			data->async->tx_cache_offset += amount;
			if (setup_tx_cache(data)) {
				key = irq_lock();
				start_tx_locked(dev, data);
				irq_unlock(key);
				return;
			}

			/* Amount is already included in tx_cache_offset. */
			amount = data->async->tx_cache_offset;
		} else {
			/* TX was aborted, include tx_cache_offset in amount. */
			amount += data->async->tx_cache_offset;
		}
	}

	k_timer_stop(&data->async->tx_timeout_timer);

	struct uart_event evt = {
		.data.tx.buf = data->async->tx_buf,
		.data.tx.len = amount,
	};

	if (amount == data->async->tx_size) {
		evt.type = UART_TX_DONE;
	} else {
		evt.type = UART_TX_ABORTED;
	}

	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
	data->async->tx_buf = NULL;
	data->async->tx_size = 0;

	user_callback(dev, &evt);
}

static void uarte_nrfx_isr_async(const void *arg)
{
	const struct device *dev = arg;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = dev->data;

	if (!HW_RX_COUNTING_ENABLED(data)
	    && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
		data->async->rx_cnt.cnt++;
		return;
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
		error_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)
	    && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		endrx_isr(dev);
	}

	/* RXSTARTED must be handled after ENDRX because it starts the RX
	 * timeout, and if the order were swapped, ENDRX would stop this
	 * timeout. Skip if ENDRX is set when RXSTARTED is set: it means that
	 * ENDRX occurred after the ENDRX check above, which may happen when
	 * the UARTE interrupt gets preempted. Events are not cleared and the
	 * ISR will be called again; ENDRX will then be handled first.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) &&
	    !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
		rxstarted_isr(dev);
	}

	/* RXTO must be handled after ENDRX, which should notify the buffer.
	 * Skip if ENDRX is set when RXTO is set: it means that ENDRX occurred
	 * after the ENDRX check above, which may happen when the UARTE
	 * interrupt gets preempted. Events are not cleared and the ISR will
	 * be called again; ENDRX will then be handled first.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) &&
	    !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
		rxto_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)
	    && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK)) {
		endtx_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)
	    && nrf_uarte_int_enable_check(uarte,
					  NRF_UARTE_INT_TXSTOPPED_MASK)) {
		txstopped_isr(dev);
	}
}

#endif /* UARTE_ANY_ASYNC */

/**
 * @brief Poll the device for input.
 *
 * @param dev UARTE device struct
 * @param c Pointer to character
 *
 * @return 0 if a character arrived, -1 if the input buffer is empty.
 */
static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c)
{
	const struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

#ifdef UARTE_ANY_ASYNC
	if (data->async) {
		return -ENOTSUP;
	}
#endif

	if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		return -1;
	}

	*c = *data->rx_data;

	/* clear the interrupt */
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);

	return 0;
}

/**
 * @brief Output a character in polled mode.
 *
 * @param dev UARTE device struct
 * @param c Character to send
 */
static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c)
{
	struct uarte_nrfx_data *data = dev->data;
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	unsigned int key;

	if (isr_mode) {
		while (1) {
			key = irq_lock();
			if (is_tx_ready(dev)) {
#if UARTE_ANY_ASYNC
				if (data->async && data->async->tx_size &&
					data->async->tx_amount < 0) {
					data->async->tx_amount =
						nrf_uarte_tx_amount_get(
						      get_uarte_instance(dev));
				}
#endif
				break;
			}

			irq_unlock(key);
			Z_SPIN_DELAY(3);
		}
	} else {
		key = wait_tx_ready(dev);
	}

	*data->char_out = c;
	tx_start(dev, data->char_out, 1);

	irq_unlock(key);
}

#ifdef UARTE_INTERRUPT_DRIVEN
/** Interrupt driven FIFO fill function */
static int uarte_nrfx_fifo_fill(const struct device *dev,
				const uint8_t *tx_data,
				int len)
{
	struct uarte_nrfx_data *data = dev->data;

	len = MIN(len, data->int_driven->tx_buff_size);
	if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) {
		return 0;
	}

	/* Copy data to RAM buffer for EasyDMA transfer */
	memcpy(data->int_driven->tx_buffer, tx_data, len);

	unsigned int key = irq_lock();

	if (!is_tx_ready(dev)) {
		data->int_driven->fifo_fill_lock = 0;
		len = 0;
	} else {
		tx_start(dev, data->int_driven->tx_buffer, len);
	}

	irq_unlock(key);

	return len;
}

/** Interrupt driven FIFO read function */
static int uarte_nrfx_fifo_read(const struct device *dev,
				uint8_t *rx_data,
				const int size)
{
	int num_rx = 0;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	const struct uarte_nrfx_data *data = dev->data;

	if (size > 0 && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		/* Clear the interrupt */
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);

		/* Receive a character */
		rx_data[num_rx++] = *data->rx_data;

		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
	}

	return num_rx;
}

/** Interrupt driven transfer enabling function */
static void uarte_nrfx_irq_tx_enable(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = dev->data;
	unsigned int key = irq_lock();

	data->int_driven->disable_tx_irq = false;
	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);

	irq_unlock(key);
}

/** Interrupt driven transfer disabling function */
static void uarte_nrfx_irq_tx_disable(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;

	/* TX IRQ will be disabled after the current transmission is finished */
	data->int_driven->disable_tx_irq = true;
}

/** Interrupt driven transfer ready function */
static int uarte_nrfx_irq_tx_ready_complete(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = dev->data;

	/* The TXSTOPPED event stays set once a transmission completes, so the
	 * ISR runs as soon as the TX IRQ is enabled. Because of that we have
	 * to explicitly check whether the TXSTOPPED interrupt is enabled;
	 * otherwise this function would always return true no matter what the
	 * source of the interrupt was.
	 */
	bool ready = !data->int_driven->disable_tx_irq &&
		     nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) &&
		     nrf_uarte_int_enable_check(uarte,
						NRF_UARTE_INT_TXSTOPPED_MASK);

	if (ready) {
		data->int_driven->fifo_fill_lock = 0;
	}

	return ready;
}

static int uarte_nrfx_irq_rx_ready(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX);
}

/** Interrupt driven receiver enabling function */
static void uarte_nrfx_irq_rx_enable(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
}

/** Interrupt driven receiver disabling function */
static void uarte_nrfx_irq_rx_disable(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
}

/** Interrupt driven error enabling function */
static void uarte_nrfx_irq_err_enable(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ERROR_MASK);
}

/** Interrupt driven error disabling function */
static void uarte_nrfx_irq_err_disable(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ERROR_MASK);
}

/** Interrupt driven pending status function */
static int uarte_nrfx_irq_is_pending(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	return ((nrf_uarte_int_enable_check(uarte,
					    NRF_UARTE_INT_TXSTOPPED_MASK) &&
		 uarte_nrfx_irq_tx_ready_complete(dev))
		||
		(nrf_uarte_int_enable_check(uarte,
					    NRF_UARTE_INT_ENDRX_MASK) &&
		 uarte_nrfx_irq_rx_ready(dev)));
}

/** Interrupt driven interrupt update function */
static int uarte_nrfx_irq_update(const struct device *dev)
{
	return 1;
}

/** Set the callback function */
static void uarte_nrfx_irq_callback_set(const struct device *dev,
					uart_irq_callback_user_data_t cb,
					void *cb_data)
{
	struct uarte_nrfx_data *data = dev->data;

	data->int_driven->cb = cb;
	data->int_driven->cb_data = cb_data;
}
#endif /* UARTE_INTERRUPT_DRIVEN */

static const struct uart_driver_api uart_nrfx_uarte_driver_api = {
	.poll_in		= uarte_nrfx_poll_in,
	.poll_out		= uarte_nrfx_poll_out,
	.err_check		= uarte_nrfx_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure              = uarte_nrfx_configure,
	.config_get             = uarte_nrfx_config_get,
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
#ifdef UARTE_ANY_ASYNC
	.callback_set		= uarte_nrfx_callback_set,
	.tx			= uarte_nrfx_tx,
	.tx_abort		= uarte_nrfx_tx_abort,
	.rx_enable		= uarte_nrfx_rx_enable,
	.rx_buf_rsp		= uarte_nrfx_rx_buf_rsp,
	.rx_disable		= uarte_nrfx_rx_disable,
#endif /* UARTE_ANY_ASYNC */
#ifdef UARTE_INTERRUPT_DRIVEN
	.fifo_fill		= uarte_nrfx_fifo_fill,
	.fifo_read		= uarte_nrfx_fifo_read,
	.irq_tx_enable		= uarte_nrfx_irq_tx_enable,
	.irq_tx_disable		= uarte_nrfx_irq_tx_disable,
	.irq_tx_ready		= uarte_nrfx_irq_tx_ready_complete,
	.irq_rx_enable		= uarte_nrfx_irq_rx_enable,
	.irq_rx_disable		= uarte_nrfx_irq_rx_disable,
	.irq_tx_complete	= uarte_nrfx_irq_tx_ready_complete,
	.irq_rx_ready		= uarte_nrfx_irq_rx_ready,
	.irq_err_enable		= uarte_nrfx_irq_err_enable,
	.irq_err_disable	= uarte_nrfx_irq_err_disable,
	.irq_is_pending		= uarte_nrfx_irq_is_pending,
	.irq_update		= uarte_nrfx_irq_update,
	.irq_callback_set	= uarte_nrfx_irq_callback_set,
#endif /* UARTE_INTERRUPT_DRIVEN */
};

static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte,
				 struct uarte_nrfx_data *data)
{
	nrfx_err_t ret;

	ret = gppi_channel_alloc(&data->ppi_ch_endtx);
	if (ret != NRFX_SUCCESS) {
		LOG_ERR("Failed to allocate PPI Channel");
		return -EIO;
	}

	nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx,
		nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX),
		nrf_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX));
	nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx));

	return 0;
}
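
/*
 * With the channel above enabled, the stop request happens entirely in
 * hardware after every transfer, roughly:
 *
 *	STARTTX -> (bytes shifted out) -> ENDTX --(PPI)--> STOPTX -> TXSTOPPED
 *
 * so the enhanced poll_out only has to wait for TXSTOPPED instead of
 * servicing ENDTX from the ISR.
 */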

static int uarte_instance_init(const struct device *dev,
			       uint8_t interrupts_active)
{
	int err;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *cfg = dev->config;

	nrf_uarte_disable(uarte);

	data->dev = dev;

#ifdef CONFIG_ARCH_POSIX
	/* For simulation the DT provided peripheral address needs to be corrected */
	((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs;
#endif

	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	err = uarte_nrfx_configure(dev, &data->uart_config);
	if (err) {
		return err;
	}

	if (IS_ENABLED(UARTE_ENHANCED_POLL_OUT) &&
	    cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) {
		err = endtx_stoptx_ppi_init(uarte, data);
		if (err < 0) {
			return err;
		}
	}

#ifdef UARTE_ANY_ASYNC
	if (data->async) {
		err = uarte_nrfx_init(dev);
		if (err < 0) {
			return err;
		}
	} else
#endif
	{
		/* Enable receiver and transmitter */
		nrf_uarte_enable(uarte);

		if (!cfg->disable_rx) {
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);

			nrf_uarte_rx_buffer_set(uarte, data->rx_data, 1);
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
		}
	}

	if (!(cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX)) {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
	}

	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
	}

	/* Set the TXSTOPPED event by requesting a fake (zero-length) transfer.
	 * A pointer to a RAM variable (data->char_out) is set because
	 * otherwise the operation may result in a HardFault or RAM corruption.
	 */
	nrf_uarte_tx_buffer_set(uarte, data->char_out, 0);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);

	/* Switch off the transmitter to save energy. */
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);

	return 0;
}

#ifdef CONFIG_PM_DEVICE
/** @brief Pend until TX is stopped.
 *
 * There are two configurations that must be handled:
 * - ENDTX->TXSTOPPED PPI enabled: just pend until the TXSTOPPED event is set
 * - PPI not used: disable the ENDTX interrupt, trigger STOPTX manually if
 *   needed, then pend until TXSTOPPED is set
 */
static void wait_for_tx_stopped(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;
	bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	bool res;

	if (!ppi_endtx) {
		/* This function may be called from any context, including
		 * one that the UARTE interrupt cannot preempt. Disable the
		 * ENDTX interrupt to ensure it is not triggered (when running
		 * in a lower priority context) and stop TX if necessary.
		 */
		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDTX_MASK);
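		/* NRFX_WAIT_FOR polls the condition up to 1000 times with a
		 * 1 us delay between attempts (roughly a 1 ms bound), storing
		 * the outcome in res, which is ignored here.
		 */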
		NRFX_WAIT_FOR(is_tx_ready(dev), 1000, 1, res);
		if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
		}
	}

	NRFX_WAIT_FOR(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED),
		      1000, 1, res);

	if (!ppi_endtx) {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
	}
}

static int uarte_nrfx_pm_action(const struct device *dev,
				enum pm_device_action action)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
#if defined(UARTE_ANY_ASYNC) || defined(UARTE_INTERRUPT_DRIVEN)
	struct uarte_nrfx_data *data = dev->data;
#endif
	const struct uarte_nrfx_config *cfg = dev->config;
	int ret;

#ifdef UARTE_ANY_ASYNC
	/* If low power mode for asynchronous mode is used then there is
	 * nothing to do here. In low power mode the UARTE is turned off
	 * whenever there is no activity.
	 */
	if (data->async && (cfg->flags & UARTE_CFG_FLAG_LOW_POWER)) {
		return 0;
	}
#endif

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pins_state_change(dev, true);
		if (ret < 0) {
			return ret;
		}

		nrf_uarte_enable(uarte);

#ifdef UARTE_ANY_ASYNC
		if (data->async) {
			if (HW_RX_COUNTING_ENABLED(data)) {
				nrfx_timer_enable(&cfg->timer);
			}

			return 0;
		}
#endif
		if (!cfg->disable_rx) {
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
#ifdef UARTE_INTERRUPT_DRIVEN
			if (data->int_driven &&
			    data->int_driven->rx_irq_enabled) {
				nrf_uarte_int_enable(uarte,
						     NRF_UARTE_INT_ENDRX_MASK);
			}
#endif
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* Disabling the UART requires stopping RX, but the RX stop
		 * event is only generated after each reception when the
		 * async UART API is used.
		 */
#ifdef UARTE_ANY_ASYNC
		if (data->async) {
			/* Entering the inactive state requires that the
			 * device has no active asynchronous calls.
			 */
			__ASSERT_NO_MSG(!data->async->rx_enabled);
			__ASSERT_NO_MSG(!data->async->tx_size);
		}
#endif
		if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
#ifdef UARTE_INTERRUPT_DRIVEN
			if (data->int_driven) {
				data->int_driven->rx_irq_enabled =
					nrf_uarte_int_enable_check(uarte,
						NRF_UARTE_INT_ENDRX_MASK);
				if (data->int_driven->rx_irq_enabled) {
					nrf_uarte_int_disable(uarte,
						NRF_UARTE_INT_ENDRX_MASK);
				}
			}
#endif
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
			while (!nrf_uarte_event_check(uarte,
						      NRF_UARTE_EVENT_RXTO) &&
			       !nrf_uarte_event_check(uarte,
						      NRF_UARTE_EVENT_ERROR)) {
				/* Busy wait for event to register */
				Z_SPIN_DELAY(2);
			}
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		}

		wait_for_tx_stopped(dev);
		uart_disable(dev);

		ret = pins_state_change(dev, false);
		if (ret < 0) {
			return ret;
		}

		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
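
/*
 * These actions are normally driven by the PM subsystem rather than being
 * called directly; a minimal sketch (assuming CONFIG_PM_DEVICE is enabled):
 *
 *	pm_device_action_run(dev, PM_DEVICE_ACTION_SUSPEND);
 *	...
 *	pm_device_action_run(dev, PM_DEVICE_ACTION_RESUME);
 */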
#endif /* CONFIG_PM_DEVICE */

#define UARTE(idx)			DT_NODELABEL(uart##idx)
#define UARTE_HAS_PROP(idx, prop)	DT_NODE_HAS_PROP(UARTE(idx), prop)
#define UARTE_PROP(idx, prop)		DT_PROP(UARTE(idx), prop)

#define UARTE_IRQ_CONFIGURE(idx, isr_handler)				       \
	do {								       \
		IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \
			    isr_handler, DEVICE_DT_GET(UARTE(idx)), 0);	       \
		irq_enable(DT_IRQN(UARTE(idx)));			       \
	} while (false)
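
/*
 * For example, UARTE_IRQ_CONFIGURE(0, uarte_nrfx_isr_int) expands roughly to:
 *
 *	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart0)),
 *		    DT_IRQ(DT_NODELABEL(uart0), priority),
 *		    uarte_nrfx_isr_int, DEVICE_DT_GET(DT_NODELABEL(uart0)), 0);
 *	irq_enable(DT_IRQN(DT_NODELABEL(uart0)));
 */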

/* Low power mode is used when the disable-rx property is set or, in async
 * mode, when the corresponding Kconfig option is enabled.
 */
#define USE_LOW_POWER(idx) \
	((!UARTE_PROP(idx, disable_rx) &&				       \
	COND_CODE_1(CONFIG_UART_##idx##_ASYNC,				       \
		(!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)),	       \
		(1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER)
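
/* Spelled out, the macro above evaluates to (sketch):
 *
 *	disable_rx set                               -> UARTE_CFG_FLAG_LOW_POWER
 *	async && CONFIG_UART_x_NRF_ASYNC_LOW_POWER   -> UARTE_CFG_FLAG_LOW_POWER
 *	otherwise                                    -> 0
 */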

#define UARTE_DISABLE_RX_INIT(node_id) \
	.disable_rx = DT_PROP(node_id, disable_rx)

#define UART_NRF_UARTE_DEVICE(idx)					       \
	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(UARTE(idx));		       \
	UARTE_INT_DRIVEN(idx);						       \
	UARTE_ASYNC(idx);						       \
	PINCTRL_DT_DEFINE(UARTE(idx));					       \
	static uint8_t uarte##idx##_char_out UARTE_MEMORY_SECTION(idx);       \
	static uint8_t uarte##idx##_rx_data UARTE_MEMORY_SECTION(idx);	       \
	static struct uarte_nrfx_data uarte_##idx##_data = {		       \
		UARTE_CONFIG(idx),					       \
		IF_ENABLED(CONFIG_UART_##idx##_ASYNC,			       \
			    (.async = &uarte##idx##_async,))		       \
		IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,	       \
			    (.int_driven = &uarte##idx##_int_driven,))	       \
	};								       \
	static const struct uarte_nrfx_config uarte_##idx##z_config = {	       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)),		       \
		.uarte_regs = _CONCAT(NRF_UARTE, idx),			       \
		.flags =						       \
			(IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ?     \
				UARTE_CFG_FLAG_GPIO_MGMT : 0) |		       \
			(IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ?   \
				UARTE_CFG_FLAG_PPI_ENDTX : 0) |		       \
			USE_LOW_POWER(idx),				       \
		UARTE_DISABLE_RX_INIT(UARTE(idx)),			       \
		IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC,		       \
			(.timer = NRFX_TIMER_INSTANCE(			       \
				CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),))     \
	};								       \
	static int uarte_##idx##_init(const struct device *dev)	       \
	{								       \
		COND_CODE_1(CONFIG_UART_##idx##_ASYNC,			       \
			   (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_async);),  \
			   (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_int);))    \
		return uarte_instance_init(				       \
			dev,						       \
			IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN));     \
	}								       \
									       \
	PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action);		       \
									       \
	DEVICE_DT_DEFINE(UARTE(idx),					       \
		      uarte_##idx##_init,				       \
		      PM_DEVICE_DT_GET(UARTE(idx)),			       \
		      &uarte_##idx##_data,				       \
		      &uarte_##idx##z_config,				       \
		      PRE_KERNEL_1,					       \
		      CONFIG_SERIAL_INIT_PRIORITY,			       \
		      &uart_nrfx_uarte_driver_api)

#define UARTE_CONFIG(idx)						       \
	.char_out = &uarte##idx##_char_out,				       \
	.rx_data = &uarte##idx##_rx_data,				       \
	.uart_config = {						       \
		.baudrate = UARTE_PROP(idx, current_speed),		       \
		.data_bits = UART_CFG_DATA_BITS_8,			       \
		.stop_bits = UART_CFG_STOP_BITS_1,			       \
		.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT)       \
			  ? UART_CFG_PARITY_EVEN			       \
			  : UART_CFG_PARITY_NONE,			       \
		.flow_ctrl = UARTE_PROP(idx, hw_flow_control)		       \
			     ? UART_CFG_FLOW_CTRL_RTS_CTS		       \
			     : UART_CFG_FLOW_CTRL_NONE,			       \
	}
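
/* The uart_config fields above come straight from devicetree; an
 * illustrative overlay enabling an instance might look like:
 *
 *	&uart0 {
 *		status = "okay";
 *		current-speed = <115200>;
 *		hw-flow-control;
 *	};
 */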

#define UARTE_ASYNC(idx)						       \
	IF_ENABLED(CONFIG_UART_##idx##_ASYNC, (				       \
		static uint8_t						       \
			uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \
			UARTE_MEMORY_SECTION(idx);			       \
		struct uarte_async_cb uarte##idx##_async = {		       \
			.tx_cache = uarte##idx##_tx_cache,		       \
			.hw_rx_counting =				       \
				IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC),  \
		}))

#define UARTE_INT_DRIVEN(idx)						       \
	IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,		       \
		(static uint8_t uarte##idx##_tx_buffer			       \
			[MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE,	       \
			     BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))]      \
			UARTE_MEMORY_SECTION(idx);			       \
		 static struct uarte_nrfx_int_driven			       \
			uarte##idx##_int_driven = {			       \
				.tx_buffer = uarte##idx##_tx_buffer,	       \
				.tx_buff_size = sizeof(uarte##idx##_tx_buffer),\
			};))

#define UARTE_MEMORY_SECTION(idx)					       \
	COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions),		       \
		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME(       \
			DT_PHANDLE(UARTE(idx), memory_regions)))))),	       \
		())
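
/* A hypothetical overlay placing an instance's buffers in a dedicated RAM
 * region via the memory-regions property (the region label is illustrative):
 *
 *	&uart0 {
 *		memory-regions = <&uart_dma_region>;
 *	};
 */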

#define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \
	IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);))

UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ())