1 /*
2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @brief Driver for Nordic Semiconductor nRF UARTE
9  */
10 
11 #include <zephyr/drivers/uart.h>
12 #include <zephyr/drivers/pinctrl.h>
13 #include <zephyr/pm/device.h>
14 #include <zephyr/pm/device_runtime.h>
15 #include <hal/nrf_uarte.h>
16 #include <nrfx_timer.h>
17 #include <zephyr/sys/util.h>
18 #include <zephyr/kernel.h>
19 #include <zephyr/cache.h>
20 #include <soc.h>
21 #include <dmm.h>
22 #include <helpers/nrfx_gppi.h>
23 #include <zephyr/linker/devicetree_regions.h>
24 #include <zephyr/irq.h>
25 #include <zephyr/logging/log.h>
26 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
27 
28 LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL);
29 
30 #define RX_FLUSH_WORKAROUND 1
31 
32 #define UARTE(idx)                DT_NODELABEL(uart##idx)
33 #define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop)
34 #define UARTE_PROP(idx, prop)     DT_PROP(UARTE(idx), prop)
35 
36 #define UARTE_IS_CACHEABLE(idx) DMM_IS_REG_CACHEABLE(DT_PHANDLE(UARTE(idx), memory_regions))
37 
38 /* Execute macro f(x) for all instances. */
39 #define UARTE_FOR_EACH_INSTANCE(f, sep, off_code, ...)                                             \
40 	NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, __VA_ARGS__)
41 
42 /* Determine if any instance is using interrupt driven API. */
43 #define IS_INT_DRIVEN(unused, prefix, i, _) \
44 	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
45 	 IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN))
46 
47 #if UARTE_FOR_EACH_INSTANCE(IS_INT_DRIVEN, (||), (0))
48 	#define UARTE_INTERRUPT_DRIVEN	1
49 #endif
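/* Illustrative expansion (not part of the original driver): for a hypothetical SoC that
 * only has the plain "uart0" instance, the check above effectively reduces to
 *
 *   (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE0) && IS_ENABLED(CONFIG_UART_0_INTERRUPT_DRIVEN))
 *
 * i.e. UARTE_INTERRUPT_DRIVEN gets defined as soon as at least one present instance has
 * its *_INTERRUPT_DRIVEN Kconfig option enabled.
 */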
50 
51 /* Determine if any instance is not using asynchronous API. */
52 #define IS_NOT_ASYNC(unused, prefix, i, _) \
53 	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
54 	 !IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))
55 
56 #if UARTE_FOR_EACH_INSTANCE(IS_NOT_ASYNC, (||), (0))
57 #define UARTE_ANY_NONE_ASYNC 1
58 #endif
59 
60 /* Determine if any instance is using asynchronous API. */
61 #define IS_ASYNC(unused, prefix, i, _) \
62 	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
63 	 IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))
64 
65 #if UARTE_FOR_EACH_INSTANCE(IS_ASYNC, (||), (0))
66 #define UARTE_ANY_ASYNC 1
67 #endif
68 
69 /* Determine if any instance is using asynchronous API with HW byte counting. */
70 #define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC)
71 
72 #if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0))
73 #define UARTE_ANY_HW_ASYNC 1
74 #endif
75 
76 /* Determine if any instance is using enhanced poll_out feature. */
77 #define IS_ENHANCED_POLL_OUT(unused, prefix, i, _) \
78 	IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT)
79 
80 #if UARTE_FOR_EACH_INSTANCE(IS_ENHANCED_POLL_OUT, (||), (0))
81 #define UARTE_ENHANCED_POLL_OUT 1
82 #endif
83 
84 #define INSTANCE_PROP(unused, prefix, i, prop) UARTE_PROP(prefix##i, prop)
85 #define INSTANCE_PRESENT(unused, prefix, i, prop) 1
86 
87 /* The driver supports only the cases where either all or none of the instances support a given HW feature. */
88 #if	(UARTE_FOR_EACH_INSTANCE(INSTANCE_PROP, (+), (0), endtx_stoptx_supported)) == \
89 	(UARTE_FOR_EACH_INSTANCE(INSTANCE_PRESENT, (+), (0), endtx_stoptx_supported))
90 #define UARTE_HAS_ENDTX_STOPTX_SHORT 1
91 #endif
92 
93 #if	(UARTE_FOR_EACH_INSTANCE(INSTANCE_PROP, (+), (0), frame_timeout_supported)) == \
94 	(UARTE_FOR_EACH_INSTANCE(INSTANCE_PRESENT, (+), (0), frame_timeout_supported))
95 #define UARTE_HAS_FRAME_TIMEOUT 1
96 #endif
97 
98 #define INSTANCE_NEEDS_CACHE_MGMT(unused, prefix, i, prop) UARTE_IS_CACHEABLE(prefix##i)
99 
100 #if UARTE_FOR_EACH_INSTANCE(INSTANCE_NEEDS_CACHE_MGMT, (+), (0), _)
101 #define UARTE_ANY_CACHE 1
102 #endif
103 
104 #define IS_LOW_POWER(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_ASYNC_LOW_POWER)
105 
106 #if UARTE_FOR_EACH_INSTANCE(IS_LOW_POWER, (||), (0))
107 #define UARTE_ANY_LOW_POWER 1
108 #endif
109 
110 #ifdef CONFIG_SOC_NRF54H20_GPD
111 #include <nrf/gpd.h>
112 
113 /* Macro must resolve to literal 0 or 1 */
114 #define INSTANCE_IS_FAST_PD(unused, prefix, idx, _)						\
115 	COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(UARTE(idx)),					\
116 		    (COND_CODE_1(DT_NODE_HAS_PROP(UARTE(idx), power_domains),			\
117 			(IS_EQ(DT_PHA(UARTE(idx), power_domains, id), NRF_GPD_FAST_ACTIVE1)),	\
118 			(0))), (0))
119 
120 #if UARTE_FOR_EACH_INSTANCE(INSTANCE_IS_FAST_PD, (||), (0))
121 /* Instance in fast power domain (PD) requires special PM treatment so device runtime PM must
122  * be enabled.
123  */
124 BUILD_ASSERT(IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME));
125 #define UARTE_ANY_FAST_PD 1
126 #endif
127 #endif
128 
129 #define INSTANCE_IS_HIGH_SPEED(unused, prefix, idx, _) \
130 	COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(UARTE(prefix##idx)),				\
131 	    ((NRF_PERIPH_GET_FREQUENCY(UARTE(prefix##idx)) > NRF_UARTE_BASE_FREQUENCY_16MHZ)),	\
132 	    (0))
133 
134 /* Macro determines if there is any high speed instance (an instance that is driven by
135  * a clock faster than 16 MHz).
136  */
137 #define UARTE_ANY_HIGH_SPEED (UARTE_FOR_EACH_INSTANCE(INSTANCE_IS_HIGH_SPEED, (||), (0)))
138 
139 #ifdef UARTE_ANY_CACHE
140 /* The uart120 instance does not retain the BAUDRATE register when ENABLE=0. When this
141  * instance is used, the baudrate must be set after enabling the peripheral, not before.
142  * This approach works for all instances, so it can be applied generally when uart120 is
143  * used. It is not the default everywhere because it costs some resources. Since currently
144  * only uart120 needs cache management, that is used to decide whether to apply the workaround.
145  */
146 #define UARTE_BAUDRATE_RETENTION_WORKAROUND 1
147 #endif
148 
149 /*
150  * The RX timeout is divided into time slabs; this define tells how many divisions
151  * are made. More divisions mean higher timeout accuracy but higher CPU usage (example below).
152  */
153 #define RX_TIMEOUT_DIV 5
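/* Example (illustrative): a user timeout of 50000 us passed to uart_rx_enable() is split
 * into 50000 / RX_TIMEOUT_DIV = 10000 us slabs, so RX inactivity is checked every 10 ms.
 */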
154 
155 /* Size of hardware fifo in RX path. */
156 #define UARTE_HW_RX_FIFO_SIZE 5
157 
158 #ifdef UARTE_ANY_ASYNC
159 
160 struct uarte_async_tx {
161 	struct k_timer timer;
162 	const uint8_t *buf;
163 	volatile size_t len;
164 	const uint8_t *xfer_buf;
165 	size_t xfer_len;
166 	size_t cache_offset;
167 	volatile int amount;
168 	bool pending;
169 };
170 
171 struct uarte_async_rx {
172 	struct k_timer timer;
173 #ifdef CONFIG_HAS_NORDIC_DMM
174 	uint8_t *usr_buf;
175 	uint8_t *next_usr_buf;
176 #endif
177 	uint8_t *buf;
178 	size_t buf_len;
179 	size_t offset;
180 	uint8_t *next_buf;
181 	size_t next_buf_len;
182 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
183 #if !defined(UARTE_HAS_FRAME_TIMEOUT)
184 	uint32_t idle_cnt;
185 #endif
186 	k_timeout_t timeout;
187 #else
188 	uint32_t total_byte_cnt; /* Total number of bytes received */
189 	uint32_t total_user_byte_cnt; /* Total number of bytes passed to user */
190 	int32_t timeout_us; /* Timeout set by user */
191 	int32_t timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
192 	int32_t timeout_left; /* Current time left until user callback */
193 	union {
194 		uint8_t ppi;
195 		uint32_t cnt;
196 	} cnt;
197 	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
198 	volatile bool is_in_irq;
199 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
200 	uint8_t flush_cnt;
201 	volatile bool enabled;
202 	volatile bool discard_fifo;
203 };
204 
205 struct uarte_async_cb {
206 	uart_callback_t user_callback;
207 	void *user_data;
208 	struct uarte_async_rx rx;
209 	struct uarte_async_tx tx;
210 };
211 #endif /* UARTE_ANY_ASYNC */
212 
213 #ifdef UARTE_INTERRUPT_DRIVEN
214 struct uarte_nrfx_int_driven {
215 	uart_irq_callback_user_data_t cb; /**< Callback function pointer */
216 	void *cb_data; /**< Callback function arg */
217 	uint8_t *tx_buffer;
218 	uint16_t tx_buff_size;
219 	volatile bool disable_tx_irq;
220 	bool tx_irq_enabled;
221 #ifdef CONFIG_PM_DEVICE
222 	bool rx_irq_enabled;
223 #endif
224 	atomic_t fifo_fill_lock;
225 };
226 #endif
227 
228 /* Device data structure */
229 struct uarte_nrfx_data {
230 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
231 	struct uart_config uart_config;
232 #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND
233 	nrf_uarte_baudrate_t nrf_baudrate;
234 #endif
235 #endif
236 #ifdef UARTE_INTERRUPT_DRIVEN
237 	struct uarte_nrfx_int_driven *int_driven;
238 #endif
239 #ifdef UARTE_ANY_ASYNC
240 	struct uarte_async_cb *async;
241 #endif
242 	atomic_val_t poll_out_lock;
243 	atomic_t flags;
244 #ifdef UARTE_ENHANCED_POLL_OUT
245 	uint8_t ppi_ch_endtx;
246 #endif
247 };
248 
249 #define UARTE_FLAG_LOW_POWER_TX BIT(0)
250 #define UARTE_FLAG_LOW_POWER_RX BIT(1)
251 #define UARTE_FLAG_LOW_POWER (UARTE_FLAG_LOW_POWER_TX | UARTE_FLAG_LOW_POWER_RX)
252 #define UARTE_FLAG_TRIG_RXTO BIT(2)
253 #define UARTE_FLAG_POLL_OUT BIT(3)
254 
255 /* If enabled then the ENDTX event is connected via PPI to the STOPTX task. */
256 #define UARTE_CFG_FLAG_PPI_ENDTX   BIT(0)
257 
258 /* If enabled then TIMER and PPI are used for byte counting. */
259 #define UARTE_CFG_FLAG_HW_BYTE_COUNTING   BIT(1)
260 
261 /* If enabled then the UARTE peripheral is disabled when not used. This allows
262  * achieving the lowest power consumption when idle.
263  */
264 #define UARTE_CFG_FLAG_LOW_POWER   BIT(2)
265 
266 /* If enabled then the UARTE peripheral uses memory which is cacheable. */
267 #define UARTE_CFG_FLAG_CACHEABLE BIT(3)
268 
269 /* The formula for the baudrate setting is the following:
270  * 2^12 * (2^20 / (f_PCLK / desired_baudrate)), where f_PCLK is the frequency that
271  * drives the UARTE.
272  *
273  * @param f_pclk Frequency of the clock that drives the peripheral.
274  * @param baudrate Desired baudrate.
275  *
276  * @return Baudrate setting to be written to the BAUDRATE register
277  */
278 #define UARTE_GET_CUSTOM_BAUDRATE(f_pclk, baudrate) ((BIT(20) / (f_pclk / baudrate)) << 12)
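/* Worked example (illustrative): for f_pclk = 128000000 and baudrate = 1000000 the macro
 * yields (BIT(20) / (128000000 / 1000000)) << 12 = (1048576 / 128) << 12 = 8192 << 12,
 * i.e. 0x02000000, which is the value written to the BAUDRATE register.
 */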
279 
280 /* Macro for converting a numerical baudrate to the register value. This approach
281  * is convenient because for a constant input the nRF setting can be calculated
282  * at compile time.
283  */
284 #define NRF_BAUDRATE(baudrate) ((baudrate) == 300 ? 0x00014000 :\
285 	(baudrate) == 600    ? 0x00027000 :			\
286 	(baudrate) == 1200   ? NRF_UARTE_BAUDRATE_1200 :	\
287 	(baudrate) == 2400   ? NRF_UARTE_BAUDRATE_2400 :	\
288 	(baudrate) == 4800   ? NRF_UARTE_BAUDRATE_4800 :	\
289 	(baudrate) == 9600   ? NRF_UARTE_BAUDRATE_9600 :	\
290 	(baudrate) == 14400  ? NRF_UARTE_BAUDRATE_14400 :	\
291 	(baudrate) == 19200  ? NRF_UARTE_BAUDRATE_19200 :	\
292 	(baudrate) == 28800  ? NRF_UARTE_BAUDRATE_28800 :	\
293 	(baudrate) == 31250  ? NRF_UARTE_BAUDRATE_31250 :	\
294 	(baudrate) == 38400  ? NRF_UARTE_BAUDRATE_38400 :	\
295 	(baudrate) == 56000  ? NRF_UARTE_BAUDRATE_56000 :	\
296 	(baudrate) == 57600  ? NRF_UARTE_BAUDRATE_57600 :	\
297 	(baudrate) == 76800  ? NRF_UARTE_BAUDRATE_76800 :	\
298 	(baudrate) == 115200 ? NRF_UARTE_BAUDRATE_115200 :	\
299 	(baudrate) == 230400 ? NRF_UARTE_BAUDRATE_230400 :	\
300 	(baudrate) == 250000 ? NRF_UARTE_BAUDRATE_250000 :	\
301 	(baudrate) == 460800 ? NRF_UARTE_BAUDRATE_460800 :	\
302 	(baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 :	\
303 	(baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0)
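/* Note (informative): for a constant argument the conditional chain above folds at compile
 * time, e.g. NRF_BAUDRATE(115200) becomes NRF_UARTE_BAUDRATE_115200. Any baudrate not listed
 * evaluates to 0, which baudrate_set() below treats as an invalid setting.
 */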
304 
305 #define LOW_POWER_ENABLED(_config) \
306 	(IS_ENABLED(UARTE_ANY_LOW_POWER) && \
307 	 !IS_ENABLED(CONFIG_PM_DEVICE) && \
308 	 (_config->flags & UARTE_CFG_FLAG_LOW_POWER))
309 
310 /** @brief Check if device has PM that works in ISR safe mode.
311  *
312  * Only the fast UARTE instances do not work in that mode, so the PM configuration
313  * flags are checked only if any fast instance is present.
314  *
315  * @retval true if device PM is ISR safe.
316  * @retval false if device PM is not ISR safe.
317  */
318 #define IS_PM_ISR_SAFE(dev) \
319 	(!IS_ENABLED(UARTE_ANY_FAST_PD) ||\
320 	 COND_CODE_1(CONFIG_PM_DEVICE,\
321 			((dev->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE))), \
322 			(0)))
323 
324 /**
325  * @brief Structure for UARTE configuration.
326  */
327 struct uarte_nrfx_config {
328 	NRF_UARTE_Type *uarte_regs; /* Instance address */
329 	uint32_t flags;
330 	bool disable_rx;
331 	const struct pinctrl_dev_config *pcfg;
332 #ifdef CONFIG_HAS_NORDIC_DMM
333 	void *mem_reg;
334 #endif
335 #ifdef UARTE_ANY_FAST_PD
336 	const struct device *clk_dev;
337 	struct nrf_clock_spec clk_spec;
338 #endif
339 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
340 	/* Non-zero in case of high speed instances. Baudrate is adjusted by that ratio. */
341 	uint32_t clock_freq;
342 #else
343 #ifdef UARTE_HAS_FRAME_TIMEOUT
344 	uint32_t baudrate;
345 #endif
346 	nrf_uarte_baudrate_t nrf_baudrate;
347 	nrf_uarte_config_t hw_config;
348 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
349 
350 #ifdef UARTE_ANY_ASYNC
351 	nrfx_timer_t timer;
352 	uint8_t *tx_cache;
353 	uint8_t *rx_flush_buf;
354 #endif
355 	uint8_t *poll_out_byte;
356 	uint8_t *poll_in_byte;
357 };
358 
359 /* A macro is used instead of a static inline function to handle the NO_OPTIMIZATIONS
360  * case, where static inline fails at link time.
361  */
362 #define HW_RX_COUNTING_ENABLED(config)    \
363 	(IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \
364 	 (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false)
365 
366 static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev)
367 {
368 	const struct uarte_nrfx_config *config = dev->config;
369 
370 	return config->uarte_regs;
371 }
372 
373 static void endtx_isr(const struct device *dev)
374 {
375 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
376 
377 	unsigned int key = irq_lock();
378 
379 	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
380 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
381 		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
382 	}
383 
384 	irq_unlock(key);
385 
386 }
387 
388 /** @brief Disable the UARTE peripheral if it is no longer used by RX or TX.
389  *
390  * It must be called with interrupts locked so that deciding if no direction is
391  * using the UARTE is atomically performed with UARTE peripheral disabling. Otherwise
392  * it would be possible that after clearing flags we get preempted and UARTE is
393  * enabled from the higher priority context and when we come back UARTE is disabled
394  * here.
395  * @param dev Device.
396  * @param dis_mask Mask of the direction (RX or TX) which no longer uses the UARTE instance.
397  */
398 static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask)
399 {
400 	struct uarte_nrfx_data *data = dev->data;
401 
402 	data->flags &= ~dis_mask;
403 	if (data->flags & UARTE_FLAG_LOW_POWER) {
404 		return;
405 	}
406 
407 #if defined(UARTE_ANY_ASYNC) && !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
408 	const struct uarte_nrfx_config *config = dev->config;
409 
410 	if (data->async && HW_RX_COUNTING_ENABLED(config)) {
411 		nrfx_timer_disable(&config->timer);
412 		/* Timer/counter value is reset when disabled. */
413 		data->async->rx.total_byte_cnt = 0;
414 		data->async->rx.total_user_byte_cnt = 0;
415 	}
416 #endif
417 
418 #ifdef CONFIG_SOC_NRF54H20_GPD
419 	const struct uarte_nrfx_config *cfg = dev->config;
420 
421 	nrf_gpd_retain_pins_set(cfg->pcfg, true);
422 #endif
423 	nrf_uarte_disable(get_uarte_instance(dev));
424 }
425 
426 #ifdef UARTE_ANY_NONE_ASYNC
427 /**
428  * @brief Interrupt service routine.
429  *
430  * This simply calls the callback function, if one exists.
431  *
432  * @param arg Argument to ISR.
433  */
434 static void uarte_nrfx_isr_int(const void *arg)
435 {
436 	const struct device *dev = arg;
437 	const struct uarte_nrfx_config *config = dev->config;
438 	struct uarte_nrfx_data *data = dev->data;
439 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
440 
441 	/* Even when the interrupt driven and asynchronous APIs are disabled, the UART
442 	 * interrupt is still used to stop TX, unless that is done using PPI.
443 	 */
444 	if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) &&
445 	    nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK) &&
446 		nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
447 		endtx_isr(dev);
448 	}
449 
450 	bool txstopped = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED);
451 
452 	if (txstopped && (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config))) {
453 		unsigned int key = irq_lock();
454 
455 		if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
456 			if (data->flags & UARTE_FLAG_POLL_OUT) {
457 				data->flags &= ~UARTE_FLAG_POLL_OUT;
458 				pm_device_runtime_put_async(dev, K_NO_WAIT);
459 			}
460 		} else {
461 			uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
462 		}
463 #ifdef UARTE_INTERRUPT_DRIVEN
464 		if (!data->int_driven)
465 #endif
466 		{
467 			nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
468 		}
469 
470 		irq_unlock(key);
471 	}
472 
473 #ifdef UARTE_INTERRUPT_DRIVEN
474 	if (!data->int_driven) {
475 		return;
476 	}
477 
478 	if (txstopped) {
479 		data->int_driven->fifo_fill_lock = 0;
480 		if (!data->int_driven->tx_irq_enabled) {
481 
482 			nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
483 		}
484 
485 		if (data->int_driven->disable_tx_irq) {
486 			data->int_driven->disable_tx_irq = false;
487 			if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
488 				pm_device_runtime_put_async(dev, K_NO_WAIT);
489 			}
490 			return;
491 		}
492 	}
493 
494 	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
495 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
496 	}
497 
498 	if (data->int_driven->cb) {
499 		data->int_driven->cb(dev, data->int_driven->cb_data);
500 	}
501 #endif /* UARTE_INTERRUPT_DRIVEN */
502 }
503 #endif /* UARTE_ANY_NONE_ASYNC */
504 
505 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
506 /**
507  * @brief Set the baud rate
508  *
509  * This routine sets the given baud rate for the UARTE.
510  *
511  * @param dev UARTE device struct
512  * @param baudrate Baud rate
513  *
514  * @return 0 on success or error code
515  */
516 static int baudrate_set(const struct device *dev, uint32_t baudrate)
517 {
518 	const struct uarte_nrfx_config *config = dev->config;
519 	nrf_uarte_baudrate_t nrf_baudrate;
520 
521 	/* calculated baudrate divisor */
522 	if (UARTE_ANY_HIGH_SPEED && (config->clock_freq > NRF_UARTE_BASE_FREQUENCY_16MHZ)) {
523 		nrf_baudrate = UARTE_GET_CUSTOM_BAUDRATE(config->clock_freq, baudrate);
524 	} else {
525 		nrf_baudrate = NRF_BAUDRATE(baudrate);
526 	}
527 
528 	if (nrf_baudrate == 0) {
529 		return -EINVAL;
530 	}
531 
532 #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND
533 	struct uarte_nrfx_data *data = dev->data;
534 
535 	data->nrf_baudrate = nrf_baudrate;
536 #else
537 	nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate);
538 #endif
539 
540 	return 0;
541 }
542 
543 static int uarte_nrfx_configure(const struct device *dev,
544 				const struct uart_config *cfg)
545 {
546 	struct uarte_nrfx_data *data = dev->data;
547 	nrf_uarte_config_t uarte_cfg;
548 
549 #if defined(UARTE_CONFIG_STOP_Msk)
550 	switch (cfg->stop_bits) {
551 	case UART_CFG_STOP_BITS_1:
552 		uarte_cfg.stop = NRF_UARTE_STOP_ONE;
553 		break;
554 	case UART_CFG_STOP_BITS_2:
555 		uarte_cfg.stop = NRF_UARTE_STOP_TWO;
556 		break;
557 	default:
558 		return -ENOTSUP;
559 	}
560 #else
561 	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
562 		return -ENOTSUP;
563 	}
564 #endif
565 
566 	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
567 		return -ENOTSUP;
568 	}
569 
570 	switch (cfg->flow_ctrl) {
571 	case UART_CFG_FLOW_CTRL_NONE:
572 		uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
573 		break;
574 	case UART_CFG_FLOW_CTRL_RTS_CTS:
575 		uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
576 		break;
577 	default:
578 		return -ENOTSUP;
579 	}
580 
581 #if defined(UARTE_CONFIG_PARITYTYPE_Msk)
582 	uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
583 #endif
584 	switch (cfg->parity) {
585 	case UART_CFG_PARITY_NONE:
586 		uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
587 		break;
588 	case UART_CFG_PARITY_EVEN:
589 		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
590 		break;
591 #if defined(UARTE_CONFIG_PARITYTYPE_Msk)
592 	case UART_CFG_PARITY_ODD:
593 		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
594 		uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
595 		break;
596 #endif
597 	default:
598 		return -ENOTSUP;
599 	}
600 
601 	if (baudrate_set(dev, cfg->baudrate) != 0) {
602 		return -ENOTSUP;
603 	}
604 
605 #ifdef UARTE_HAS_FRAME_TIMEOUT
606 	uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN;
607 #endif
608 
609 #if NRF_UARTE_HAS_FRAME_SIZE
610 	uarte_cfg.frame_size = NRF_UARTE_FRAME_SIZE_8_BIT;
611 	uarte_cfg.endian = NRF_UARTE_ENDIAN_MSB;
612 #endif
613 
614 	nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg);
615 
616 	data->uart_config = *cfg;
617 
618 	return 0;
619 }
620 
621 static int uarte_nrfx_config_get(const struct device *dev,
622 				 struct uart_config *cfg)
623 {
624 	struct uarte_nrfx_data *data = dev->data;
625 
626 	*cfg = data->uart_config;
627 	return 0;
628 }
629 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
630 
631 
632 static int uarte_nrfx_err_check(const struct device *dev)
633 {
634 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
635 	/* The register bitfields map to the defines in uart.h. */
636 	return nrf_uarte_errorsrc_get_and_clear(uarte);
637 }
638 
639 /* Function returns true if a new transfer can be started. Since TXSTOPPED
640  * (and ENDTX) is cleared before triggering a new transfer, TX is ready for a
641  * new transfer if either event is set.
642  */
643 static bool is_tx_ready(const struct device *dev)
644 {
645 	const struct uarte_nrfx_config *config = dev->config;
646 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
647 	bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX ||
648 			 IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT);
649 
650 	return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
651 		(!ppi_endtx ?
652 		       nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) : 0);
653 }
654 
655 /* Wait until the transmitter is in the idle state. When this function returns,
656  * IRQs are locked with the returned key (see the usage sketch after the function).
657  */
658 static int wait_tx_ready(const struct device *dev)
659 {
660 	unsigned int key;
661 
662 	do {
663 		/* Wait an arbitrary time before backing off. */
664 		bool res;
665 
666 #if defined(CONFIG_ARCH_POSIX)
667 		NRFX_WAIT_FOR(is_tx_ready(dev), 33, 3, res);
668 #else
669 		NRFX_WAIT_FOR(is_tx_ready(dev), 100, 1, res);
670 #endif
671 
672 		if (res) {
673 			key = irq_lock();
674 			if (is_tx_ready(dev)) {
675 				break;
676 			}
677 
678 			irq_unlock(key);
679 		}
680 		if (IS_ENABLED(CONFIG_MULTITHREADING)) {
681 			k_msleep(1);
682 		}
683 	} while (1);
684 
685 	return key;
686 }
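/* Usage sketch (illustrative, not an actual caller from this file): the returned key must be
 * passed back to irq_unlock() once the caller is done with the transmitter, e.g.:
 *
 *   unsigned int key = wait_tx_ready(dev);
 *   // ... program the next transfer while interrupts are locked ...
 *   irq_unlock(key);
 */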
687 
688 static void uarte_periph_enable(const struct device *dev)
689 {
690 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
691 	const struct uarte_nrfx_config *config = dev->config;
692 	struct uarte_nrfx_data *data = dev->data;
693 
694 	(void)data;
695 #ifdef UARTE_ANY_FAST_PD
696 	if (config->clk_dev) {
697 		int err;
698 
699 		err = nrf_clock_control_request_sync(config->clk_dev, &config->clk_spec, K_FOREVER);
700 		(void)err;
701 		__ASSERT_NO_MSG(err >= 0);
702 	}
703 #endif
704 
705 	nrf_uarte_enable(uarte);
706 #ifdef CONFIG_SOC_NRF54H20_GPD
707 	nrf_gpd_retain_pins_set(config->pcfg, false);
708 #endif
709 #if UARTE_BAUDRATE_RETENTION_WORKAROUND
710 	nrf_uarte_baudrate_set(uarte,
711 		COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE,
712 			(data->nrf_baudrate), (config->nrf_baudrate)));
713 #endif
714 
715 #ifdef UARTE_ANY_ASYNC
716 	if (data->async) {
717 		if (HW_RX_COUNTING_ENABLED(config)) {
718 			const nrfx_timer_t *timer = &config->timer;
719 
720 			nrfx_timer_enable(timer);
721 
722 			for (int i = 0; i < data->async->rx.flush_cnt; i++) {
723 				nrfx_timer_increment(timer);
724 			}
725 		}
726 		return;
727 	}
728 #endif
729 
730 	if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) {
731 		nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1);
732 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
733 		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
734 #if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE)
735 		if (data->int_driven && data->int_driven->rx_irq_enabled) {
736 			nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
737 		}
738 #endif
739 	}
740 }
741 
742 static void uarte_enable_locked(const struct device *dev, uint32_t act_mask)
743 {
744 	struct uarte_nrfx_data *data = dev->data;
745 	bool already_active = (data->flags & UARTE_FLAG_LOW_POWER) != 0;
746 
747 	data->flags |= act_mask;
748 	if (already_active) {
749 		/* Second direction already enabled so UARTE is enabled. */
750 		return;
751 	}
752 
753 	uarte_periph_enable(dev);
754 }
755 
756 /* At this point IRQs should be locked and any previous transfer completed.
757  * The transfer can be started; there is no need to wait for completion.
758  */
759 static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
760 {
761 	const struct uarte_nrfx_config *config = dev->config;
762 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
763 
764 #if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME)
765 	enum pm_device_state state;
766 
767 	(void)pm_device_state_get(dev, &state);
768 	if (state != PM_DEVICE_STATE_ACTIVE) {
769 		return;
770 	}
771 #endif
772 
773 	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
774 		sys_cache_data_flush_range((void *)buf, len);
775 	}
776 
777 	nrf_uarte_tx_buffer_set(uarte, buf, len);
778 	if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) {
779 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
780 	}
781 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);
782 
783 	if (LOW_POWER_ENABLED(config)) {
784 		uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
785 	}
786 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
787 }
788 
789 #if defined(UARTE_ANY_ASYNC)
790 static void rx_timeout(struct k_timer *timer);
791 static void tx_timeout(struct k_timer *timer);
792 
793 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
794 static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
795 
796 static int uarte_nrfx_rx_counting_init(const struct device *dev)
797 {
798 	struct uarte_nrfx_data *data = dev->data;
799 	const struct uarte_nrfx_config *cfg = dev->config;
800 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
801 	int ret;
802 
803 	if (HW_RX_COUNTING_ENABLED(cfg)) {
804 		nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG(
805 						NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg));
806 		uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY);
807 		uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT);
808 
809 		tmr_config.mode = NRF_TIMER_MODE_COUNTER;
810 		tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32;
811 		ret = nrfx_timer_init(&cfg->timer,
812 				      &tmr_config,
813 				      timer_handler);
814 		if (ret != NRFX_SUCCESS) {
815 			LOG_ERR("Timer already initialized");
816 			return -EINVAL;
817 		} else {
818 			nrfx_timer_clear(&cfg->timer);
819 		}
820 
821 		ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi);
822 		if (ret != NRFX_SUCCESS) {
823 			LOG_ERR("Failed to allocate PPI Channel");
824 			nrfx_timer_uninit(&cfg->timer);
825 			return -EINVAL;
826 		}
827 
828 		nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr);
829 		nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi));
830 	} else {
831 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
832 	}
833 
834 	return 0;
835 }
836 #endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */
837 
838 static int uarte_async_init(const struct device *dev)
839 {
840 	struct uarte_nrfx_data *data = dev->data;
841 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
842 	static const uint32_t rx_int_mask =
843 		NRF_UARTE_INT_ENDRX_MASK |
844 		NRF_UARTE_INT_RXSTARTED_MASK |
845 		NRF_UARTE_INT_ERROR_MASK |
846 		NRF_UARTE_INT_RXTO_MASK |
847 		((IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) &&
848 		  !IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) ? NRF_UARTE_INT_RXDRDY_MASK : 0);
849 
850 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
851 	int ret = uarte_nrfx_rx_counting_init(dev);
852 
853 	if (ret != 0) {
854 		return ret;
855 	}
856 #endif
857 
858 	nrf_uarte_int_enable(uarte, rx_int_mask);
859 
860 	k_timer_init(&data->async->rx.timer, rx_timeout, NULL);
861 	k_timer_user_data_set(&data->async->rx.timer, (void *)dev);
862 	k_timer_init(&data->async->tx.timer, tx_timeout, NULL);
863 	k_timer_user_data_set(&data->async->tx.timer, (void *)dev);
864 
865 	return 0;
866 }
867 
868 /* Attempt to start TX (asynchronous transfer). If the hardware is not ready, the pending
869  * flag is set. When the current poll_out is completed, the pending transfer is started.
870  * The function must be called with interrupts locked.
871  */
872 static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *data)
873 {
874 	nrf_uarte_int_enable(get_uarte_instance(dev), NRF_UARTE_INT_TXSTOPPED_MASK);
875 	if (!is_tx_ready(dev)) {
876 		/* Active poll out, postpone until it is completed. */
877 		data->async->tx.pending = true;
878 	} else {
879 		data->async->tx.pending = false;
880 		data->async->tx.amount = -1;
881 		tx_start(dev, data->async->tx.xfer_buf, data->async->tx.xfer_len);
882 	}
883 }
884 
885 /* Set up the cache buffer (used for sending data that is not located in RAM).
886  * During setup, data is copied to the cache buffer and the transfer length is set.
887  *
888  * @return True if the cache was set, false if there is no more data to put in the cache.
889  */
890 static bool setup_tx_cache(const struct device *dev)
891 {
892 	struct uarte_nrfx_data *data = dev->data;
893 	const struct uarte_nrfx_config *config = dev->config;
894 	size_t remaining = data->async->tx.len - data->async->tx.cache_offset;
895 
896 	if (!remaining) {
897 		return false;
898 	}
899 
900 	size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE);
901 
902 	data->async->tx.xfer_len = len;
903 	data->async->tx.xfer_buf = config->tx_cache;
904 	memcpy(config->tx_cache, &data->async->tx.buf[data->async->tx.cache_offset], len);
905 
906 	return true;
907 }
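/* Example (illustrative, assuming CONFIG_UART_ASYNC_TX_CACHE_SIZE = 32): a 100-byte buffer
 * located in flash is transmitted as three 32-byte chunks followed by one 4-byte chunk;
 * setup_tx_cache() returns false once cache_offset reaches the full 100 bytes.
 */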
908 
909 static bool has_hwfc(const struct device *dev)
910 {
911 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
912 	struct uarte_nrfx_data *data = dev->data;
913 
914 	return data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS;
915 #else
916 	const struct uarte_nrfx_config *config = dev->config;
917 
918 	return config->hw_config.hwfc == NRF_UARTE_HWFC_ENABLED;
919 #endif
920 }
921 
922 static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
923 			 size_t len,
924 			 int32_t timeout)
925 {
926 	struct uarte_nrfx_data *data = dev->data;
927 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
928 
929 	unsigned int key = irq_lock();
930 
931 	if (data->async->tx.len) {
932 		irq_unlock(key);
933 		return -EBUSY;
934 	}
935 
936 	data->async->tx.len = len;
937 	data->async->tx.buf = buf;
938 
939 	if (nrf_dma_accessible_check(uarte, buf)) {
940 		data->async->tx.xfer_buf = buf;
941 		data->async->tx.xfer_len = len;
942 	} else {
943 		data->async->tx.cache_offset = 0;
944 		(void)setup_tx_cache(dev);
945 	}
946 
947 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
948 		if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
949 			/* If the instance does not support PM from ISR context, the
950 			 * device shall already be turned on.
951 			 */
952 			enum pm_device_state state;
953 			int err;
954 
955 			err = pm_device_state_get(dev, &state);
956 			(void)err;
957 			__ASSERT_NO_MSG(err == 0);
958 			if (state != PM_DEVICE_STATE_ACTIVE) {
959 				return -ENOTSUP;
960 			}
961 		}
962 		pm_device_runtime_get(dev);
963 	}
964 
965 	start_tx_locked(dev, data);
966 
967 	irq_unlock(key);
968 
969 	if (has_hwfc(dev) && timeout != SYS_FOREVER_US) {
970 		k_timer_start(&data->async->tx.timer, K_USEC(timeout), K_NO_WAIT);
971 	}
972 	return 0;
973 }
974 
975 static int uarte_nrfx_tx_abort(const struct device *dev)
976 {
977 	struct uarte_nrfx_data *data = dev->data;
978 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
979 
980 	if (data->async->tx.buf == NULL) {
981 		return -EFAULT;
982 	}
983 
984 	data->async->tx.pending = false;
985 	k_timer_stop(&data->async->tx.timer);
986 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
987 
988 	return 0;
989 }
990 
991 static void user_callback(const struct device *dev, struct uart_event *evt)
992 {
993 	struct uarte_nrfx_data *data = dev->data;
994 
995 	if (data->async->user_callback) {
996 		data->async->user_callback(dev, evt, data->async->user_data);
997 	}
998 }
999 
1000 static void notify_uart_rx_rdy(const struct device *dev, size_t len)
1001 {
1002 	struct uarte_nrfx_data *data = dev->data;
1003 	struct uart_event evt = {
1004 		.type = UART_RX_RDY,
1005 		.data.rx.buf = data->async->rx.buf,
1006 		.data.rx.len = len,
1007 		.data.rx.offset = data->async->rx.offset
1008 	};
1009 
1010 	user_callback(dev, &evt);
1011 }
1012 
1013 static void rx_buf_release(const struct device *dev, uint8_t *buf)
1014 {
1015 	struct uart_event evt = {
1016 		.type = UART_RX_BUF_RELEASED,
1017 		.data.rx_buf.buf = buf,
1018 	};
1019 
1020 	user_callback(dev, &evt);
1021 }
1022 
1023 static void notify_rx_disable(const struct device *dev)
1024 {
1025 	struct uart_event evt = {
1026 		.type = UART_RX_DISABLED,
1027 	};
1028 
1029 	user_callback(dev, (struct uart_event *)&evt);
1030 
1031 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1032 		pm_device_runtime_put_async(dev, K_NO_WAIT);
1033 	}
1034 }
1035 
1036 #ifdef UARTE_HAS_FRAME_TIMEOUT
1037 static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout)
1038 {
1039 	uint64_t bauds = (uint64_t)baudrate * timeout / 1000000;
1040 
1041 	return MIN((uint32_t)bauds, UARTE_FRAMETIMEOUT_COUNTERTOP_Msk);
1042 }
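/* Worked example (illustrative): us_to_bauds(115200, 1000) = 115200 * 1000 / 1000000 = 115,
 * i.e. a 1 ms RX timeout corresponds to 115 bit periods at 115200 baud (clamped to the
 * FRAMETIMEOUT counter range).
 */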
1043 #endif
1044 
1045 static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
1046 				size_t len,
1047 				int32_t timeout)
1048 {
1049 	struct uarte_nrfx_data *data = dev->data;
1050 	struct uarte_async_rx *async_rx = &data->async->rx;
1051 	const struct uarte_nrfx_config *cfg = dev->config;
1052 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1053 
1054 	if (cfg->disable_rx) {
1055 		__ASSERT(false, "TX only UARTE instance");
1056 		return -ENOTSUP;
1057 	}
1058 
1059 	/* Signal error if RX is already enabled or if the driver is waiting
1060 	 * for the RXTO event after a call to uart_rx_disable() to discard
1061 	 * data from the UARTE internal RX FIFO.
1062 	 */
1063 	if (async_rx->enabled || async_rx->discard_fifo) {
1064 		return -EBUSY;
1065 	}
1066 
1067 #ifdef CONFIG_HAS_NORDIC_DMM
1068 	uint8_t *dma_buf;
1069 	int ret = 0;
1070 
1071 	ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf);
1072 	if (ret < 0) {
1073 		return ret;
1074 	}
1075 
1076 	async_rx->usr_buf = buf;
1077 	buf = dma_buf;
1078 #endif
1079 
1080 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1081 #ifdef UARTE_HAS_FRAME_TIMEOUT
1082 	if (timeout != SYS_FOREVER_US) {
1083 		uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE,
1084 			(data->uart_config.baudrate), (cfg->baudrate));
1085 
1086 		async_rx->timeout = K_USEC(timeout);
1087 		nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout));
1088 		nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX);
1089 	} else {
1090 		async_rx->timeout = K_NO_WAIT;
1091 	}
1092 #else
1093 	async_rx->timeout = (timeout == SYS_FOREVER_US) ?
1094 		K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV);
1095 	async_rx->idle_cnt = 0;
1096 #endif /* UARTE_HAS_FRAME_TIMEOUT */
1097 #else
1098 	async_rx->timeout_us = timeout;
1099 	async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV;
1100 #endif
1101 
1102 	async_rx->buf = buf;
1103 	async_rx->buf_len = len;
1104 	async_rx->offset = 0;
1105 	async_rx->next_buf = NULL;
1106 	async_rx->next_buf_len = 0;
1107 
1108 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1109 		if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
1110 			/* If the instance does not support PM from ISR context, the
1111 			 * device shall already be turned on.
1112 			 */
1113 			enum pm_device_state state;
1114 			int err;
1115 
1116 			err = pm_device_state_get(dev, &state);
1117 			(void)err;
1118 			__ASSERT_NO_MSG(err == 0);
1119 			if (state != PM_DEVICE_STATE_ACTIVE) {
1120 				return -ENOTSUP;
1121 			}
1122 		}
1123 		pm_device_runtime_get(dev);
1124 	}
1125 
1126 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(cfg)) {
1127 		if (async_rx->flush_cnt) {
1128 			int cpy_len = MIN(len, async_rx->flush_cnt);
1129 
1130 			if (IS_ENABLED(UARTE_ANY_CACHE) &&
1131 			    (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1132 				sys_cache_data_invd_range(cfg->rx_flush_buf, cpy_len);
1133 			}
1134 
1135 			memcpy(buf, cfg->rx_flush_buf, cpy_len);
1136 
1137 			if (IS_ENABLED(UARTE_ANY_CACHE) &&
1138 			    (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1139 				sys_cache_data_flush_range(buf, cpy_len);
1140 			}
1141 
1142 			buf += cpy_len;
1143 			len -= cpy_len;
1144 
1145 			/* If the flushed content filled the whole new buffer, trigger the
1146 			 * interrupt to notify about received data and disable RX from there.
1147 			 */
1148 			if (!len) {
1149 				async_rx->flush_cnt -= cpy_len;
1150 				memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len],
1151 						async_rx->flush_cnt);
1152 				if (IS_ENABLED(UARTE_ANY_CACHE) &&
1153 				    (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1154 					sys_cache_data_flush_range(cfg->rx_flush_buf,
1155 								   async_rx->flush_cnt);
1156 				}
1157 				atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO);
1158 				NRFX_IRQ_PENDING_SET(nrfx_get_irq_number(uarte));
1159 				return 0;
1160 			} else {
1161 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1162 				if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) {
1163 					nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1164 					k_timer_start(&async_rx->timer, async_rx->timeout,
1165 							K_NO_WAIT);
1166 				}
1167 #endif
1168 			}
1169 		}
1170 	}
1171 
1172 	nrf_uarte_rx_buffer_set(uarte, buf, len);
1173 
1174 	if (IS_ENABLED(UARTE_ANY_FAST_PD) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1175 		/* Spurious RXTO event was seen on fast instance (UARTE120) thus
1176 		 * RXTO interrupt is kept enabled only when RX is active.
1177 		 */
1178 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
1179 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXTO_MASK);
1180 	}
1181 
1182 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1183 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1184 
1185 	async_rx->enabled = true;
1186 
1187 	if (LOW_POWER_ENABLED(cfg)) {
1188 		unsigned int key = irq_lock();
1189 
1190 		uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX);
1191 		irq_unlock(key);
1192 	}
1193 
1194 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1195 
1196 	return 0;
1197 }
1198 
1199 static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
1200 				 size_t len)
1201 {
1202 	struct uarte_nrfx_data *data = dev->data;
1203 	struct uarte_async_rx *async_rx = &data->async->rx;
1204 	int err;
1205 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1206 	unsigned int key = irq_lock();
1207 
1208 	if (async_rx->buf == NULL) {
1209 		err = -EACCES;
1210 	} else if (async_rx->next_buf == NULL) {
1211 #ifdef CONFIG_HAS_NORDIC_DMM
1212 		uint8_t *dma_buf;
1213 		const struct uarte_nrfx_config *config = dev->config;
1214 
1215 		err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf);
1216 		if (err < 0) {
1217 			return err;
1218 		}
1219 		async_rx->next_usr_buf = buf;
1220 		buf = dma_buf;
1221 #endif
1222 		async_rx->next_buf = buf;
1223 		async_rx->next_buf_len = len;
1224 		nrf_uarte_rx_buffer_set(uarte, buf, len);
1225 		/* If the buffer is shorter than the RX FIFO, there is a risk that due
1226 		 * to interrupt handling latency the ENDRX event is not handled on time
1227 		 * and, because of the ENDRX_STARTRX short, data starts to be overwritten.
1228 		 * In that case the short is not enabled and the ENDRX event handler
1229 		 * starts RX for that buffer manually. Thanks to the RX FIFO there is
1230 		 * time for 5 bytes to do that. If the interrupt latency is higher and
1231 		 * there is no HWFC, then in both cases data will be lost or corrupted.
1232 		 */
1233 		if (len >= UARTE_HW_RX_FIFO_SIZE) {
1234 			nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1235 		}
1236 		err = 0;
1237 	} else {
1238 		err = -EBUSY;
1239 	}
1240 
1241 	irq_unlock(key);
1242 
1243 	return err;
1244 }
1245 
1246 static int uarte_nrfx_callback_set(const struct device *dev,
1247 				   uart_callback_t callback,
1248 				   void *user_data)
1249 {
1250 	struct uarte_nrfx_data *data = dev->data;
1251 
1252 	if (!data->async) {
1253 		return -ENOTSUP;
1254 	}
1255 
1256 	data->async->user_callback = callback;
1257 	data->async->user_data = user_data;
1258 
1259 	return 0;
1260 }
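/* Application-level usage sketch (illustrative only; the node label, buffer sizes and
 * callback body are made up, the calls are the generic Zephyr async UART API declared in
 * <zephyr/drivers/uart.h>):
 *
 *   static uint8_t rx_buf[64];
 *   static const uint8_t tx_buf[] = "hello";
 *
 *   static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
 *   {
 *           if (evt->type == UART_RX_RDY) {
 *                   // evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len bytes
 *           }
 *   }
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *
 *   uart_callback_set(uart, uart_cb, NULL);
 *   uart_rx_enable(uart, rx_buf, sizeof(rx_buf), 10 * USEC_PER_MSEC);
 *   uart_tx(uart, tx_buf, sizeof(tx_buf) - 1, SYS_FOREVER_US);
 */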
1261 
1262 static int uarte_nrfx_rx_disable(const struct device *dev)
1263 {
1264 	struct uarte_nrfx_data *data = dev->data;
1265 	struct uarte_async_rx *async_rx = &data->async->rx;
1266 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1267 	int key;
1268 
1269 	if (async_rx->buf == NULL) {
1270 		return -EFAULT;
1271 	}
1272 
1273 	k_timer_stop(&async_rx->timer);
1274 
1275 	key = irq_lock();
1276 
1277 	if (async_rx->next_buf != NULL) {
1278 		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1279 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1280 	}
1281 
1282 	async_rx->enabled = false;
1283 	async_rx->discard_fifo = true;
1284 
1285 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1286 	irq_unlock(key);
1287 
1288 	return 0;
1289 }
1290 
1291 static void tx_timeout(struct k_timer *timer)
1292 {
1293 	const struct device *dev = k_timer_user_data_get(timer);
1294 	(void) uarte_nrfx_tx_abort(dev);
1295 }
1296 
1297 /**
1298  * The whole timeout is divided by RX_TIMEOUT_DIV into smaller units; rx_timeout
1299  * is executed periodically, every rx_timeout_slab us. If data was received
1300  * between executions, the countdown restarts from the full timeout; if not,
1301  * rx_timeout_slab is subtracted from rx_timeout_left.
1302  * If rx_timeout_left is less than rx_timeout_slab, it means that receiving has
1303  * timed out and the user should be notified (worked example below).
1304  */
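/* Worked example (illustrative, applies to the non-enhanced RX path): with uart_rx_enable()
 * called with timeout = 50000 us and RX_TIMEOUT_DIV = 5, rx_timeout_slab is 10000 us, so
 * this handler runs every 10 ms. Each run without new data subtracts 10 ms from
 * rx_timeout_left, and pending data is reported once the consecutive idle slabs add up to
 * the requested 50 ms timeout.
 */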
1305 static void rx_timeout(struct k_timer *timer)
1306 {
1307 	const struct device *dev = k_timer_user_data_get(timer);
1308 
1309 #if  CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1310 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1311 
1312 #ifdef UARTE_HAS_FRAME_TIMEOUT
1313 	if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
1314 		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1315 	}
1316 	return;
1317 #else /* UARTE_HAS_FRAME_TIMEOUT */
1318 	struct uarte_nrfx_data *data = dev->data;
1319 	struct uarte_async_rx *async_rx = &data->async->rx;
1320 
1321 	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
1322 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1323 		async_rx->idle_cnt = 0;
1324 	} else {
1325 		async_rx->idle_cnt++;
1326 		/* We compare against RX_TIMEOUT_DIV - 1 to get a timeout that is rather too
1327 		 * early than too late. idle_cnt is reset when the last RX activity (RXDRDY
1328 		 * event) is detected. That may happen while RX has already been inactive for
1329 		 * a whole RX timeout period (which is the case when the transmission is short
1330 		 * compared to the timeout, e.g. the timeout is 50 ms and transmitting a few
1331 		 * bytes takes less than 1 ms). In that case, if we compared against
1332 		 * RX_TIMEOUT_DIV, the RX notification would come after (RX_TIMEOUT_DIV + 1) * timeout.
1333 		 */
1334 		if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) {
1335 			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1336 			return;
1337 		}
1338 	}
1339 
1340 	k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT);
1341 #endif /* UARTE_HAS_FRAME_TIMEOUT */
1342 #else /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1343 	const struct uarte_nrfx_config *cfg = dev->config;
1344 	struct uarte_nrfx_data *data = dev->data;
1345 	struct uarte_async_rx *async_rx = &data->async->rx;
1346 	uint32_t read;
1347 
1348 	if (async_rx->is_in_irq) {
1349 		return;
1350 	}
1351 
1352 	/* Disable the ENDRX interrupt; if an ENDRX event is generated, it will be
1353 	 * handled after the rx_timeout routine is complete.
1354 	 */
1355 	nrf_uarte_int_disable(get_uarte_instance(dev),
1356 			      NRF_UARTE_INT_ENDRX_MASK);
1357 
1358 	if (HW_RX_COUNTING_ENABLED(cfg)) {
1359 		read = nrfx_timer_capture(&cfg->timer, 0);
1360 	} else {
1361 		read = async_rx->cnt.cnt;
1362 	}
1363 
1364 	/* Check if data was received since last function call */
1365 	if (read != async_rx->total_byte_cnt) {
1366 		async_rx->total_byte_cnt = read;
1367 		async_rx->timeout_left = async_rx->timeout_us;
1368 	}
1369 
1370 	/* Check if there is data that was not sent to the user yet.
1371 	 * Note though that 'len' is a count of data bytes received, but not
1372 	 * necessarily the amount available in the current buffer.
1373 	 */
1374 	int32_t len = async_rx->total_byte_cnt - async_rx->total_user_byte_cnt;
1375 
1376 	if (!HW_RX_COUNTING_ENABLED(cfg) &&
1377 	    (len < 0)) {
1378 		/* Prevent a too low value of rx_cnt.cnt, which may occur due to
1379 		 * latencies in handling of the RXDRDY interrupt.
1380 		 * At this point, the number of received bytes is at least
1381 		 * equal to what was reported to the user.
1382 		 */
1383 		async_rx->cnt.cnt = async_rx->total_user_byte_cnt;
1384 		len = 0;
1385 	}
1386 
1387 	/* Check for the current buffer being full.
1388 	 * If the UART receives characters before ENDRX is handled
1389 	 * and the 'next' buffer is set up, then the SHORT between ENDRX and
1390 	 * STARTRX means that data will be going into the 'next' buffer
1391 	 * until the ENDRX event gets a chance to be handled.
1392 	 */
1393 	bool clipped = false;
1394 
1395 	if (len + async_rx->offset > async_rx->buf_len) {
1396 		len = async_rx->buf_len - async_rx->offset;
1397 		clipped = true;
1398 	}
1399 
1400 	if (len > 0) {
1401 		if (clipped || (async_rx->timeout_left < async_rx->timeout_slab)) {
1402 			/* rx_timeout us elapsed since last receiving */
1403 			if (async_rx->buf != NULL) {
1404 				notify_uart_rx_rdy(dev, len);
1405 				async_rx->offset += len;
1406 				async_rx->total_user_byte_cnt += len;
1407 			}
1408 		} else {
1409 			async_rx->timeout_left -= async_rx->timeout_slab;
1410 		}
1411 
1412 		/* If there's nothing left to report until the buffers are
1413 		 * switched then the timer can be stopped
1414 		 */
1415 		if (clipped) {
1416 			k_timer_stop(&async_rx->timer);
1417 		}
1418 	}
1419 
1420 	nrf_uarte_int_enable(get_uarte_instance(dev),
1421 			     NRF_UARTE_INT_ENDRX_MASK);
1422 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1423 }
1424 
1425 #define UARTE_ERROR_FROM_MASK(mask)					\
1426 	((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	\
1427 	 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY	\
1428 	 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	\
1429 	 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK		\
1430 	 : 0)
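/* Note (informative): the conditions above are evaluated in priority order, so when several
 * error bits are set at once only the first matching one is reported, e.g. a mask with both
 * the overrun and framing bits set maps to UART_ERROR_OVERRUN.
 */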
1431 
1432 static void error_isr(const struct device *dev)
1433 {
1434 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1435 	uint32_t err = nrf_uarte_errorsrc_get(uarte);
1436 	struct uart_event evt = {
1437 		.type = UART_RX_STOPPED,
1438 		.data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err),
1439 	};
1440 
1441 	/* For VPR cores read and write may be reordered - barrier needed. */
1442 	nrf_barrier_r();
1443 	nrf_uarte_errorsrc_clear(uarte, err);
1444 
1445 	user_callback(dev, &evt);
1446 	(void) uarte_nrfx_rx_disable(dev);
1447 }
1448 
1449 static void rxstarted_isr(const struct device *dev)
1450 {
1451 	struct uart_event evt = {
1452 		.type = UART_RX_BUF_REQUEST,
1453 	};
1454 
1455 #ifndef UARTE_HAS_FRAME_TIMEOUT
1456 	struct uarte_nrfx_data *data = dev->data;
1457 	struct uarte_async_rx *async_rx = &data->async->rx;
1458 
1459 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1460 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1461 
1462 	if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) {
1463 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
1464 	}
1465 #else
1466 	if (async_rx->timeout_us != SYS_FOREVER_US) {
1467 		k_timeout_t timeout = K_USEC(async_rx->timeout_slab);
1468 
1469 		async_rx->timeout_left = async_rx->timeout_us;
1470 		k_timer_start(&async_rx->timer, timeout, timeout);
1471 	}
1472 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1473 #endif /* !UARTE_HAS_FRAME_TIMEOUT */
1474 	user_callback(dev, &evt);
1475 }
1476 
1477 static void endrx_isr(const struct device *dev)
1478 {
1479 	struct uarte_nrfx_data *data = dev->data;
1480 	struct uarte_async_rx *async_rx = &data->async->rx;
1481 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1482 
1483 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1484 	async_rx->is_in_irq = true;
1485 #endif
1486 
1487 	/* ensure rx timer is stopped - it will be restarted in RXSTARTED
1488 	 * handler if needed
1489 	 */
1490 	k_timer_stop(&async_rx->timer);
1491 
1492 	/* this is the amount that the EasyDMA controller has copied into the
1493 	 * buffer
1494 	 */
1495 	const int rx_amount = nrf_uarte_rx_amount_get(uarte) + async_rx->flush_cnt;
1496 
1497 #ifdef CONFIG_HAS_NORDIC_DMM
1498 	const struct uarte_nrfx_config *config = dev->config;
1499 	int err =
1500 		dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, rx_amount, async_rx->buf);
1501 
1502 	(void)err;
1503 	__ASSERT_NO_MSG(err == 0);
1504 	async_rx->buf = async_rx->usr_buf;
1505 #endif
1506 	async_rx->flush_cnt = 0;
1507 
1508 	/* The 'rx_offset' can be bigger than 'rx_amount', so the length
1509 	 * of data we report back to the user may need to be clipped.
1510 	 * This can happen because the 'rx_offset' count derives from RXDRDY
1511 	 * events, which can already occur for the next buffer before we are
1512 	 * here to handle this buffer. (The next buffer is now already active
1513 	 * because of the ENDRX_STARTRX shortcut.)
1514 	 */
1515 	int rx_len = rx_amount - async_rx->offset;
1516 
1517 	if (rx_len < 0) {
1518 		rx_len = 0;
1519 	}
1520 
1521 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1522 	async_rx->total_user_byte_cnt += rx_len;
1523 #endif
1524 
1525 	/* Only send the RX_RDY event if there is something to send */
1526 	if (rx_len > 0) {
1527 		notify_uart_rx_rdy(dev, rx_len);
1528 	}
1529 
1530 	rx_buf_release(dev, async_rx->buf);
1531 	async_rx->buf = async_rx->next_buf;
1532 	async_rx->buf_len = async_rx->next_buf_len;
1533 #ifdef CONFIG_HAS_NORDIC_DMM
1534 	async_rx->usr_buf = async_rx->next_usr_buf;
1535 #endif
1536 	async_rx->next_buf = NULL;
1537 	async_rx->next_buf_len = 0;
1538 	async_rx->offset = 0;
1539 
1540 	if (async_rx->enabled) {
1541 		/* If there is a next buffer, then STARTRX will have already been
1542 		 * invoked by the short (the next buffer will be filling up already)
1543 		 * and here we just swap which buffer the driver is following;
1544 		 * the next rx_timeout() will update the rx_offset.
1545 		 */
1546 		unsigned int key = irq_lock();
1547 
1548 		if (async_rx->buf) {
1549 			/* The check is based on the assumption that the ISR handles
1550 			 * ENDRX before RXSTARTED, so if the short was set on time, the
1551 			 * RXSTARTED event will be set.
1552 			 */
1553 			if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
1554 				nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1555 			}
1556 			/* Remove the short until the subsequent next buffer is setup */
1557 			nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1558 		} else {
1559 			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1560 		}
1561 
1562 		irq_unlock(key);
1563 	}
1564 
1565 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1566 	async_rx->is_in_irq = false;
1567 #endif
1568 }
1569 
1570 /** @brief RX FIFO flushing
1571  *
1572  * Due to a HW bug the RX.AMOUNT register is not updated when the FIFO was empty,
1573  * so a workaround is applied which checks the RXSTARTED event. If that event is
1574  * set, it means that the FIFO was not empty.
1575  *
1576  * @param dev Device.
1577  *
1578  * @return number of bytes flushed from the fifo.
1579  */
1580 static uint8_t rx_flush(const struct device *dev)
1581 {
1582 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1583 	const struct uarte_nrfx_config *config = dev->config;
1584 	uint32_t rx_amount;
1585 
1586 	nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
1587 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
1588 	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1589 		/* empty */
1590 	}
1591 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1592 
1593 	if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) {
1594 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1595 		rx_amount = nrf_uarte_rx_amount_get(uarte);
1596 	} else if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
1597 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1598 		rx_amount = nrf_uarte_rx_amount_get(uarte);
1599 	} else {
1600 		rx_amount = 0;
1601 	}
1602 
1603 	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE) &&
1604 	    rx_amount) {
1605 		sys_cache_data_invd_range(config->rx_flush_buf, rx_amount);
1606 	}
1607 
1608 	return rx_amount;
1609 }
1610 
1611 /* This handler is called when the receiver is stopped. If RX was aborted,
1612  * data from the FIFO is flushed.
1613  */
1614 static void rxto_isr(const struct device *dev)
1615 {
1616 	const struct uarte_nrfx_config *config = dev->config;
1617 	struct uarte_nrfx_data *data = dev->data;
1618 	struct uarte_async_rx *async_rx = &data->async->rx;
1619 
1620 	if (async_rx->buf) {
1621 #ifdef CONFIG_HAS_NORDIC_DMM
1622 		(void)dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, 0, async_rx->buf);
1623 		async_rx->buf = async_rx->usr_buf;
1624 #endif
1625 		rx_buf_release(dev, async_rx->buf);
1626 		async_rx->buf = NULL;
1627 	}
1628 
1629 	/* This point can be reached in two cases:
1630 	 * 1. RX is disabled because all provided RX buffers have been filled.
1631 	 * 2. RX was explicitly disabled by a call to uart_rx_disable().
1632 	 * In both cases, the rx_enabled flag is cleared, so that RX can be
1633 	 * enabled again.
1634 	 * In the second case, additionally, data from the UARTE internal RX
1635 	 * FIFO need to be discarded.
1636 	 */
1637 	async_rx->enabled = false;
1638 	if (async_rx->discard_fifo) {
1639 		async_rx->discard_fifo = false;
1640 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1641 		if (HW_RX_COUNTING_ENABLED(config)) {
1642 			/* The flushed bytes need to be included because TIMER+PPI got the
1643 			 * RXDRDY events and already counted them.
1644 			 */
1645 			async_rx->total_user_byte_cnt += rx_flush(dev);
1646 		}
1647 #endif
1648 	} else if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config)) {
1649 		async_rx->flush_cnt = rx_flush(dev);
1650 	}
1651 
1652 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1653 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1654 	if (IS_ENABLED(UARTE_ANY_FAST_PD) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1655 		/* A spurious RXTO event was seen on a fast instance (UARTE120), thus
1656 		 * the RXTO interrupt is kept enabled only when RX is active.
1657 		 */
1658 		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_RXTO_MASK);
1659 	}
1660 #ifdef UARTE_HAS_FRAME_TIMEOUT
1661 	nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX);
1662 #endif
1663 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1664 #endif
1665 
1666 	if (LOW_POWER_ENABLED(config)) {
1667 		uint32_t key = irq_lock();
1668 
1669 		uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX);
1670 		irq_unlock(key);
1671 	}
1672 
1673 	notify_rx_disable(dev);
1674 }
1675 
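/* Handler for the TXSTOPPED event. It releases the low power / runtime PM
 * references taken for TX, restarts a transfer that was pending behind
 * uart_poll_out(), continues a chunked transfer from the TX cache buffer and
 * reports UART_TX_DONE or UART_TX_ABORTED to the user.
 */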
1676 static void txstopped_isr(const struct device *dev)
1677 {
1678 	const struct uarte_nrfx_config *config = dev->config;
1679 	struct uarte_nrfx_data *data = dev->data;
1680 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1681 	unsigned int key;
1682 
1683 	key = irq_lock();
1684 
1685 	size_t amount = (data->async->tx.amount >= 0) ?
1686 			data->async->tx.amount : nrf_uarte_tx_amount_get(uarte);
1687 
1688 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1689 		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1690 		if (data->flags & UARTE_FLAG_POLL_OUT) {
1691 			pm_device_runtime_put_async(dev, K_NO_WAIT);
1692 			data->flags &= ~UARTE_FLAG_POLL_OUT;
1693 		}
1694 	} else if (LOW_POWER_ENABLED(config)) {
1695 		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1696 		uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
1697 	}
1698 
1699 	irq_unlock(key);
1700 
1701 	if (!data->async->tx.buf) {
1702 		return;
1703 	}
1704 
1705 	/* If there is a pending TX request, it means that uart_tx() was called
1706 	 * while uart_poll_out() was ongoing. Handling the TXSTOPPED interrupt
1707 	 * means that uart_poll_out() has completed.
1708 	 */
1709 	if (data->async->tx.pending) {
1710 		key = irq_lock();
1711 		start_tx_locked(dev, data);
1712 		irq_unlock(key);
1713 		return;
1714 	}
1715 
1716 	/* The cache buffer is used because tx_buf was not located in RAM. */
1717 	if (data->async->tx.buf != data->async->tx.xfer_buf) {
1718 		/* In that case set up the next chunk. If that was the last chunk,
1719 		 * fall through to reporting TX_DONE.
1720 		 */
1721 		if (amount == data->async->tx.xfer_len) {
1722 			data->async->tx.cache_offset += amount;
1723 			if (setup_tx_cache(dev)) {
1724 				key = irq_lock();
1725 				start_tx_locked(dev, data);
1726 				irq_unlock(key);
1727 				return;
1728 			}
1729 
1730 			/* Amount is already included in cache_offset. */
1731 			amount = data->async->tx.cache_offset;
1732 		} else {
1733 			/* TX was aborted, include cache_offset in amount. */
1734 			amount += data->async->tx.cache_offset;
1735 		}
1736 	}
1737 
1738 	k_timer_stop(&data->async->tx.timer);
1739 
1740 	struct uart_event evt = {
1741 		.data.tx.buf = data->async->tx.buf,
1742 		.data.tx.len = amount,
1743 	};
1744 	if (amount == data->async->tx.len) {
1745 		evt.type = UART_TX_DONE;
1746 	} else {
1747 		evt.type = UART_TX_ABORTED;
1748 	}
1749 
1750 	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1751 	data->async->tx.buf = NULL;
1752 	data->async->tx.len = 0;
1753 
1754 	user_callback(dev, &evt);
1755 
1756 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1757 		pm_device_runtime_put_async(dev, K_NO_WAIT);
1758 	}
1759 }
1760 
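/* Handler for the RXDRDY event. When the HW frame timeout is not available it
 * either restarts the SW RX timeout (enhanced RX mode) or increments the SW
 * byte counter.
 */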
1761 static void rxdrdy_isr(const struct device *dev)
1762 {
1763 #if !defined(UARTE_HAS_FRAME_TIMEOUT)
1764 	struct uarte_nrfx_data *data = dev->data;
1765 
1766 #if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1767 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1768 
1769 	data->async->rx.idle_cnt = 0;
1770 	k_timer_start(&data->async->rx.timer, data->async->rx.timeout, K_NO_WAIT);
1771 	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
1772 #else
1773 	data->async->rx.cnt.cnt++;
1774 #endif
1775 #endif /* !UARTE_HAS_FRAME_TIMEOUT */
1776 }
1777 
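/* Check whether the event occurred and its interrupt is enabled; if so, clear
 * the event and return true.
 */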
1778 static bool event_check_clear(NRF_UARTE_Type *uarte, nrf_uarte_event_t event,
1779 				uint32_t int_mask, uint32_t int_en_mask)
1780 {
1781 	if (nrf_uarte_event_check(uarte, event) && (int_mask & int_en_mask)) {
1782 		nrf_uarte_event_clear(uarte, event);
1783 		return true;
1784 	}
1785 
1786 	return false;
1787 }
1788 
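/* Interrupt service routine used when the asynchronous API is enabled. */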
1789 static void uarte_nrfx_isr_async(const void *arg)
1790 {
1791 	const struct device *dev = arg;
1792 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1793 	const struct uarte_nrfx_config *config = dev->config;
1794 	struct uarte_nrfx_data *data = dev->data;
1795 	struct uarte_async_rx *async_rx = &data->async->rx;
1796 	uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX);
1797 
1798 	if (!(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT))
1799 	    && event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) {
1800 		rxdrdy_isr(dev);
1801 
1802 	}
1803 
1804 	if (event_check_clear(uarte, NRF_UARTE_EVENT_ERROR, NRF_UARTE_INT_ERROR_MASK, imask)) {
1805 		error_isr(dev);
1806 	}
1807 
1808 	if (event_check_clear(uarte, NRF_UARTE_EVENT_ENDRX, NRF_UARTE_INT_ENDRX_MASK, imask)) {
1809 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1810 		endrx_isr(dev);
1811 	}
1812 
1813 	/* RXSTARTED must be handled after ENDRX because it starts the RX timeout,
1814 	 * and if the order were swapped ENDRX would stop this timeout.
1815 	 * Skip if ENDRX is set together with RXSTARTED. That means ENDRX occurred
1816 	 * after the ENDRX check in this ISR, which may happen when the UARTE
1817 	 * interrupt got preempted. The events are not cleared and the ISR will be
1818 	 * called again, with ENDRX handled first.
1819 	 */
1820 	if ((imask & NRF_UARTE_INT_RXSTARTED_MASK) &&
1821 	    nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) &&
1822 	    !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1823 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1824 		rxstarted_isr(dev);
1825 	}
1826 
1827 	/* RXTO must be handled after ENDRX, which should notify the buffer.
1828 	 * Skip if ENDRX is set together with RXTO. That means ENDRX occurred
1829 	 * after the ENDRX check in this ISR, which may happen when the UARTE
1830 	 * interrupt got preempted. The events are not cleared and the ISR will be
1831 	 * called again, with ENDRX handled first.
1832 	 */
1833 	if ((imask & NRF_UARTE_INT_RXTO_MASK) &&
1834 	    nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) &&
1835 	    !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1836 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
1837 		rxto_isr(dev);
1838 	}
1839 
1840 	if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) &&
1841 	    (imask & NRF_UARTE_INT_ENDTX_MASK) &&
1842 	    nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
1843 		endtx_isr(dev);
1844 	}
1845 
1846 	if ((imask & NRF_UARTE_INT_TXSTOPPED_MASK) &&
1847 	    nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
1848 		txstopped_isr(dev);
1849 	}
1850 
1851 	if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) {
1852 #ifdef CONFIG_HAS_NORDIC_DMM
1853 		int ret;
1854 
1855 		ret = dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, async_rx->buf_len,
1856 					    async_rx->buf);
1857 
1858 		(void)ret;
1859 		__ASSERT_NO_MSG(ret == 0);
1860 		async_rx->buf = async_rx->usr_buf;
1861 #endif
1862 		notify_uart_rx_rdy(dev, async_rx->buf_len);
1863 		rx_buf_release(dev, async_rx->buf);
1864 		async_rx->buf_len = 0;
1865 		async_rx->buf = NULL;
1866 		notify_rx_disable(dev);
1867 	}
1868 }
1869 
1870 #endif /* UARTE_ANY_ASYNC */
1871 
1872 /**
1873  * @brief Poll the device for input.
1874  *
1875  * @param dev UARTE device struct
1876  * @param c Pointer to character
1877  *
1878  * @return 0 if a character arrived, -1 if the input buffer is empty,
 *	   -ENOTSUP if the instance uses the asynchronous API.
1879  */
1880 static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c)
1881 {
1882 	const struct uarte_nrfx_config *config = dev->config;
1883 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1884 
1885 #ifdef UARTE_ANY_ASYNC
1886 	struct uarte_nrfx_data *data = dev->data;
1887 
1888 	if (data->async) {
1889 		return -ENOTSUP;
1890 	}
1891 #endif
1892 
1893 	if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1894 		return -1;
1895 	}
1896 
1897 	if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1898 		sys_cache_data_invd_range(config->poll_in_byte, 1);
1899 	}
1900 
1901 	*c = *config->poll_in_byte;
1902 
1903 	/* clear the interrupt */
1904 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1905 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1906 
1907 	return 0;
1908 }
1909 
1910 /**
1911  * @brief Output a character in polled mode.
1912  *
1913  * @param dev UARTE device struct
1914  * @param c Character to send
1915  */
1916 static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c)
1917 {
1918 	const struct uarte_nrfx_config *config = dev->config;
1919 	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
1920 	struct uarte_nrfx_data *data = dev->data;
1921 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1922 	unsigned int key;
1923 
1924 	if (isr_mode) {
1925 		while (1) {
1926 			key = irq_lock();
1927 			if (is_tx_ready(dev)) {
1928 #if UARTE_ANY_ASYNC
1929 				if (data->async && data->async->tx.len &&
1930 					data->async->tx.amount < 0) {
1931 					data->async->tx.amount = nrf_uarte_tx_amount_get(uarte);
1932 				}
1933 #endif
1934 				break;
1935 			}
1936 
1937 			irq_unlock(key);
1938 			Z_SPIN_DELAY(3);
1939 		}
1940 	} else {
1941 		key = wait_tx_ready(dev);
1942 	}
1943 
1944 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1945 		if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
1946 			/* If the instance does not support PM from ISR context,
1947 			 * the device must already be turned on.
1948 			 */
1949 			enum pm_device_state state;
1950 			int err;
1951 
1952 			err = pm_device_state_get(dev, &state);
1953 			(void)err;
1954 			__ASSERT_NO_MSG(err == 0);
1955 			if (state != PM_DEVICE_STATE_ACTIVE) {
1956 				irq_unlock(key);
1957 				return;
1958 			}
1959 		}
1960 
1961 		if (!(data->flags & UARTE_FLAG_POLL_OUT)) {
1962 			data->flags |= UARTE_FLAG_POLL_OUT;
1963 			pm_device_runtime_get(dev);
1964 		}
1965 	}
1966 
1967 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config)) {
1968 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1969 	}
1970 
1971 	*config->poll_out_byte = c;
1972 	tx_start(dev, config->poll_out_byte, 1);
1973 
1974 	irq_unlock(key);
1975 }
1976 
1977 
1978 #ifdef UARTE_INTERRUPT_DRIVEN
1979 /** Interrupt driven FIFO fill function */
1980 static int uarte_nrfx_fifo_fill(const struct device *dev,
1981 				const uint8_t *tx_data,
1982 				int len)
1983 {
1984 	struct uarte_nrfx_data *data = dev->data;
1985 
1986 	len = MIN(len, data->int_driven->tx_buff_size);
1987 	if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) {
1988 		return 0;
1989 	}
1990 
1991 	/* Copy data to RAM buffer for EasyDMA transfer */
1992 	memcpy(data->int_driven->tx_buffer, tx_data, len);
1993 
1994 	unsigned int key = irq_lock();
1995 
1996 	if (!is_tx_ready(dev)) {
1997 		data->int_driven->fifo_fill_lock = 0;
1998 		len = 0;
1999 	} else {
2000 		tx_start(dev, data->int_driven->tx_buffer, len);
2001 	}
2002 
2003 	irq_unlock(key);
2004 
2005 	return len;
2006 }
2007 
2008 /** Interrupt driven FIFO read function */
2009 static int uarte_nrfx_fifo_read(const struct device *dev,
2010 				uint8_t *rx_data,
2011 				const int size)
2012 {
2013 	int num_rx = 0;
2014 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2015 	const struct uarte_nrfx_config *config = dev->config;
2016 
2017 	if (size > 0 && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
2018 		/* Clear the interrupt */
2019 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
2020 
2021 		if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
2022 			sys_cache_data_invd_range(config->poll_in_byte, 1);
2023 		}
2024 
2025 		/* Receive a character */
2026 		rx_data[num_rx++] = *config->poll_in_byte;
2027 
2028 		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
2029 	}
2030 
2031 	return num_rx;
2032 }
2033 
2034 /** Interrupt driven transfer enabling function */
2035 static void uarte_nrfx_irq_tx_enable(const struct device *dev)
2036 {
2037 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2038 	struct uarte_nrfx_data *data = dev->data;
2039 
2040 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
2041 		pm_device_runtime_get(dev);
2042 	}
2043 
2044 	unsigned int key = irq_lock();
2045 
2046 	data->int_driven->disable_tx_irq = false;
2047 	data->int_driven->tx_irq_enabled = true;
2048 	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
2049 
2050 	irq_unlock(key);
2051 }
2052 
2053 /** Interrupt driven transfer disabling function */
2054 static void uarte_nrfx_irq_tx_disable(const struct device *dev)
2055 {
2056 	struct uarte_nrfx_data *data = dev->data;
2057 	/* TX IRQ will be disabled after current transmission is finished */
2058 	data->int_driven->disable_tx_irq = true;
2059 	data->int_driven->tx_irq_enabled = false;
2060 }
2061 
2062 /** Interrupt driven transfer ready function */
2063 static int uarte_nrfx_irq_tx_ready_complete(const struct device *dev)
2064 {
2065 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2066 	struct uarte_nrfx_data *data = dev->data;
2067 
2068 	/* The TXSTOPPED event stays set while TX is idle, so the ISR is called as
2069 	 * soon as the TX IRQ is enabled. Because of that we have to explicitly
2070 	 * check whether the TX interrupt was requested, otherwise this function
2071 	 * would always return true no matter what the source of the interrupt was.
2072 	 */
2073 	bool ready = data->int_driven->tx_irq_enabled &&
2074 		     nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED);
2075 
2076 	if (ready) {
2077 		data->int_driven->fifo_fill_lock = 0;
2078 	}
2079 
2080 	return ready ? data->int_driven->tx_buff_size : 0;
2081 }
2082 
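/** Interrupt driven receiver ready function */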
2083 static int uarte_nrfx_irq_rx_ready(const struct device *dev)
2084 {
2085 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2086 
2087 	return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX);
2088 }
2089 
2090 /** Interrupt driven receiver enabling function */
2091 static void uarte_nrfx_irq_rx_enable(const struct device *dev)
2092 {
2093 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2094 
2095 	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2096 }
2097 
2098 /** Interrupt driven receiver disabling function */
2099 static void uarte_nrfx_irq_rx_disable(const struct device *dev)
2100 {
2101 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2102 
2103 	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2104 }
2105 
2106 /** Interrupt driven error enabling function */
2107 static void uarte_nrfx_irq_err_enable(const struct device *dev)
2108 {
2109 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2110 
2111 	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ERROR_MASK);
2112 }
2113 
2114 /** Interrupt driven error disabling function */
2115 static void uarte_nrfx_irq_err_disable(const struct device *dev)
2116 {
2117 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2118 
2119 	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ERROR_MASK);
2120 }
2121 
2122 /** Interrupt driven pending status function */
2123 static int uarte_nrfx_irq_is_pending(const struct device *dev)
2124 {
2125 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2126 
2127 	return ((nrf_uarte_int_enable_check(uarte,
2128 					    NRF_UARTE_INT_TXSTOPPED_MASK) &&
2129 		 uarte_nrfx_irq_tx_ready_complete(dev))
2130 		||
2131 		(nrf_uarte_int_enable_check(uarte,
2132 					    NRF_UARTE_INT_ENDRX_MASK) &&
2133 		 uarte_nrfx_irq_rx_ready(dev)));
2134 }
2135 
2136 /** Interrupt driven interrupt update function */
2137 static int uarte_nrfx_irq_update(const struct device *dev)
2138 {
2139 	return 1;
2140 }
2141 
2142 /** Set the callback function */
2143 static void uarte_nrfx_irq_callback_set(const struct device *dev,
2144 					uart_irq_callback_user_data_t cb,
2145 					void *cb_data)
2146 {
2147 	struct uarte_nrfx_data *data = dev->data;
2148 
2149 	data->int_driven->cb = cb;
2150 	data->int_driven->cb_data = cb_data;
2151 }
2152 #endif /* UARTE_INTERRUPT_DRIVEN */
2153 
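/* A minimal, illustrative sketch of how application code could exercise the
 * polling API implemented above (assuming a devicetree node labelled uart0
 * that is enabled and handled by this driver):
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *   unsigned char c;
 *
 *   uart_poll_out(uart, '?');
 *   do {
 *   } while (uart_poll_in(uart, &c) < 0);
 *
 * uart_poll_in() returns -1 while no data is available, so the loop above
 * busy-waits for a single character.
 */
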
2154 static DEVICE_API(uart, uart_nrfx_uarte_driver_api) = {
2155 	.poll_in		= uarte_nrfx_poll_in,
2156 	.poll_out		= uarte_nrfx_poll_out,
2157 	.err_check		= uarte_nrfx_err_check,
2158 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
2159 	.configure              = uarte_nrfx_configure,
2160 	.config_get             = uarte_nrfx_config_get,
2161 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
2162 #ifdef UARTE_ANY_ASYNC
2163 	.callback_set		= uarte_nrfx_callback_set,
2164 	.tx			= uarte_nrfx_tx,
2165 	.tx_abort		= uarte_nrfx_tx_abort,
2166 	.rx_enable		= uarte_nrfx_rx_enable,
2167 	.rx_buf_rsp		= uarte_nrfx_rx_buf_rsp,
2168 	.rx_disable		= uarte_nrfx_rx_disable,
2169 #endif /* UARTE_ANY_ASYNC */
2170 #ifdef UARTE_INTERRUPT_DRIVEN
2171 	.fifo_fill		= uarte_nrfx_fifo_fill,
2172 	.fifo_read		= uarte_nrfx_fifo_read,
2173 	.irq_tx_enable		= uarte_nrfx_irq_tx_enable,
2174 	.irq_tx_disable		= uarte_nrfx_irq_tx_disable,
2175 	.irq_tx_ready		= uarte_nrfx_irq_tx_ready_complete,
2176 	.irq_rx_enable		= uarte_nrfx_irq_rx_enable,
2177 	.irq_rx_disable		= uarte_nrfx_irq_rx_disable,
2178 	.irq_tx_complete	= uarte_nrfx_irq_tx_ready_complete,
2179 	.irq_rx_ready		= uarte_nrfx_irq_rx_ready,
2180 	.irq_err_enable		= uarte_nrfx_irq_err_enable,
2181 	.irq_err_disable	= uarte_nrfx_irq_err_disable,
2182 	.irq_is_pending		= uarte_nrfx_irq_is_pending,
2183 	.irq_update		= uarte_nrfx_irq_update,
2184 	.irq_callback_set	= uarte_nrfx_irq_callback_set,
2185 #endif /* UARTE_INTERRUPT_DRIVEN */
2186 };
2187 
2188 #ifdef UARTE_ENHANCED_POLL_OUT
2189 static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte,
2190 				 struct uarte_nrfx_data *data)
2191 {
2192 	nrfx_err_t ret;
2193 
2194 	ret = nrfx_gppi_channel_alloc(&data->ppi_ch_endtx);
2195 	if (ret != NRFX_SUCCESS) {
2196 		LOG_ERR("Failed to allocate PPI Channel");
2197 		return -EIO;
2198 	}
2199 
2200 	nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx,
2201 		nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX),
2202 		nrf_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX));
2203 	nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx));
2204 
2205 	return 0;
2206 }
2207 #endif /* UARTE_ENHANCED_POLL_OUT */
2208 
2209 /** @brief Pend until TX is stopped.
2210  *
2211  * There are two configurations that must be handled:
2212  * - ENDTX->TXSTOPPED PPI/short enabled - just pend until the TXSTOPPED event is set
2213  * - otherwise - disable the ENDTX interrupt, trigger STOPTX manually and pend for TXSTOPPED
2214  */
2215 static void wait_for_tx_stopped(const struct device *dev)
2216 {
2217 	const struct uarte_nrfx_config *config = dev->config;
2218 	bool ppi_endtx = (config->flags & UARTE_CFG_FLAG_PPI_ENDTX) ||
2219 			 IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT);
2220 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2221 	bool res;
2222 
2223 	if (!ppi_endtx) {
2224 		/* We assume here that this can be called from any context,
2225 		 * including one that the UARTE interrupt cannot preempt.
2226 		 * Disable the ENDTX interrupt to ensure that it will not be
2227 		 * triggered (if in a lower priority context) and stop TX if necessary.
2228 		 */
2229 		nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2230 		NRFX_WAIT_FOR(is_tx_ready(dev), 1000, 1, res);
2231 		if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
2232 			if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) {
2233 				nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
2234 			}
2235 			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
2236 		}
2237 	}
2238 
2239 	NRFX_WAIT_FOR(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED),
2240 		      1000, 1, res);
2241 
2242 	if (!ppi_endtx) {
2243 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2244 	}
2245 }
2246 
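/* Resume handler: applies the default pin configuration and enables the
 * peripheral, unless low power mode defers enabling until it is needed.
 */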
2247 static void uarte_pm_resume(const struct device *dev)
2248 {
2249 	const struct uarte_nrfx_config *cfg = dev->config;
2250 
2251 	(void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
2252 
2253 	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || !LOW_POWER_ENABLED(cfg)) {
2254 		uarte_periph_enable(dev);
2255 	}
2256 }
2257 
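/* Suspend handler: ensures that RX is stopped and TX has completed, releases
 * the clock request of fast instances, disables the peripheral and applies the
 * sleep pin configuration.
 */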
2258 static void uarte_pm_suspend(const struct device *dev)
2259 {
2260 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2261 	const struct uarte_nrfx_config *cfg = dev->config;
2262 	struct uarte_nrfx_data *data = dev->data;
2263 
2264 	(void)data;
2265 #ifdef UARTE_ANY_FAST_PD
2266 	if (cfg->clk_dev) {
2267 		int err;
2268 
2269 		err = nrf_clock_control_release(cfg->clk_dev, &cfg->clk_spec);
2270 		(void)err;
2271 		__ASSERT_NO_MSG(err >= 0);
2272 	}
2273 #endif
2274 
2275 #ifdef UARTE_ANY_ASYNC
2276 	if (data->async) {
2277 		/* Entering the inactive state requires that the device has no
2278 		 * active asynchronous calls.
2279 		 */
2280 		__ASSERT_NO_MSG(!data->async->rx.enabled);
2281 		__ASSERT_NO_MSG(!data->async->tx.len);
2282 		if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
2283 			/* If runtime PM is enabled then reference counting ensures that
2284 			 * suspend will not occur when TX is active.
2285 			 */
2286 			__ASSERT_NO_MSG(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED));
2287 		} else {
2288 			wait_for_tx_stopped(dev);
2289 		}
2290 
2291 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
2292 		if (data->async && HW_RX_COUNTING_ENABLED(cfg)) {
2293 			nrfx_timer_disable(&cfg->timer);
2294 			/* Timer/counter value is reset when disabled. */
2295 			data->async->rx.total_byte_cnt = 0;
2296 			data->async->rx.total_user_byte_cnt = 0;
2297 		}
2298 #endif
2299 	} else if (IS_ENABLED(UARTE_ANY_NONE_ASYNC))
2300 #endif
2301 	{
2302 		if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
2303 #if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE)
2304 			if (data->int_driven) {
2305 				data->int_driven->rx_irq_enabled =
2306 						nrf_uarte_int_enable_check(uarte,
2307 							NRF_UARTE_INT_ENDRX_MASK);
2308 				if (data->int_driven->rx_irq_enabled) {
2309 					nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2310 				}
2311 			}
2312 #endif
2313 			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
2314 			while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO)) {
2315 				/* Busy wait for event to register */
2316 				Z_SPIN_DELAY(2);
2317 			}
2318 			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
2319 			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
2320 			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
2321 			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
2322 		}
2323 
2324 		wait_for_tx_stopped(dev);
2325 	}
2326 
2327 #ifdef CONFIG_SOC_NRF54H20_GPD
2328 	nrf_gpd_retain_pins_set(cfg->pcfg, true);
2329 #endif
2330 
2331 	nrf_uarte_disable(uarte);
2332 
2333 	(void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP);
2334 }
2335 
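/* Device power management action callback. */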
2336 static int uarte_nrfx_pm_action(const struct device *dev, enum pm_device_action action)
2337 {
2338 	if (action == PM_DEVICE_ACTION_RESUME) {
2339 		uarte_pm_resume(dev);
2340 	} else if (IS_ENABLED(CONFIG_PM_DEVICE) && (action == PM_DEVICE_ACTION_SUSPEND)) {
2341 		uarte_pm_suspend(dev);
2342 	} else {
2343 		return -ENOTSUP;
2344 	}
2345 
2346 	return 0;
2347 }
2348 
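/* Initialize the TX path so that a set TXSTOPPED event (with its interrupt
 * disabled) indicates an idle transmitter; see the comment in the function
 * body for details.
 */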
2349 static int uarte_tx_path_init(const struct device *dev)
2350 {
2351 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2352 	const struct uarte_nrfx_config *cfg = dev->config;
2353 	bool auto_endtx = false;
2354 
2355 #ifdef UARTE_HAS_ENDTX_STOPTX_SHORT
2356 	nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDTX_STOPTX);
2357 	auto_endtx = true;
2358 #elif defined(UARTE_ENHANCED_POLL_OUT)
2359 	if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) {
2360 		struct uarte_nrfx_data *data = dev->data;
2361 		int err;
2362 
2363 		err = endtx_stoptx_ppi_init(uarte, data);
2364 		if (err < 0) {
2365 			return err;
2366 		}
2367 		auto_endtx = true;
2368 	}
2369 #endif
2370 
2371 	/* Get to the point where the TXSTOPPED event is set but the TXSTOPPED
2372 	 * interrupt is disabled. This trick is later used to handle the TX path and
2373 	 * to determine from HW whether TX is active (TXSTOPPED set means TX idle).
2374 	 *
2375 	 * Set the TXSTOPPED event by requesting a fake (zero-length) transfer.
2376 	 * A pointer to a RAM variable is set because otherwise such an operation
2377 	 * may result in a HardFault or RAM corruption.
2378 	 */
2379 	nrf_uarte_enable(uarte);
2380 	nrf_uarte_tx_buffer_set(uarte, cfg->poll_out_byte, 0);
2381 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
2382 	if (!auto_endtx) {
2383 		while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
2384 		}
2385 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
2386 		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
2387 		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2388 	}
2389 	while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
2390 	}
2391 	nrf_uarte_disable(uarte);
2392 
2393 	return 0;
2394 }
2395 
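/* Common instance initialization: apply the initial configuration, initialize
 * the asynchronous context (if used), prepare the TX path and run the device
 * PM initialization.
 */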
2396 static int uarte_instance_init(const struct device *dev,
2397 			       uint8_t interrupts_active)
2398 {
2399 	int err;
2400 	const struct uarte_nrfx_config *cfg = dev->config;
2401 
2402 	if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
2403 		/* For simulation the DT provided peripheral address needs to be corrected */
2404 		((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs;
2405 	}
2406 
2407 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
2408 	err = uarte_nrfx_configure(dev, &((struct uarte_nrfx_data *)dev->data)->uart_config);
2409 	if (err) {
2410 		return err;
2411 	}
2412 #else
2413 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2414 
2415 	nrf_uarte_baudrate_set(uarte, cfg->nrf_baudrate);
2416 	nrf_uarte_configure(uarte, &cfg->hw_config);
2417 #endif
2418 
2419 #ifdef UARTE_ANY_ASYNC
2420 	struct uarte_nrfx_data *data = dev->data;
2421 
2422 	if (data->async) {
2423 		err = uarte_async_init(dev);
2424 		if (err < 0) {
2425 			return err;
2426 		}
2427 	}
2428 #endif
2429 
2430 	err = uarte_tx_path_init(dev);
2431 	if (err) {
2432 		return err;
2433 	}
2434 
2435 	return pm_device_driver_init(dev, uarte_nrfx_pm_action);
2436 }
2437 
2438 #define UARTE_IRQ_CONFIGURE(idx, isr_handler)				       \
2439 	do {								       \
2440 		IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \
2441 			    isr_handler, DEVICE_DT_GET(UARTE(idx)), 0);	       \
2442 		irq_enable(DT_IRQN(UARTE(idx)));			       \
2443 	} while (false)
2444 
2445 /* Low power mode is used when the receiver is disabled (disable-rx) or, in async
2446  * mode, when the dedicated Kconfig option is enabled. It is not used with device PM.
2447  */
2448 #define USE_LOW_POWER(idx)						       \
2449 	COND_CODE_1(CONFIG_PM_DEVICE, (0),				       \
2450 		(((!UARTE_PROP(idx, disable_rx) &&			       \
2451 		COND_CODE_1(CONFIG_UART_##idx##_ASYNC,			       \
2452 			(!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)),\
2453 			(1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER)))
2454 
2455 #define UARTE_DISABLE_RX_INIT(node_id) \
2456 	.disable_rx = DT_PROP(node_id, disable_rx)
2457 
2458 /* Get frequency divider that is used to adjust the BAUDRATE value. */
2459 #define UARTE_GET_BAUDRATE_DIV(f_pclk) (f_pclk / NRF_UARTE_BASE_FREQUENCY_16MHZ)
2460 
2461 /* When calculating the baudrate we need to take into account that high speed
2462  * instances must have the baudrate adjusted by the ratio between the UARTE clocking
2463  * frequency and 16 MHz. Additionally, >1 Mbaud speeds are calculated using a formula.
2464  */
2465 #define UARTE_GET_BAUDRATE2(f_pclk, current_speed)					\
2466 	((f_pclk > NRF_UARTE_BASE_FREQUENCY_16MHZ) && (current_speed > 1000000)) ?	\
2467 		UARTE_GET_CUSTOM_BAUDRATE(f_pclk, current_speed) :			\
2468 		(NRF_BAUDRATE(current_speed) / UARTE_GET_BAUDRATE_DIV(f_pclk))
2469 
2470 /* Convert DT current-speed to a value that is written to the BAUDRATE register. */
2471 #define UARTE_GET_BAUDRATE(idx) \
2472 	UARTE_GET_BAUDRATE2(NRF_PERIPH_GET_FREQUENCY(UARTE(idx)), UARTE_PROP(idx, current_speed))
2473 
2474 /* Get the initialization level of an instance. Instances that require clock
2475  * control using nrfs (IPC) are initialized later.
2476  */
2477 #define UARTE_INIT_LEVEL(idx) \
2478 	COND_CODE_1(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _), (POST_KERNEL), (PRE_KERNEL_1))
2479 
2480 /* Get the initialization priority of an instance. Instances that require clock
2481  * control using nrfs (IPC) are initialized later.
2482  */
2483 #define UARTE_INIT_PRIO(idx)								\
2484 	COND_CODE_1(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _),				\
2485 		    (UTIL_INC(CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_INIT_PRIORITY)),	\
2486 		    (CONFIG_SERIAL_INIT_PRIORITY))
2487 
2488 /* Macro for setting nRF specific configuration structures. */
2489 #define UARTE_NRF_CONFIG(idx) {							\
2490 		.hwfc = (UARTE_PROP(idx, hw_flow_control) ==			\
2491 			UART_CFG_FLOW_CTRL_RTS_CTS) ?				\
2492 			NRF_UARTE_HWFC_ENABLED : NRF_UARTE_HWFC_DISABLED,	\
2493 		.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ?	\
2494 			NRF_UARTE_PARITY_INCLUDED : NRF_UARTE_PARITY_EXCLUDED,	\
2495 		IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\
2496 		IF_ENABLED(UARTE_ODD_PARITY_ALLOWED,				\
2497 			(.paritytype = NRF_UARTE_PARITYTYPE_EVEN,))		\
2498 		IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT,				\
2499 			(.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN,))		\
2500 	}
2501 
2502 /* Macro for setting zephyr specific configuration structures. */
2503 #define UARTE_CONFIG(idx) {						       \
2504 		.baudrate = UARTE_PROP(idx, current_speed),		       \
2505 		.data_bits = UART_CFG_DATA_BITS_8,			       \
2506 		.stop_bits = UART_CFG_STOP_BITS_1,			       \
2507 		.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT)       \
2508 			  ? UART_CFG_PARITY_EVEN			       \
2509 			  : UART_CFG_PARITY_NONE,			       \
2510 		.flow_ctrl = UARTE_PROP(idx, hw_flow_control)		       \
2511 			     ? UART_CFG_FLOW_CTRL_RTS_CTS		       \
2512 			     : UART_CFG_FLOW_CTRL_NONE,			       \
2513 	}
2514 
2515 #define UART_NRF_UARTE_DEVICE(idx)					       \
2516 	NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(UARTE(idx));		       \
2517 	UARTE_INT_DRIVEN(idx);						       \
2518 	PINCTRL_DT_DEFINE(UARTE(idx));					       \
2519 	IF_ENABLED(CONFIG_UART_##idx##_ASYNC, (				       \
2520 		static uint8_t						       \
2521 			uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \
2522 			DMM_MEMORY_SECTION(UARTE(idx));			       \
2523 		static uint8_t uarte##idx##_flush_buf[UARTE_HW_RX_FIFO_SIZE]   \
2524 			DMM_MEMORY_SECTION(UARTE(idx));			       \
2525 		struct uarte_async_cb uarte##idx##_async;))		       \
2526 	static uint8_t uarte##idx##_poll_out_byte DMM_MEMORY_SECTION(UARTE(idx));\
2527 	static uint8_t uarte##idx##_poll_in_byte DMM_MEMORY_SECTION(UARTE(idx)); \
2528 	static struct uarte_nrfx_data uarte_##idx##_data = {		       \
2529 		IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE,		       \
2530 				(.uart_config = UARTE_CONFIG(idx),))	       \
2531 		IF_ENABLED(CONFIG_UART_##idx##_ASYNC,			       \
2532 			    (.async = &uarte##idx##_async,))		       \
2533 		IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,	       \
2534 			    (.int_driven = &uarte##idx##_int_driven,))	       \
2535 	};								       \
2536 	COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (),		       \
2537 		(BUILD_ASSERT(UARTE_GET_BAUDRATE(idx) > 0,		       \
2538 			      "Unsupported baudrate");))		       \
2539 	static const struct uarte_nrfx_config uarte_##idx##z_config = {	       \
2540 		COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE,		       \
2541 			(.clock_freq = NRF_PERIPH_GET_FREQUENCY(UARTE(idx)),),  \
2542 		    (IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT,		       \
2543 			(.baudrate = UARTE_PROP(idx, current_speed),))	       \
2544 		     .nrf_baudrate = UARTE_GET_BAUDRATE(idx),		       \
2545 		     .hw_config = UARTE_NRF_CONFIG(idx),))		       \
2546 		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)),		       \
2547 		.uarte_regs = _CONCAT(NRF_UARTE, idx),                         \
2548 		IF_ENABLED(CONFIG_HAS_NORDIC_DMM,			       \
2549 				(.mem_reg = DMM_DEV_TO_REG(UARTE(idx)),))      \
2550 		.flags =						       \
2551 			(IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ?   \
2552 				UARTE_CFG_FLAG_PPI_ENDTX : 0) |		       \
2553 			(IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ?        \
2554 				UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) |	       \
2555 			(!IS_ENABLED(CONFIG_HAS_NORDIC_DMM) ? 0 :	       \
2556 			  (UARTE_IS_CACHEABLE(idx) ?			       \
2557 				UARTE_CFG_FLAG_CACHEABLE : 0)) |	       \
2558 			USE_LOW_POWER(idx),				       \
2559 		UARTE_DISABLE_RX_INIT(UARTE(idx)),			       \
2560 		.poll_out_byte = &uarte##idx##_poll_out_byte,		       \
2561 		.poll_in_byte = &uarte##idx##_poll_in_byte,		       \
2562 		IF_ENABLED(CONFIG_UART_##idx##_ASYNC,			       \
2563 				(.tx_cache = uarte##idx##_tx_cache,	       \
2564 				 .rx_flush_buf = uarte##idx##_flush_buf,))     \
2565 		IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC,		       \
2566 			(.timer = NRFX_TIMER_INSTANCE(			       \
2567 				CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),))     \
2568 		IF_ENABLED(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _),	       \
2569 			(.clk_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(UARTE(idx))), \
2570 			 .clk_spec = {					       \
2571 				.frequency = NRF_PERIPH_GET_FREQUENCY(UARTE(idx)),\
2572 				.accuracy = 0,				       \
2573 				.precision = NRF_CLOCK_CONTROL_PRECISION_DEFAULT,\
2574 				},))					       \
2575 	};								       \
2576 	static int uarte_##idx##_init(const struct device *dev)		       \
2577 	{								       \
2578 		COND_CODE_1(CONFIG_UART_##idx##_ASYNC,			       \
2579 			   (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_async);),  \
2580 			   (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_int);))    \
2581 		return uarte_instance_init(				       \
2582 			dev,						       \
2583 			IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN));     \
2584 	}								       \
2585 									       \
2586 	PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action,		       \
2587 			    COND_CODE_1(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _),\
2588 				    (0), (PM_DEVICE_ISR_SAFE)));	       \
2589 									       \
2590 	DEVICE_DT_DEFINE(UARTE(idx),					       \
2591 		      uarte_##idx##_init,				       \
2592 		      PM_DEVICE_DT_GET(UARTE(idx)),			       \
2593 		      &uarte_##idx##_data,				       \
2594 		      &uarte_##idx##z_config,				       \
2595 		      UARTE_INIT_LEVEL(idx),				       \
2596 		      UARTE_INIT_PRIO(idx),				       \
2597 		      &uart_nrfx_uarte_driver_api)
2598 
2599 #define UARTE_INT_DRIVEN(idx)						       \
2600 	IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,		       \
2601 		(static uint8_t uarte##idx##_tx_buffer			       \
2602 			[MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE,	       \
2603 			     BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))]      \
2604 			DMM_MEMORY_SECTION(UARTE(idx));			       \
2605 		 static struct uarte_nrfx_int_driven			       \
2606 			uarte##idx##_int_driven = {			       \
2607 				.tx_buffer = uarte##idx##_tx_buffer,	       \
2608 				.tx_buff_size = sizeof(uarte##idx##_tx_buffer),\
2609 			};))
2610 
2611 #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \
2612 	IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);))
2613 
2614 UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ())
2615