1 /*
2  * Copyright (c) 2023 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @brief Driver for Nordic Semiconductor nRF UARTE
9  */
10 
11 #include <zephyr/kernel.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/irq.h>
14 #include <zephyr/drivers/uart.h>
15 #include <zephyr/drivers/serial/uart_async_to_irq.h>
16 #include <zephyr/pm/device.h>
17 #include <zephyr/drivers/pinctrl.h>
18 #include <zephyr/linker/devicetree_regions.h>
19 #include <zephyr/logging/log.h>
20 #include <nrfx_uarte.h>
21 #include <helpers/nrfx_gppi.h>
22 #include <haly/nrfy_uarte.h>
23 #define LOG_MODULE_NAME uarte
24 LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_UART_LOG_LEVEL);
25 
26 #define INSTANCE_INT_DRIVEN(periph, prefix, i, _) \
27 	 IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN)
28 
29 #define INSTANCE_ASYNC(periph, prefix, i, _) \
30 	 IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC)
31 
32 #define INSTANCE_POLLING(periph, prefix, id, _) \
33 	UTIL_AND(CONFIG_HAS_HW_NRF_UARTE##prefix##id, \
34 	  UTIL_AND(COND_CODE_1(CONFIG_UART_##prefix##id##_INTERRUPT_DRIVEN, (0), (1)), \
35 		   COND_CODE_1(CONFIG_UART_##prefix##id##_ASYNC, (0), (1))))
36 
37 #define INSTANCE_ENHANCED_POLL_OUT(periph, prefix, i, _) \
38 	 IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT)
39 
40 /* Macro determining if any instance is using interrupt driven API. */
41 #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_INT_DRIVEN, (+), (0), _))
42 #define UARTE_ANY_INTERRUPT_DRIVEN 1
43 #else
44 #define UARTE_ANY_INTERRUPT_DRIVEN 0
45 #endif
46 
47 /* Macro determining if any instance is enabled and using ASYNC API. */
48 #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_ASYNC, (+), (0), _))
49 #define UARTE_ANY_ASYNC 1
50 #else
51 #define UARTE_ANY_ASYNC 0
52 #endif
53 
54 /* Macro determining if any instance is using only polling API. */
55 #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_POLLING, (+), (0), _))
56 #define UARTE_ANY_POLLING 1
57 #else
58 #define UARTE_ANY_POLLING 0
59 #endif
60 
/* Macro determining if any instance is using the enhanced poll out feature. */
62 #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_ENHANCED_POLL_OUT, (+), (0), _))
63 #define UARTE_ENHANCED_POLL_OUT 1
64 #else
65 #define UARTE_ENHANCED_POLL_OUT 0
66 #endif
67 
68 #if UARTE_ANY_INTERRUPT_DRIVEN || UARTE_ANY_ASYNC
69 #define UARTE_INT_ASYNC 1
70 #else
71 #define UARTE_INT_ASYNC 0
72 #endif
73 
74 #if defined(UARTE_CONFIG_PARITYTYPE_Msk)
75 #define UARTE_ODD_PARITY_ALLOWED 1
76 #else
77 #define UARTE_ODD_PARITY_ALLOWED 0
78 #endif
79 
80 /*
81  * RX timeout is divided into time slabs, this define tells how many divisions
82  * should be made. More divisions - higher timeout accuracy and processor usage.
83  */
84 #define RX_TIMEOUT_DIV 5
85 
86 /* Macro for converting numerical baudrate to register value. It is convenient
87  * to use this approach because for constant input it can calculate nrf setting
88  * at compile time.
89  */
90 #define NRF_BAUDRATE(baudrate) ((baudrate) == 300 ? 0x00014000 :\
91 	(baudrate) == 600    ? 0x00027000 :			\
92 	(baudrate) == 1200   ? NRF_UARTE_BAUDRATE_1200 :	\
93 	(baudrate) == 2400   ? NRF_UARTE_BAUDRATE_2400 :	\
94 	(baudrate) == 4800   ? NRF_UARTE_BAUDRATE_4800 :	\
95 	(baudrate) == 9600   ? NRF_UARTE_BAUDRATE_9600 :	\
96 	(baudrate) == 14400  ? NRF_UARTE_BAUDRATE_14400 :	\
97 	(baudrate) == 19200  ? NRF_UARTE_BAUDRATE_19200 :	\
98 	(baudrate) == 28800  ? NRF_UARTE_BAUDRATE_28800 :	\
99 	(baudrate) == 31250  ? NRF_UARTE_BAUDRATE_31250 :	\
100 	(baudrate) == 38400  ? NRF_UARTE_BAUDRATE_38400 :	\
101 	(baudrate) == 56000  ? NRF_UARTE_BAUDRATE_56000 :	\
102 	(baudrate) == 57600  ? NRF_UARTE_BAUDRATE_57600 :	\
103 	(baudrate) == 76800  ? NRF_UARTE_BAUDRATE_76800 :	\
104 	(baudrate) == 115200 ? NRF_UARTE_BAUDRATE_115200 :	\
105 	(baudrate) == 230400 ? NRF_UARTE_BAUDRATE_230400 :	\
106 	(baudrate) == 250000 ? NRF_UARTE_BAUDRATE_250000 :	\
107 	(baudrate) == 460800 ? NRF_UARTE_BAUDRATE_460800 :	\
108 	(baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 :	\
109 	(baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0)
110 
111 #define UARTE_DATA_FLAG_TRAMPOLINE	BIT(0)
112 #define UARTE_DATA_FLAG_RX_ENABLED	BIT(1)
113 
/* State used by the asynchronous API (the interrupt driven API is layered
 * on top of it through the async-to-irq adaptation layer).
 */
struct uarte_async_data {
	/* User callback and context set through the callback_set API. */
	uart_callback_t user_callback;
	void *user_data;

	/* Buffer stashed by api_rx_enable(); handed over to the nrfx driver
	 * from the RX buffer request event (see on_rx_buf_req()).
	 */
	uint8_t *en_rx_buf;
	size_t en_rx_len;

	/* Aborts TX when the requested timeout elapses (HWFC only). */
	struct k_timer tx_timer;
	/* Implements the RX inactivity timeout in RX_TIMEOUT_DIV slabs. */
	struct k_timer rx_timer;

	/* Duration of one RX timeout slab; K_NO_WAIT when timeout is off. */
	k_timeout_t rx_timeout;

	/* Keeps the most recent error mask. */
	uint32_t err;

	/* Remaining timer expirations without new data before RX is aborted
	 * (see rx_timeout_handler()).
	 */
	uint8_t idle_cnt;
};
131 
/* Device data structure */
struct uarte_nrfx_data {
	/* Must stay the first member (enforced by the BUILD_ASSERT below) so
	 * the async-to-irq layer can reach its data via the device data ptr.
	 */
	struct uart_async_to_irq_data *a2i_data;
#if CONFIG_UART_USE_RUNTIME_CONFIGURE
	/* Most recently applied configuration, returned by config_get. */
	struct uart_config uart_config;
#endif
	/* NULL when the instance uses only the polling API. */
	struct uarte_async_data *async;
	/* UARTE_DATA_FLAG_* bits. */
	atomic_t flags;
	/* NOTE(review): not referenced in this part of the file — presumably
	 * a single-byte RX buffer; confirm usage before relying on it.
	 */
	uint8_t  rx_byte;
};
BUILD_ASSERT(offsetof(struct uarte_nrfx_data, a2i_data) == 0);
143 
/* If set then receiver is not used. */
#define UARTE_CFG_FLAG_NO_RX			BIT(0)

/* If set then instance is using interrupt driven API. */
#define UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API	BIT(1)

/**
 * @brief Structure for UARTE configuration.
 */
struct uarte_nrfx_config {
	/* Must stay the first member (enforced by the BUILD_ASSERT below) so
	 * the async-to-irq layer can reach its config via the device config ptr.
	 */
	const struct uart_async_to_irq_config *a2i_config;
	/* nrfx driver instance. */
	nrfx_uarte_t nrfx_dev;
	/* Initial nrfx driver configuration. */
	nrfx_uarte_config_t nrfx_config;
	/* Pin control configuration. */
	const struct pinctrl_dev_config *pcfg;
	/* UARTE_CFG_FLAG_* bits. */
	uint32_t flags;

	LOG_INSTANCE_PTR_DECLARE(log);
};
BUILD_ASSERT(offsetof(struct uarte_nrfx_config, a2i_config) == 0);
163 
164 #define UARTE_ERROR_FROM_MASK(mask)					\
165 	((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	\
166 	 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY	\
167 	 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	\
168 	 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK		\
169 	 : 0)
170 
171 /* Determine if the device has interrupt driven API enabled. */
172 #define IS_INT_DRIVEN_API(dev)						\
173 	(UARTE_ANY_INTERRUPT_DRIVEN &&					\
174 	 (((const struct uarte_nrfx_config *)dev->config)->flags &	\
175 	  UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API))
176 
177 /* Determine if the device supports only polling API. */
178 #define IS_POLLING_API(dev) \
179 	(!UARTE_INT_ASYNC || (((struct uarte_nrfx_data *)dev->data)->async == NULL))
180 
181 /* Determine if the device supports asynchronous API. */
182 #define IS_ASYNC_API(dev) (!IS_INT_DRIVEN_API(dev) && !IS_POLLING_API(dev))
183 
get_nrfx_dev(const struct device * dev)184 static inline const nrfx_uarte_t *get_nrfx_dev(const struct device *dev)
185 {
186 	const struct uarte_nrfx_config *config = dev->config;
187 
188 	return &config->nrfx_dev;
189 }
190 
/* Store the user event callback and its context in the async state. */
static int callback_set(const struct device *dev, uart_callback_t callback, void *user_data)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uarte_async_data *async = data->async;

	async->user_callback = callback;
	async->user_data = user_data;

	return 0;
}
200 
201 #if UARTE_ANY_ASYNC
/* Async API callback setter; only valid for instances using the async API. */
static int api_callback_set(const struct device *dev, uart_callback_t callback, void *user_data)
{
	if (IS_ASYNC_API(dev)) {
		return callback_set(dev, callback, user_data);
	}

	return -ENOTSUP;
}
210 #endif
211 
/* Handle the nrfx TX done event: stop the TX timeout timer (only armed when
 * HW flow control is used, see api_tx()) and report UART_TX_DONE or
 * UART_TX_ABORTED to the user.
 */
static void on_tx_done(const struct device *dev, const nrfx_uarte_event_t *event)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uart_event evt = {
		.type = (event->data.tx.flags & NRFX_UARTE_TX_DONE_ABORTED) ?
			UART_TX_ABORTED : UART_TX_DONE,
		.data.tx.buf = event->data.tx.p_buffer,
		.data.tx.len = event->data.tx.length
	};
	bool hwfc;

#if CONFIG_UART_USE_RUNTIME_CONFIGURE
	hwfc = data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS;
#else
	const struct uarte_nrfx_config *config = dev->config;

	hwfc = config->nrfx_config.config.hwfc == NRF_UARTE_HWFC_ENABLED;
#endif

	/* The abort timer is only started when flow control is enabled, so it
	 * only needs stopping in that case.
	 */
	if (hwfc) {
		k_timer_stop(&data->async->tx_timer);
	}
	data->async->user_callback(dev, &evt, data->async->user_data);
}
236 
on_rx_done(const struct device * dev,const nrfx_uarte_event_t * event)237 static void on_rx_done(const struct device *dev, const nrfx_uarte_event_t *event)
238 {
239 	struct uarte_nrfx_data *data = dev->data;
240 	struct uart_event evt;
241 
242 	if (data->async->err) {
243 		evt.type = UART_RX_STOPPED;
244 		evt.data.rx_stop.reason = UARTE_ERROR_FROM_MASK(data->async->err);
245 		evt.data.rx_stop.data.buf = event->data.rx.p_buffer;
246 		evt.data.rx_stop.data.len = event->data.rx.length;
247 		/* Keep error code for uart_err_check(). */
248 		if (!IS_INT_DRIVEN_API(dev)) {
249 			data->async->err = 0;
250 		}
251 		data->async->user_callback(dev, &evt, data->async->user_data);
252 	} else if (event->data.rx.length) {
253 		evt.type = UART_RX_RDY,
254 		evt.data.rx.buf = event->data.rx.p_buffer,
255 		evt.data.rx.len = event->data.rx.length,
256 		evt.data.rx.offset = 0;
257 		data->async->user_callback(dev, &evt, data->async->user_data);
258 	}
259 
260 	evt.type = UART_RX_BUF_RELEASED;
261 	evt.data.rx_buf.buf = event->data.rx.p_buffer;
262 
263 	data->async->user_callback(dev, &evt, data->async->user_data);
264 }
265 
start_rx_timer(struct uarte_nrfx_data * data)266 static void start_rx_timer(struct uarte_nrfx_data *data)
267 {
268 	struct uarte_async_data *adata = data->async;
269 
270 	k_timer_start(&adata->rx_timer, adata->rx_timeout, K_NO_WAIT);
271 }
272 
on_rx_byte(const struct device * dev)273 static void on_rx_byte(const struct device *dev)
274 {
275 	struct uarte_nrfx_data *data = dev->data;
276 	struct uarte_async_data *adata = data->async;
277 	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
278 
279 	nrfx_uarte_rxdrdy_disable(nrfx_dev);
280 	adata->idle_cnt = RX_TIMEOUT_DIV;
281 	start_rx_timer(data);
282 }
283 
/* Handle the nrfx RX buffer request event.
 *
 * If a buffer was stashed by api_rx_enable() the event originates from the
 * RX enabling context and the stashed buffer is handed to the driver.
 * Otherwise the request is forwarded to the user as UART_RX_BUF_REQUEST.
 */
static void on_rx_buf_req(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uarte_async_data *adata = data->async;
	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);

	/* If buffer is not null it indicates that event comes from RX enabling
	 * function context. We need to pass provided buffer to the driver.
	 */
	if (adata->en_rx_buf) {
		uint8_t *buf = adata->en_rx_buf;
		size_t len = adata->en_rx_len;
		nrfx_err_t err;

		adata->en_rx_buf = NULL;
		adata->en_rx_len = 0;

		err = nrfx_uarte_rx_buffer_set(nrfx_dev, buf, len);
		__ASSERT_NO_MSG(err == NRFX_SUCCESS);
		return;
	}

	struct uart_event evt = {
		.type = UART_RX_BUF_REQUEST
	};

	/* When an RX timeout is in use, re-enable the RXDRDY event so the
	 * timeout logic is notified about incoming bytes again (it was
	 * disabled in on_rx_byte()).
	 */
	if (!K_TIMEOUT_EQ(adata->rx_timeout, K_NO_WAIT)) {
		nrfx_uarte_rxdrdy_enable(nrfx_dev);
	}
	data->async->user_callback(dev, &evt, data->async->user_data);
}
318 
on_rx_disabled(const struct device * dev,struct uarte_nrfx_data * data)319 static void on_rx_disabled(const struct device *dev, struct uarte_nrfx_data *data)
320 {
321 	struct uart_event evt = {
322 		.type = UART_RX_DISABLED
323 	};
324 
325 	atomic_and(&data->flags, ~UARTE_DATA_FLAG_RX_ENABLED);
326 	k_timer_stop(&data->async->rx_timer);
327 
328 	data->async->user_callback(dev, &evt, data->async->user_data);
329 }
330 
trigger_handler(const struct device * dev)331 static void trigger_handler(const struct device *dev)
332 {
333 	struct uarte_nrfx_data *data = dev->data;
334 
335 	if (UARTE_ANY_INTERRUPT_DRIVEN &&
336 	    atomic_and(&data->flags, ~UARTE_DATA_FLAG_TRAMPOLINE) &
337 	    UARTE_DATA_FLAG_TRAMPOLINE) {
338 		uart_async_to_irq_trampoline_cb(dev);
339 	}
340 }
341 
/* Dispatch nrfx driver events, called from UARTE interrupt context. */
static void evt_handler(nrfx_uarte_event_t const *event, void *context)
{
	const struct device *dev = context;
	struct uarte_nrfx_data *data = dev->data;

	if (event->type == NRFX_UARTE_EVT_TX_DONE) {
		on_tx_done(dev, event);
	} else if (event->type == NRFX_UARTE_EVT_RX_DONE) {
		on_rx_done(dev, event);
	} else if (event->type == NRFX_UARTE_EVT_RX_BYTE) {
		on_rx_byte(dev);
	} else if (event->type == NRFX_UARTE_EVT_ERROR) {
		/* Latch error mask; async API additionally stops reception. */
		data->async->err = event->data.error.error_mask;
		if (IS_ASYNC_API(dev)) {
			(void)uart_rx_disable(dev);
		}
	} else if (event->type == NRFX_UARTE_EVT_RX_BUF_REQUEST) {
		on_rx_buf_req(dev);
	} else if (event->type == NRFX_UARTE_EVT_RX_DISABLED) {
		on_rx_disabled(dev, data);
	} else if (event->type == NRFX_UARTE_EVT_RX_BUF_TOO_LATE) {
		/* No support */
	} else if (event->type == NRFX_UARTE_EVT_TRIGGER) {
		trigger_handler(dev);
	} else {
		__ASSERT_NO_MSG(0);
	}
}
379 
/* Asynchronous TX API.
 *
 * Starts transmission of @p buf. When HW flow control is enabled and a
 * finite @p timeout (microseconds) is given, a timer is armed which aborts
 * the transfer if it does not complete in time.
 *
 * @retval 0 on success.
 * @retval -ECANCELED when the device is not in the active PM state.
 * @retval -EBUSY when a transfer is already ongoing.
 * @retval -EIO on other driver errors.
 */
static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
{
	struct uarte_nrfx_data *data = dev->data;
	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
	nrfx_err_t err;
	bool hwfc;

#if CONFIG_PM_DEVICE
	enum pm_device_state state;

	(void)pm_device_state_get(dev, &state);
	if (state != PM_DEVICE_STATE_ACTIVE) {
		return -ECANCELED;
	}
#endif

#if CONFIG_UART_USE_RUNTIME_CONFIGURE
	hwfc = data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS;
#else
	const struct uarte_nrfx_config *config = dev->config;

	hwfc = config->nrfx_config.config.hwfc == NRF_UARTE_HWFC_ENABLED;
#endif

	err = nrfx_uarte_tx(nrfx_dev, buf, len, 0);
	if (err != NRFX_SUCCESS) {
		return (err == NRFX_ERROR_BUSY) ? -EBUSY : -EIO;
	}

	/* The abort timer is only needed when flow control could stall TX. */
	if (hwfc && timeout != SYS_FOREVER_US) {
		k_timer_start(&data->async->tx_timer, K_USEC(timeout), K_NO_WAIT);
	}

	return 0;
}
415 
api_tx_abort(const struct device * dev)416 static int api_tx_abort(const struct device *dev)
417 {
418 	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
419 	nrfx_err_t err;
420 
421 	err = nrfx_uarte_tx_abort(nrfx_dev, false);
422 	return (err == NRFX_SUCCESS) ? 0 : -EFAULT;
423 }
424 
/* TX timeout timer expiry: abort the pending transmission. */
static void tx_timeout_handler(struct k_timer *timer)
{
	const struct device *dev = (const struct device *)k_timer_user_data_get(timer);

	(void)api_tx_abort(dev);
}
431 
rx_timeout_handler(struct k_timer * timer)432 static void rx_timeout_handler(struct k_timer *timer)
433 {
434 	const struct device *dev = (const struct device *)k_timer_user_data_get(timer);
435 	struct uarte_nrfx_data *data = dev->data;
436 	struct uarte_async_data *adata = data->async;
437 	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
438 
439 	if (nrfx_uarte_rx_new_data_check(nrfx_dev)) {
440 		adata->idle_cnt = RX_TIMEOUT_DIV - 1;
441 	} else {
442 		adata->idle_cnt--;
443 		if (adata->idle_cnt == 0) {
444 			(void)nrfx_uarte_rx_abort(nrfx_dev, false, false);
445 			return;
446 		}
447 	}
448 
449 	start_rx_timer(data);
450 }
451 
/* Determine if RX FIFO content shall be kept when device is being disabled.
 * When flow-control is used then we expect to keep RX FIFO content since HWFC
 * enforces lossless communication. However, when HWFC is not used (by any
 * instance) then RX FIFO handling feature is disabled in the nrfx_uarte to
 * save space. It is based on assumption that without HWFC it is expected that
 * some data may be lost and there are means to prevent that (keeping receiver
 * always opened by providing reception buffers on time).
 */
static inline uint32_t get_keep_fifo_content_flag(const struct device *dev)
{
#if CONFIG_UART_USE_RUNTIME_CONFIGURE
	struct uarte_nrfx_data *data = dev->data;

	if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) {
		return NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT;
	}
#else
	const struct uarte_nrfx_config *config = dev->config;

	if (config->nrfx_config.config.hwfc == NRF_UARTE_HWFC_ENABLED) {
		return NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT;
	}
#endif

	return 0;
}
478 
/* Asynchronous RX enable API.
 *
 * Configures the RX timeout (split into RX_TIMEOUT_DIV slabs), stashes the
 * initial buffer to be provided from the buffer request event and enables
 * the receiver.
 *
 * @retval 0 on success.
 * @retval -ENOTSUP when the instance is configured without RX.
 * @retval -EBUSY when the receiver is already enabled.
 * @retval -EIO on other driver errors.
 */
static int api_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout)
{
	nrfx_err_t err;
	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
	const struct uarte_nrfx_config *cfg = dev->config;
	struct uarte_nrfx_data *data = dev->data;
	struct uarte_async_data *adata = data->async;
	uint32_t flags = NRFX_UARTE_RX_ENABLE_CONT |
			 get_keep_fifo_content_flag(dev) |
			 (IS_ASYNC_API(dev) ? NRFX_UARTE_RX_ENABLE_STOP_ON_END : 0);

	if (cfg->flags & UARTE_CFG_FLAG_NO_RX) {
		return -ENOTSUP;
	}

	if (timeout != SYS_FOREVER_US) {
		/* Timeout handling relies on the RXDRDY event to detect
		 * incoming bytes (see on_rx_byte()/rx_timeout_handler()).
		 */
		adata->idle_cnt = RX_TIMEOUT_DIV + 1;
		adata->rx_timeout = K_USEC(timeout / RX_TIMEOUT_DIV);
		nrfx_uarte_rxdrdy_enable(nrfx_dev);
	} else {
		adata->rx_timeout = K_NO_WAIT;
	}

	/* Store the buffer. It will be passed to the driver in the event handler.
	 * We do that instead of calling nrfx_uarte_rx_buffer_set here to ensure
	 * that nrfx_uarte_rx_buffer_set is called when RX enable configuration
	 * flags are already known to the driver (e.g. if flushed data shall be
	 * kept or not).
	 */
	adata->err = 0;
	adata->en_rx_buf = buf;
	adata->en_rx_len = len;

	atomic_or(&data->flags, UARTE_DATA_FLAG_RX_ENABLED);

	err = nrfx_uarte_rx_enable(nrfx_dev, flags);
	if (err != NRFX_SUCCESS) {
		atomic_and(&data->flags, ~UARTE_DATA_FLAG_RX_ENABLED);
		return (err == NRFX_ERROR_BUSY) ? -EBUSY : -EIO;
	}

	return 0;
}
522 
/* Provide the next RX buffer in response to UART_RX_BUF_REQUEST. */
static int api_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uarte_nrfx_data *data = dev->data;
	nrfx_err_t err;

	/* Buffers are only accepted while the receiver is enabled. */
	if ((data->flags & UARTE_DATA_FLAG_RX_ENABLED) == 0) {
		return -EACCES;
	}

	err = nrfx_uarte_rx_buffer_set(get_nrfx_dev(dev), buf, len);
	if (err == NRFX_SUCCESS) {
		return 0;
	}

	return (err == NRFX_ERROR_BUSY) ? -EBUSY : -EIO;
}
543 
api_rx_disable(const struct device * dev)544 static int api_rx_disable(const struct device *dev)
545 {
546 	struct uarte_nrfx_data *data = dev->data;
547 
548 	k_timer_stop(&data->async->rx_timer);
549 
550 	return (nrfx_uarte_rx_abort(get_nrfx_dev(dev), true, false) == NRFX_SUCCESS) ? 0 : -EFAULT;
551 }
552 
api_poll_in(const struct device * dev,unsigned char * c)553 static int api_poll_in(const struct device *dev, unsigned char *c)
554 {
555 	const struct uarte_nrfx_config *cfg = dev->config;
556 	const nrfx_uarte_t *instance = &cfg->nrfx_dev;
557 	nrfx_err_t err;
558 
559 	if (IS_INT_DRIVEN_API(dev)) {
560 		return uart_fifo_read(dev, c, 1) == 0 ? -1 : 0;
561 	}
562 
563 	if (IS_ASYNC_API(dev)) {
564 		return -EBUSY;
565 	}
566 
567 	err = nrfx_uarte_rx_ready(instance, NULL);
568 	if (err == NRFX_SUCCESS) {
569 		uint8_t *rx_byte = cfg->nrfx_config.rx_cache.p_buffer;
570 
571 		*c = *rx_byte;
572 		err = nrfx_uarte_rx_buffer_set(instance, rx_byte, 1);
573 		__ASSERT_NO_MSG(err == NRFX_SUCCESS);
574 
575 		return 0;
576 	}
577 
578 	return -1;
579 }
580 
/* Blocking single character output.
 *
 * Returns without sending when the device is not in the active PM state.
 * Otherwise busy-waits (sleeping when called from a preemptible thread)
 * until the driver accepts the byte.
 */
static void api_poll_out(const struct device *dev, unsigned char out_char)
{
	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
	nrfx_err_t err;

#if CONFIG_PM_DEVICE
	enum pm_device_state state;

	(void)pm_device_state_get(dev, &state);
	if (state != PM_DEVICE_STATE_ACTIVE) {
		return;
	}
#endif

	do {
		/* When runtime PM is used we cannot use early return because then
		 * we have no information when UART is actually done with the
		 * transmission. It reduces UART performance however, polling in
		 * general is not power efficient and should be avoided in low
		 * power applications.
		 */
		err = nrfx_uarte_tx(nrfx_dev, &out_char, 1, NRFX_UARTE_TX_EARLY_RETURN);
		__ASSERT(err != NRFX_ERROR_INVALID_ADDR, "Invalid address of the buffer");

		if (err == NRFX_ERROR_BUSY) {
			if (IS_ENABLED(CONFIG_MULTITHREADING) && k_is_preempt_thread()) {
				k_msleep(1);
			} else {
				Z_SPIN_DELAY(3);
			}
		}
	} while (err == NRFX_ERROR_BUSY);
}
614 
615 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
616 /**
617  * @brief Set the baud rate
618  *
619  * This routine set the given baud rate for the UARTE.
620  *
621  * @param dev UARTE device struct
622  * @param baudrate Baud rate
623  *
624  * @return 0 on success or error code
625  */
baudrate_set(NRF_UARTE_Type * uarte,uint32_t baudrate)626 static int baudrate_set(NRF_UARTE_Type *uarte, uint32_t baudrate)
627 {
628 	nrf_uarte_baudrate_t nrf_baudrate = NRF_BAUDRATE(baudrate);
629 
630 	if (baudrate == 0) {
631 		return -EINVAL;
632 	}
633 
634 	nrfy_uarte_baudrate_set(uarte, nrf_baudrate);
635 
636 	return 0;
637 }
638 
/* Runtime configuration API.
 *
 * Translates a generic uart_config into UARTE register settings and applies
 * them. On success the configuration is stored for uarte_nrfx_config_get().
 *
 * @retval 0 on success.
 * @retval -ENOTSUP for unsupported stop bits, data bits, flow control,
 *         parity or baud rate.
 */
static int uarte_nrfx_configure(const struct device *dev,
				const struct uart_config *cfg)
{
	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
	struct uarte_nrfx_data *data = dev->data;
	nrf_uarte_config_t uarte_cfg;

#if NRF_UARTE_HAS_FRAME_TIMEOUT
	/* Frame timeout is not used by this driver. */
	uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_DIS;
#endif

#if defined(UARTE_CONFIG_STOP_Msk)
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uarte_cfg.stop = NRF_UARTE_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uarte_cfg.stop = NRF_UARTE_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	/* Hardware without the STOP field supports one stop bit only. */
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	/* UARTE supports 8 data bits only. */
	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	/* Default to even parity; overridden below for odd parity. */
	uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		break;
#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	case UART_CFG_PARITY_ODD:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(nrfx_dev->p_reg, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}

	nrfy_uarte_configure(nrfx_dev->p_reg, &uarte_cfg);

	/* Remember the applied configuration for config_get. */
	data->uart_config = *cfg;

	return 0;
}
712 
uarte_nrfx_config_get(const struct device * dev,struct uart_config * cfg)713 static int uarte_nrfx_config_get(const struct device *dev,
714 				 struct uart_config *cfg)
715 {
716 	struct uarte_nrfx_data *data = dev->data;
717 
718 	*cfg = data->uart_config;
719 	return 0;
720 }
721 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
722 
723 #if UARTE_ANY_POLLING || UARTE_ANY_INTERRUPT_DRIVEN
api_err_check(const struct device * dev)724 static int api_err_check(const struct device *dev)
725 {
726 	if (IS_POLLING_API(dev)) {
727 		const struct uarte_nrfx_config *cfg = dev->config;
728 		const nrfx_uarte_t *instance = &cfg->nrfx_dev;
729 		uint32_t mask = nrfx_uarte_errorsrc_get(instance);
730 
731 		return mask;
732 	}
733 
734 	struct uarte_nrfx_data *data = dev->data;
735 	uint32_t rv = data->async->err;
736 
737 	data->async->err = 0;
738 
739 	return rv;
740 }
741 #endif
742 
/* Asynchronous functions used by the async-to-irq adaptation layer to
 * implement the interrupt driven API on top of the asynchronous one.
 */
static const struct uart_async_to_irq_async_api a2i_api = {
	.callback_set		= callback_set,
	.tx			= api_tx,
	.tx_abort		= api_tx_abort,
	.rx_enable		= api_rx_enable,
	.rx_buf_rsp		= api_rx_buf_rsp,
	.rx_disable		= api_rx_disable,
};
751 
/* UART driver API. Besides poll in/out, the configure, error check,
 * asynchronous and interrupt driven parts are included depending on which
 * features any enabled instance uses.
 */
static DEVICE_API(uart, uart_nrfx_uarte_driver_api) = {
	.poll_in	= api_poll_in,
	.poll_out	= api_poll_out,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure	= uarte_nrfx_configure,
	.config_get	= uarte_nrfx_config_get,
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
#if UARTE_ANY_POLLING || UARTE_ANY_INTERRUPT_DRIVEN
	.err_check	= api_err_check,
#endif
#if UARTE_ANY_ASYNC
	.callback_set	= api_callback_set,
	.tx		= api_tx,
	.tx_abort	= api_tx_abort,
	.rx_enable	= api_rx_enable,
	.rx_buf_rsp	= api_rx_buf_rsp,
	.rx_disable	= api_rx_disable,
#endif /* UARTE_ANY_ASYNC */
#if UARTE_ANY_INTERRUPT_DRIVEN
	/* Fills in the fifo/irq callbacks from the adaptation layer. */
	UART_ASYNC_TO_IRQ_API_INIT(),
#endif /* UARTE_ANY_INTERRUPT_DRIVEN */
};
774 
/* Connect the ENDTX event to the STOPTX task through a (D)PPI channel so
 * the transmitter is stopped automatically when a transfer completes.
 */
static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte)
{
	uint8_t ch;

	if (nrfx_gppi_channel_alloc(&ch) != NRFX_SUCCESS) {
		LOG_ERR("Failed to allocate PPI Channel");
		return -EIO;
	}

	nrfx_gppi_channel_endpoints_setup(ch,
		nrfy_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX),
		nrfy_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX));
	nrfx_gppi_channels_enable(BIT(ch));

	return 0;
}
793 
start_rx(const struct device * dev)794 static int start_rx(const struct device *dev)
795 {
796 	const struct uarte_nrfx_config *cfg = dev->config;
797 
798 	if (IS_INT_DRIVEN_API(dev)) {
799 		return uart_async_to_irq_rx_enable(dev);
800 	}
801 
802 	__ASSERT_NO_MSG(IS_POLLING_API(dev));
803 
804 	nrfx_err_t err;
805 	const nrfx_uarte_t *instance = &cfg->nrfx_dev;
806 	uint8_t *rx_byte = cfg->nrfx_config.rx_cache.p_buffer;
807 
808 	err = nrfx_uarte_rx_buffer_set(instance, rx_byte, 1);
809 	__ASSERT_NO_MSG(err == NRFX_SUCCESS);
810 
811 	err = nrfx_uarte_rx_enable(instance, 0);
812 	__ASSERT_NO_MSG(err == NRFX_SUCCESS || err == NRFX_ERROR_BUSY);
813 
814 	(void)err;
815 
816 	return 0;
817 }
818 
async_to_irq_trampoline(const struct device * dev)819 static void async_to_irq_trampoline(const struct device *dev)
820 {
821 	const struct uarte_nrfx_config *cfg = dev->config;
822 	struct uarte_nrfx_data *data = dev->data;
823 	uint32_t prev = atomic_or(&data->flags, UARTE_DATA_FLAG_TRAMPOLINE);
824 
825 	if (!(prev & UARTE_DATA_FLAG_TRAMPOLINE)) {
826 		nrfx_uarte_int_trigger(&cfg->nrfx_dev);
827 	}
828 }
829 
uarte_nrfx_init(const struct device * dev)830 static int uarte_nrfx_init(const struct device *dev)
831 {
832 	int err;
833 	nrfx_err_t nerr;
834 	const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev);
835 	const struct uarte_nrfx_config *cfg = dev->config;
836 	struct uarte_nrfx_data *data = dev->data;
837 
838 #ifdef CONFIG_ARCH_POSIX
839 	/* For simulation the DT provided peripheral address needs to be corrected */
840 	((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)nrfx_dev->p_reg;
841 #endif
842 
843 	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
844 	if (err < 0) {
845 		return err;
846 	}
847 
848 	if (UARTE_ENHANCED_POLL_OUT && cfg->nrfx_config.tx_stop_on_end) {
849 		err = endtx_stoptx_ppi_init(nrfx_dev->p_reg);
850 		if (err < 0) {
851 			return err;
852 		}
853 	}
854 
855 	if (UARTE_ANY_INTERRUPT_DRIVEN) {
856 		if (cfg->a2i_config) {
857 			err = uart_async_to_irq_init(dev);
858 			if (err < 0) {
859 				return err;
860 			}
861 		}
862 	}
863 
864 	if (IS_ENABLED(UARTE_INT_ASYNC) && data->async) {
865 		k_timer_init(&data->async->rx_timer, rx_timeout_handler, NULL);
866 		k_timer_user_data_set(&data->async->rx_timer, (void *)dev);
867 		k_timer_init(&data->async->tx_timer, tx_timeout_handler, NULL);
868 		k_timer_user_data_set(&data->async->tx_timer, (void *)dev);
869 	}
870 
871 	nerr = nrfx_uarte_init(nrfx_dev, &cfg->nrfx_config,
872 			IS_ENABLED(UARTE_INT_ASYNC) ?
873 				(IS_POLLING_API(dev) ? NULL : evt_handler) : NULL);
874 	if (nerr == NRFX_SUCCESS && !IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) {
875 		err = start_rx(dev);
876 	}
877 
878 	switch (nerr) {
879 	case NRFX_ERROR_INVALID_STATE:
880 		return -EBUSY;
881 	case NRFX_ERROR_BUSY:
882 		return -EACCES;
883 	case NRFX_ERROR_INVALID_PARAM:
884 		return -EINVAL;
885 	default:
886 		return 0;
887 	}
888 }
889 
890 #ifdef CONFIG_PM_DEVICE
stop_rx(const struct device * dev)891 static int stop_rx(const struct device *dev)
892 {
893 	const struct uarte_nrfx_config *cfg = dev->config;
894 
895 	if (IS_INT_DRIVEN_API(dev)) {
896 		return uart_async_to_irq_rx_disable(dev);
897 	}
898 
899 	__ASSERT_NO_MSG(IS_POLLING_API(dev));
900 	nrfx_err_t err;
901 	const nrfx_uarte_t *instance = &cfg->nrfx_dev;
902 
903 	err = nrfx_uarte_rx_abort(instance, true, true);
904 	__ASSERT_NO_MSG(err == NRFX_SUCCESS);
905 
906 	return 0;
907 }
908 
uarte_nrfx_pm_action(const struct device * dev,enum pm_device_action action)909 static int uarte_nrfx_pm_action(const struct device *dev,
910 				enum pm_device_action action)
911 {
912 	const struct uarte_nrfx_config *cfg = dev->config;
913 	int ret;
914 
915 	switch (action) {
916 	case PM_DEVICE_ACTION_RESUME:
917 		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
918 		if (ret < 0) {
919 			return ret;
920 		}
921 		if (!IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) {
922 			return start_rx(dev);
923 		}
924 
925 		break;
926 	case PM_DEVICE_ACTION_SUSPEND:
927 		if (!IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) {
928 			stop_rx(dev);
929 		}
930 
931 		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP);
932 		if (ret < 0) {
933 			return ret;
934 		}
935 
936 		break;
937 	default:
938 		return -ENOTSUP;
939 	}
940 
941 	return 0;
942 }
943 #endif
944 
945 #if defined(UARTE_CONFIG_STOP_Msk)
946 #define UARTE_HAS_STOP_CONFIG 1
947 #endif
948 
949 #define UARTE(idx)			DT_NODELABEL(uart##idx)
950 #define UARTE_HAS_PROP(idx, prop)	DT_NODE_HAS_PROP(UARTE(idx), prop)
951 #define UARTE_PROP(idx, prop)		DT_PROP(UARTE(idx), prop)
952 
953 /* Macro returning initial log level. Logs are off for UART used for console. */
954 #define GET_INIT_LOG_LEVEL(idx)					  \
955 	COND_CODE_1(DT_HAS_CHOSEN(zephyr_console),		  \
956 		(DT_SAME_NODE(UARTE(idx),			  \
957 			      DT_CHOSEN(zephyr_console)) ?	  \
958 			 LOG_LEVEL_NONE : CONFIG_UART_LOG_LEVEL), \
959 			(CONFIG_UART_LOG_LEVEL))
960 
961 /* Macro puts buffers in dedicated section if device tree property is set. */
962 #define UARTE_MEMORY_SECTION(idx)					       \
963 	COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions),		       \
964 		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME(	       \
965 			DT_PHANDLE(UARTE(idx), memory_regions)))))),	       \
966 		())
967 
/* Macro defining all objects needed for a single UARTE instance: log
 * instance, TX/RX cache buffers (optionally placed in a dedicated memory
 * section), the async-to-irq adaptation buffers/config when the instance
 * uses the interrupt-driven API, pinctrl state, the constant configuration
 * structure, the runtime data structure, the per-instance init function
 * (which connects the IRQ unless the instance is polling-only), PM hooks
 * and the device definition itself.
 *
 * Fix: removed a stray space inside the CONFIG_UART_##idx##_INTERRUPT_DRIVEN
 * token paste in .a2i_config (harmless to the preprocessor, since whitespace
 * around ## is ignored, but inconsistent with every other use in this file).
 */
#define UART_NRF_UARTE_DEVICE(idx) \
	LOG_INSTANCE_REGISTER(LOG_MODULE_NAME, idx, GET_INIT_LOG_LEVEL(idx));			\
	static uint8_t uarte##idx##_tx_cache[CONFIG_UART_##idx##_TX_CACHE_SIZE]			\
			UARTE_MEMORY_SECTION(idx) __aligned(4);					\
	static uint8_t uarte##idx##_rx_cache[CONFIG_UART_##idx##_RX_CACHE_SIZE]			\
			UARTE_MEMORY_SECTION(idx) __aligned(4);					\
	static nrfx_uarte_rx_cache_t uarte##idx##_rx_cache_scratch;				\
	IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,					\
		(static uint8_t a2i_rx_buf##idx[CONFIG_UART_##idx##_A2I_RX_SIZE]		\
			UARTE_MEMORY_SECTION(idx) __aligned(4);))				\
	PINCTRL_DT_DEFINE(UARTE(idx));								\
	static const struct uart_async_to_irq_config uarte_a2i_config_##idx =			\
		UART_ASYNC_TO_IRQ_API_CONFIG_INITIALIZER(&a2i_api,				\
					  async_to_irq_trampoline,				\
					  UARTE_PROP(idx, current_speed),			\
					  uarte##idx##_tx_cache,				\
					  /* nrfx_uarte driver is using the last byte in the */	\
					  /* cache buffer for keeping a byte that is currently*/\
					  /* polled out so it cannot be used as a cache buffer*/\
					  /* by the adaptation layer. */			\
					  sizeof(uarte##idx##_tx_cache) - 1,			\
					  COND_CODE_1(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,	\
						  (a2i_rx_buf##idx), (NULL)),			\
					  COND_CODE_1(CONFIG_UART_##idx##_INTERRUPT_DRIVEN,	\
						  (sizeof(a2i_rx_buf##idx)), (0)),		\
					  CONFIG_UART_##idx##_A2I_RX_BUF_COUNT,			\
					  LOG_INSTANCE_PTR(LOG_MODULE_NAME, idx));		\
	static const struct uarte_nrfx_config uarte_config_##idx = {				\
		.a2i_config = IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ?		\
			&uarte_a2i_config_##idx : NULL,						\
		.nrfx_dev = NRFX_UARTE_INSTANCE(idx),						\
		.nrfx_config = {								\
			.p_context = (void *)DEVICE_DT_GET(UARTE(idx)),				\
			.tx_cache = {								\
				.p_buffer = uarte##idx##_tx_cache,				\
				.length = CONFIG_UART_##idx##_TX_CACHE_SIZE			\
			},									\
			.rx_cache = {								\
				.p_buffer = uarte##idx##_rx_cache,				\
				.length = CONFIG_UART_##idx##_RX_CACHE_SIZE			\
			},									\
			.p_rx_cache_scratch = &uarte##idx##_rx_cache_scratch,			\
			.baudrate = NRF_BAUDRATE(UARTE_PROP(idx, current_speed)),		\
			.interrupt_priority = DT_IRQ(UARTE(idx), priority),			\
			.config = {								\
				.hwfc = (UARTE_PROP(idx, hw_flow_control) ==			\
					UART_CFG_FLOW_CTRL_RTS_CTS) ?				\
					NRF_UARTE_HWFC_ENABLED : NRF_UARTE_HWFC_DISABLED,	\
				.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ?	\
					NRF_UARTE_PARITY_INCLUDED : NRF_UARTE_PARITY_EXCLUDED,	\
				IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\
				IF_ENABLED(UARTE_ODD_PARITY_ALLOWED,				\
					(.paritytype = NRF_UARTE_PARITYTYPE_EVEN,))		\
			},									\
			.tx_stop_on_end = IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT),	\
			.skip_psel_cfg = true,							\
			.skip_gpio_cfg = true,							\
		},										\
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)),					\
		.flags = (UARTE_PROP(idx, disable_rx) ? UARTE_CFG_FLAG_NO_RX : 0) |		\
			(IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ?			\
				UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API : 0),			\
		LOG_INSTANCE_PTR_INIT(log, LOG_MODULE_NAME, idx)				\
	};											\
	static struct uart_async_to_irq_data uarte_a2i_data_##idx;				\
	static struct uarte_async_data uarte_async_##idx;					\
	static struct uarte_nrfx_data uarte_data_##idx = {					\
		.a2i_data = IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ?			\
			&uarte_a2i_data_##idx : NULL,						\
		IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE,					\
			(.uart_config = {							\
			.baudrate = UARTE_PROP(idx, current_speed),                             \
			.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ?              \
				  UART_CFG_PARITY_EVEN : UART_CFG_PARITY_NONE,                  \
			.stop_bits = UART_CFG_STOP_BITS_1,                                      \
			.data_bits = UART_CFG_DATA_BITS_8,                                      \
			.flow_ctrl = UARTE_PROP(idx, hw_flow_control) ?                         \
				     UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE,      \
			},))									\
		.async = (IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ||			\
			  IS_ENABLED(CONFIG_UART_##idx##_ASYNC)) ? &uarte_async_##idx : NULL	\
	};											\
	static int uarte_init_##idx(const struct device *dev)					\
	{											\
		/* Polling-only instances do not need the IRQ connected. */			\
		COND_CODE_1(INSTANCE_POLLING(_, /*empty*/, idx, _), (),				\
			(									\
			IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority),		\
				    nrfx_isr, nrfx_uarte_##idx##_irq_handler, 0);		\
			irq_enable(DT_IRQN(UARTE(idx)));					\
			)									\
		)										\
		return uarte_nrfx_init(dev);							\
	}											\
	PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action);					\
	DEVICE_DT_DEFINE(UARTE(idx),								\
		      uarte_init_##idx,								\
		      PM_DEVICE_DT_GET(UARTE(idx)),						\
		      &uarte_data_##idx,							\
		      &uarte_config_##idx,							\
		      PRE_KERNEL_1,								\
		      CONFIG_KERNEL_INIT_PRIORITY_DEVICE,					\
		      &uart_nrfx_uarte_driver_api)
1070 
/* Macro creates a device instance if it is enabled in the devicetree,
 * i.e. when CONFIG_HAS_HW_NRF_UARTE<prefix><id> is set for that instance.
 */
#define UARTE_DEVICE(periph, prefix, id, _) \
	IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##id, (UART_NRF_UARTE_DEVICE(prefix##id);))
1074 
/* Iterate over all nrfx_uarte instances enabled in nrfx_config.h, creating
 * a device (via UARTE_DEVICE) for each instance that is also enabled in the
 * devicetree.
 */
NRFX_FOREACH_ENABLED(UARTE, UARTE_DEVICE, (), (), _)
1077