1 /*
2 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @brief Driver for Nordic Semiconductor nRF UARTE
9 */
10
11 #include <zephyr/drivers/uart.h>
12 #include <zephyr/drivers/pinctrl.h>
13 #include <zephyr/pm/device.h>
14 #include <zephyr/pm/device_runtime.h>
15 #include <hal/nrf_uarte.h>
16 #include <nrfx_timer.h>
17 #include <zephyr/sys/util.h>
18 #include <zephyr/kernel.h>
19 #include <zephyr/cache.h>
20 #include <soc.h>
21 #include <dmm.h>
22 #include <helpers/nrfx_gppi.h>
23 #include <zephyr/linker/devicetree_regions.h>
24 #include <zephyr/irq.h>
25 #include <zephyr/logging/log.h>
26
27 #ifdef CONFIG_SOC_NRF54H20_GPD
28 #include <nrf/gpd.h>
29 #endif
30
31 LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL);
32
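/* Enable the workaround for RX FIFO flushing, where the RX.AMOUNT register is not
 * updated when the FIFO was empty (see rx_flush()).
 */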
33 #define RX_FLUSH_WORKAROUND 1
34
35 #define UARTE(idx) DT_NODELABEL(uart##idx)
36 #define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop)
37 #define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop)
38
39 #define UARTE_IS_CACHEABLE(idx) DMM_IS_REG_CACHEABLE(DT_PHANDLE(UARTE(idx), memory_regions))
40
41 /* Execute macro f(x) for all instances. */
42 #define UARTE_FOR_EACH_INSTANCE(f, sep, off_code, ...) \
43 NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, __VA_ARGS__)
44
45 /* Determine if any instance is using interrupt driven API. */
46 #define IS_INT_DRIVEN(unused, prefix, i, _) \
47 (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
48 IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN))
49
50 #if UARTE_FOR_EACH_INSTANCE(IS_INT_DRIVEN, (||), (0))
51 #define UARTE_INTERRUPT_DRIVEN 1
52 #endif
53
54 /* Determine if any instance is not using asynchronous API. */
55 #define IS_NOT_ASYNC(unused, prefix, i, _) \
56 (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
57 !IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))
58
59 #if UARTE_FOR_EACH_INSTANCE(IS_NOT_ASYNC, (||), (0))
60 #define UARTE_ANY_NONE_ASYNC 1
61 #endif
62
63 /* Determine if any instance is using asynchronous API. */
64 #define IS_ASYNC(unused, prefix, i, _) \
65 (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
66 IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))
67
68 #if UARTE_FOR_EACH_INSTANCE(IS_ASYNC, (||), (0))
69 #define UARTE_ANY_ASYNC 1
70 #endif
71
72 /* Determine if any instance is using asynchronous API with HW byte counting. */
73 #define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC)
74
75 #if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0))
76 #define UARTE_ANY_HW_ASYNC 1
77 #endif
78
79 /* Determine if any instance is using enhanced poll_out feature. */
80 #define IS_ENHANCED_POLL_OUT(unused, prefix, i, _) \
81 IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT)
82
83 #if UARTE_FOR_EACH_INSTANCE(IS_ENHANCED_POLL_OUT, (||), (0))
84 #define UARTE_ENHANCED_POLL_OUT 1
85 #endif
86
87 #define INSTANCE_PROP(unused, prefix, i, prop) UARTE_PROP(prefix##i, prop)
88 #define INSTANCE_PRESENT(unused, prefix, i, prop) 1
89
/* Driver supports only cases where either all or none of the instances support a given HW feature. */
91 #if (UARTE_FOR_EACH_INSTANCE(INSTANCE_PROP, (+), (0), endtx_stoptx_supported)) == \
92 (UARTE_FOR_EACH_INSTANCE(INSTANCE_PRESENT, (+), (0), endtx_stoptx_supported))
93 #define UARTE_HAS_ENDTX_STOPTX_SHORT 1
94 #endif
95
96 #if (UARTE_FOR_EACH_INSTANCE(INSTANCE_PROP, (+), (0), frame_timeout_supported)) == \
97 (UARTE_FOR_EACH_INSTANCE(INSTANCE_PRESENT, (+), (0), frame_timeout_supported))
98 #define UARTE_HAS_FRAME_TIMEOUT 1
99 #endif
100
101 #define INSTANCE_NEEDS_CACHE_MGMT(unused, prefix, i, prop) UARTE_IS_CACHEABLE(prefix##i)
102
103 #if UARTE_FOR_EACH_INSTANCE(INSTANCE_NEEDS_CACHE_MGMT, (+), (0), _)
104 #define UARTE_ANY_CACHE 1
105 #endif
106
107 #define IS_LOW_POWER(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_ASYNC_LOW_POWER)
108
109 #if UARTE_FOR_EACH_INSTANCE(IS_LOW_POWER, (||), (0))
110 #define UARTE_ANY_LOW_POWER 1
111 #endif
112
113 /* Macro must resolve to literal 0 or 1 */
114 #define INSTANCE_IS_FAST(unused, prefix, idx, _) \
115 COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(UARTE(idx)), \
116 (COND_CODE_1(UTIL_AND(IS_ENABLED(CONFIG_SOC_NRF54H20_GPD), \
117 DT_NODE_HAS_PROP(UARTE(idx), power_domains)), \
118 (COND_CODE_0(DT_PHA(UARTE(idx), power_domains, id), (1), (0))),\
119 (0))), (0))
120
121 #if UARTE_FOR_EACH_INSTANCE(INSTANCE_IS_FAST, (||), (0))
122 #define UARTE_ANY_FAST 1
123 #endif
124
125 #ifdef UARTE_ANY_CACHE
/* The uart120 instance does not retain the BAUDRATE register when ENABLE=0. When this
 * instance is used, the baud rate must be set after enabling the peripheral, not before.
 * This approach works for all instances, so it can be applied generally whenever uart120
 * is used. It is not the default for all instances because it costs some resources. Since
 * currently only uart120 needs cache management, that is used to decide whether to apply it.
131 */
132 #define UARTE_BAUDRATE_RETENTION_WORKAROUND 1
133 #endif
134
135 /*
 * RX timeout is divided into time slabs; this define tells how many divisions
 * should be made. More divisions give higher timeout accuracy and higher processor usage.
138 */
139 #define RX_TIMEOUT_DIV 5
140
/* Size of the hardware FIFO in the RX path. */
142 #define UARTE_HW_RX_FIFO_SIZE 5
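/* The rx_flush_buf scratch buffer and the latency margin described in
 * uarte_nrfx_rx_buf_rsp() are both derived from this FIFO depth.
 */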
143
144 #ifdef UARTE_ANY_ASYNC
145
146 struct uarte_async_tx {
147 struct k_timer timer;
148 const uint8_t *buf;
149 volatile size_t len;
150 const uint8_t *xfer_buf;
151 size_t xfer_len;
152 size_t cache_offset;
153 volatile int amount;
154 bool pending;
155 };
156
157 struct uarte_async_rx {
158 struct k_timer timer;
159 #ifdef CONFIG_HAS_NORDIC_DMM
160 uint8_t *usr_buf;
161 uint8_t *next_usr_buf;
162 #endif
163 uint8_t *buf;
164 size_t buf_len;
165 size_t offset;
166 uint8_t *next_buf;
167 size_t next_buf_len;
168 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
169 #if !defined(UARTE_HAS_FRAME_TIMEOUT)
170 uint32_t idle_cnt;
171 #endif
172 k_timeout_t timeout;
173 #else
174 uint32_t total_byte_cnt; /* Total number of bytes received */
175 uint32_t total_user_byte_cnt; /* Total number of bytes passed to user */
176 int32_t timeout_us; /* Timeout set by user */
177 int32_t timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
178 int32_t timeout_left; /* Current time left until user callback */
179 union {
180 uint8_t ppi;
181 uint32_t cnt;
182 } cnt;
183 /* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
184 volatile bool is_in_irq;
185 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
186 uint8_t flush_cnt;
187 volatile bool enabled;
188 volatile bool discard_fifo;
189 };
190
191 struct uarte_async_cb {
192 uart_callback_t user_callback;
193 void *user_data;
194 struct uarte_async_rx rx;
195 struct uarte_async_tx tx;
196 };
197 #endif /* UARTE_ANY_ASYNC */
198
199 #ifdef UARTE_INTERRUPT_DRIVEN
200 struct uarte_nrfx_int_driven {
201 uart_irq_callback_user_data_t cb; /**< Callback function pointer */
202 void *cb_data; /**< Callback function arg */
203 uint8_t *tx_buffer;
204 uint16_t tx_buff_size;
205 volatile bool disable_tx_irq;
206 bool tx_irq_enabled;
207 #ifdef CONFIG_PM_DEVICE
208 bool rx_irq_enabled;
209 #endif
210 atomic_t fifo_fill_lock;
211 };
212 #endif
213
214 /* Device data structure */
215 struct uarte_nrfx_data {
216 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
217 struct uart_config uart_config;
218 #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND
219 nrf_uarte_baudrate_t nrf_baudrate;
220 #endif
221 #endif
222 #ifdef UARTE_INTERRUPT_DRIVEN
223 struct uarte_nrfx_int_driven *int_driven;
224 #endif
225 #ifdef UARTE_ANY_ASYNC
226 struct uarte_async_cb *async;
227 #endif
228 atomic_val_t poll_out_lock;
229 atomic_t flags;
230 #ifdef UARTE_ENHANCED_POLL_OUT
231 uint8_t ppi_ch_endtx;
232 #endif
233 };
234
235 #define UARTE_FLAG_LOW_POWER_TX BIT(0)
236 #define UARTE_FLAG_LOW_POWER_RX BIT(1)
237 #define UARTE_FLAG_LOW_POWER (UARTE_FLAG_LOW_POWER_TX | UARTE_FLAG_LOW_POWER_RX)
238 #define UARTE_FLAG_TRIG_RXTO BIT(2)
239 #define UARTE_FLAG_POLL_OUT BIT(3)
240
/* If enabled then ENDTX is PPI'ed to STOPTX */
242 #define UARTE_CFG_FLAG_PPI_ENDTX BIT(0)
243
244 /* If enabled then TIMER and PPI is used for byte counting. */
245 #define UARTE_CFG_FLAG_HW_BYTE_COUNTING BIT(1)
246
/* If enabled then the UARTE peripheral is disabled when not used. This allows
 * achieving the lowest power consumption when idle.
249 */
250 #define UARTE_CFG_FLAG_LOW_POWER BIT(2)
251
252 /* If enabled then UARTE peripheral is using memory which is cacheable. */
253 #define UARTE_CFG_FLAG_CACHEABLE BIT(3)
254
/* Macro for converting a numerical baudrate to the register value. This approach
 * is convenient because for a constant input the nRF setting can be calculated
 * at compile time.
258 */
259 #define NRF_BAUDRATE(baudrate) ((baudrate) == 300 ? 0x00014000 :\
260 (baudrate) == 600 ? 0x00027000 : \
261 (baudrate) == 1200 ? NRF_UARTE_BAUDRATE_1200 : \
262 (baudrate) == 2400 ? NRF_UARTE_BAUDRATE_2400 : \
263 (baudrate) == 4800 ? NRF_UARTE_BAUDRATE_4800 : \
264 (baudrate) == 9600 ? NRF_UARTE_BAUDRATE_9600 : \
265 (baudrate) == 14400 ? NRF_UARTE_BAUDRATE_14400 : \
266 (baudrate) == 19200 ? NRF_UARTE_BAUDRATE_19200 : \
267 (baudrate) == 28800 ? NRF_UARTE_BAUDRATE_28800 : \
268 (baudrate) == 31250 ? NRF_UARTE_BAUDRATE_31250 : \
269 (baudrate) == 38400 ? NRF_UARTE_BAUDRATE_38400 : \
270 (baudrate) == 56000 ? NRF_UARTE_BAUDRATE_56000 : \
271 (baudrate) == 57600 ? NRF_UARTE_BAUDRATE_57600 : \
272 (baudrate) == 76800 ? NRF_UARTE_BAUDRATE_76800 : \
273 (baudrate) == 115200 ? NRF_UARTE_BAUDRATE_115200 : \
274 (baudrate) == 230400 ? NRF_UARTE_BAUDRATE_230400 : \
275 (baudrate) == 250000 ? NRF_UARTE_BAUDRATE_250000 : \
276 (baudrate) == 460800 ? NRF_UARTE_BAUDRATE_460800 : \
277 (baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 : \
278 (baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0)
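/* For example, NRF_BAUDRATE(115200) folds to NRF_UARTE_BAUDRATE_115200 at compile
 * time when the argument is a constant. Unsupported rates resolve to 0, which
 * baudrate_set() rejects with -EINVAL.
 */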
279
280 #define LOW_POWER_ENABLED(_config) \
281 (IS_ENABLED(UARTE_ANY_LOW_POWER) && \
282 !IS_ENABLED(CONFIG_PM_DEVICE) && \
283 (_config->flags & UARTE_CFG_FLAG_LOW_POWER))
284
285 /** @brief Check if device has PM that works in ISR safe mode.
286 *
 * Only the fast UARTE instances do not work in that mode, so the PM configuration
 * flags are checked only if any fast instance is present.
289 *
290 * @retval true if device PM is ISR safe.
291 * @retval false if device PM is not ISR safe.
292 */
293 #define IS_PM_ISR_SAFE(dev) \
294 (!IS_ENABLED(UARTE_ANY_FAST) ||\
295 COND_CODE_1(CONFIG_PM_DEVICE,\
296 ((dev->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE))), \
297 (0)))
298
299 /**
300 * @brief Structure for UARTE configuration.
301 */
302 struct uarte_nrfx_config {
303 NRF_UARTE_Type *uarte_regs; /* Instance address */
304 uint32_t flags;
305 bool disable_rx;
306 const struct pinctrl_dev_config *pcfg;
307 #ifdef CONFIG_HAS_NORDIC_DMM
308 void *mem_reg;
309 #endif
310 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
/* Non-zero in case of high speed instances. Baudrate is adjusted by that ratio. */
312 uint32_t clock_freq;
313 #else
314 #ifdef UARTE_HAS_FRAME_TIMEOUT
315 uint32_t baudrate;
316 #endif
317 nrf_uarte_baudrate_t nrf_baudrate;
318 nrf_uarte_config_t hw_config;
319 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
320
321 #ifdef UARTE_ANY_ASYNC
322 nrfx_timer_t timer;
323 uint8_t *tx_cache;
324 uint8_t *rx_flush_buf;
325 #endif
326 uint8_t *poll_out_byte;
327 uint8_t *poll_in_byte;
328 };
329
/* Using a macro instead of a static inline function to handle the NO_OPTIMIZATIONS
 * case where static inline fails at link time.
332 */
333 #define HW_RX_COUNTING_ENABLED(config) \
334 (IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \
335 (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false)
336
static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev)
338 {
339 const struct uarte_nrfx_config *config = dev->config;
340
341 return config->uarte_regs;
342 }
343
static void endtx_isr(const struct device *dev)
345 {
346 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
347
348 unsigned int key = irq_lock();
349
350 if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
351 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
352 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
353 }
354
355 irq_unlock(key);
356
357 }
358
/** @brief Disable the UARTE peripheral if it is no longer used by RX or TX.
 *
 * It must be called with interrupts locked so that deciding whether any direction is
 * still using the UARTE is performed atomically with disabling the UARTE peripheral.
 * Otherwise it would be possible that after clearing the flags we get preempted, UARTE
 * gets enabled from a higher priority context, and when we come back UARTE is disabled
 * here.
 * @param dev Device.
 * @param dis_mask Mask of the direction (RX or TX) which no longer uses the UARTE instance.
368 */
static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask)
370 {
371 struct uarte_nrfx_data *data = dev->data;
372
373 data->flags &= ~dis_mask;
374 if (data->flags & UARTE_FLAG_LOW_POWER) {
375 return;
376 }
377
378 #if defined(UARTE_ANY_ASYNC) && !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
379 const struct uarte_nrfx_config *config = dev->config;
380
381 if (data->async && HW_RX_COUNTING_ENABLED(config)) {
382 nrfx_timer_disable(&config->timer);
383 /* Timer/counter value is reset when disabled. */
384 data->async->rx.total_byte_cnt = 0;
385 data->async->rx.total_user_byte_cnt = 0;
386 }
387 #endif
388
389 #ifdef CONFIG_SOC_NRF54H20_GPD
390 const struct uarte_nrfx_config *cfg = dev->config;
391
392 nrf_gpd_retain_pins_set(cfg->pcfg, true);
393 #endif
394 nrf_uarte_disable(get_uarte_instance(dev));
395 }
396
397 #ifdef UARTE_ANY_NONE_ASYNC
398 /**
399 * @brief Interrupt service routine.
400 *
401 * This simply calls the callback function, if one exists.
402 *
403 * @param arg Argument to ISR.
404 */
static void uarte_nrfx_isr_int(const void *arg)
406 {
407 const struct device *dev = arg;
408 const struct uarte_nrfx_config *config = dev->config;
409 struct uarte_nrfx_data *data = dev->data;
410 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
411
/* Even if the interrupt driven and asynchronous APIs are disabled, the UART
 * interrupt is still used to stop TX, unless that is done using PPI.
414 */
415 if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) &&
416 nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK) &&
417 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
418 endtx_isr(dev);
419 }
420
421 bool txstopped = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED);
422
423 if (txstopped && (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config))) {
424 unsigned int key = irq_lock();
425
426 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
427 if (data->flags & UARTE_FLAG_POLL_OUT) {
428 data->flags &= ~UARTE_FLAG_POLL_OUT;
429 pm_device_runtime_put_async(dev, K_NO_WAIT);
430 }
431 } else {
432 uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
433 }
434 #ifdef UARTE_INTERRUPT_DRIVEN
435 if (!data->int_driven)
436 #endif
437 {
438 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
439 }
440
441 irq_unlock(key);
442 }
443
444 #ifdef UARTE_INTERRUPT_DRIVEN
445 if (!data->int_driven) {
446 return;
447 }
448
449 if (txstopped) {
450 data->int_driven->fifo_fill_lock = 0;
451 if (!data->int_driven->tx_irq_enabled) {
452
453 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
454 }
455
456 if (data->int_driven->disable_tx_irq) {
457 data->int_driven->disable_tx_irq = false;
458 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
459 pm_device_runtime_put_async(dev, K_NO_WAIT);
460 }
461 return;
462 }
463 }
464
465 if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
466 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
467 }
468
469 if (data->int_driven->cb) {
470 data->int_driven->cb(dev, data->int_driven->cb_data);
471 }
472 #endif /* UARTE_INTERRUPT_DRIVEN */
473 }
474 #endif /* UARTE_ANY_NONE_ASYNC */
475
476 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
477 /**
478 * @brief Set the baud rate
479 *
 * This routine sets the given baud rate for the UARTE.
481 *
482 * @param dev UARTE device struct
483 * @param baudrate Baud rate
484 *
485 * @return 0 on success or error code
486 */
static int baudrate_set(const struct device *dev, uint32_t baudrate)
488 {
489 const struct uarte_nrfx_config *config = dev->config;
490 /* calculated baudrate divisor */
491 nrf_uarte_baudrate_t nrf_baudrate = NRF_BAUDRATE(baudrate);
492
493 if (nrf_baudrate == 0) {
494 return -EINVAL;
495 }
496
/* Scale the baudrate setting for instances clocked faster than the 16 MHz base. */
498 if (config->clock_freq > 0U) {
499 nrf_baudrate /= config->clock_freq / NRF_UARTE_BASE_FREQUENCY_16MHZ;
500 }
501
502 #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND
503 struct uarte_nrfx_data *data = dev->data;
504
505 data->nrf_baudrate = nrf_baudrate;
506 #else
507 nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate);
508 #endif
509
510 return 0;
511 }
512
static int uarte_nrfx_configure(const struct device *dev,
514 const struct uart_config *cfg)
515 {
516 struct uarte_nrfx_data *data = dev->data;
517 nrf_uarte_config_t uarte_cfg;
518
519 #if defined(UARTE_CONFIG_STOP_Msk)
520 switch (cfg->stop_bits) {
521 case UART_CFG_STOP_BITS_1:
522 uarte_cfg.stop = NRF_UARTE_STOP_ONE;
523 break;
524 case UART_CFG_STOP_BITS_2:
525 uarte_cfg.stop = NRF_UARTE_STOP_TWO;
526 break;
527 default:
528 return -ENOTSUP;
529 }
530 #else
531 if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
532 return -ENOTSUP;
533 }
534 #endif
535
536 if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
537 return -ENOTSUP;
538 }
539
540 switch (cfg->flow_ctrl) {
541 case UART_CFG_FLOW_CTRL_NONE:
542 uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
543 break;
544 case UART_CFG_FLOW_CTRL_RTS_CTS:
545 uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
546 break;
547 default:
548 return -ENOTSUP;
549 }
550
551 #if defined(UARTE_CONFIG_PARITYTYPE_Msk)
552 uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
553 #endif
554 switch (cfg->parity) {
555 case UART_CFG_PARITY_NONE:
556 uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
557 break;
558 case UART_CFG_PARITY_EVEN:
559 uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
560 break;
561 #if defined(UARTE_CONFIG_PARITYTYPE_Msk)
562 case UART_CFG_PARITY_ODD:
563 uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
564 uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
565 break;
566 #endif
567 default:
568 return -ENOTSUP;
569 }
570
571 if (baudrate_set(dev, cfg->baudrate) != 0) {
572 return -ENOTSUP;
573 }
574
575 #ifdef UARTE_HAS_FRAME_TIMEOUT
576 uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN;
577 #endif
578 nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg);
579
580 data->uart_config = *cfg;
581
582 return 0;
583 }
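/* Usage sketch (application side, illustration only, not part of this driver):
 * runtime reconfiguration goes through the generic UART API, e.g.
 *
 *   struct uart_config cfg = {
 *           .baudrate = 115200,
 *           .parity = UART_CFG_PARITY_NONE,
 *           .stop_bits = UART_CFG_STOP_BITS_1,
 *           .data_bits = UART_CFG_DATA_BITS_8,
 *           .flow_ctrl = UART_CFG_FLOW_CTRL_NONE,
 *   };
 *   int err = uart_configure(uart_dev, &cfg);
 *
 * where uart_dev is assumed to be a UARTE device handle obtained from devicetree.
 */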
584
static int uarte_nrfx_config_get(const struct device *dev,
586 struct uart_config *cfg)
587 {
588 struct uarte_nrfx_data *data = dev->data;
589
590 *cfg = data->uart_config;
591 return 0;
592 }
593 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
594
595
static int uarte_nrfx_err_check(const struct device *dev)
597 {
598 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
/* Register bitfields map to the defines in uart.h. */
600 return nrf_uarte_errorsrc_get_and_clear(uarte);
601 }
602
/* Function returns true if a new transfer can be started. Since TXSTOPPED
 * (and ENDTX) is cleared before triggering a new transfer, TX is ready for a new
 * transfer if either event is set.
606 */
static bool is_tx_ready(const struct device *dev)
608 {
609 const struct uarte_nrfx_config *config = dev->config;
610 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
611 bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX ||
612 IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT);
613
614 return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
615 (!ppi_endtx ?
616 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) : 0);
617 }
618
619 /* Wait until the transmitter is in the idle state. When this function returns,
 * IRQs are locked with the returned key.
621 */
static int wait_tx_ready(const struct device *dev)
623 {
624 unsigned int key;
625
626 do {
/* Wait an arbitrary time before backing off. */
628 bool res;
629
630 #if defined(CONFIG_ARCH_POSIX)
631 NRFX_WAIT_FOR(is_tx_ready(dev), 33, 3, res);
632 #else
633 NRFX_WAIT_FOR(is_tx_ready(dev), 100, 1, res);
634 #endif
635
636 if (res) {
637 key = irq_lock();
638 if (is_tx_ready(dev)) {
639 break;
640 }
641
642 irq_unlock(key);
643 }
644 if (IS_ENABLED(CONFIG_MULTITHREADING)) {
645 k_msleep(1);
646 }
647 } while (1);
648
649 return key;
650 }
651
static void uarte_periph_enable(const struct device *dev)
653 {
654 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
655 const struct uarte_nrfx_config *config = dev->config;
656 struct uarte_nrfx_data *data = dev->data;
657
658 (void)data;
659 nrf_uarte_enable(uarte);
660 #ifdef CONFIG_SOC_NRF54H20_GPD
661 nrf_gpd_retain_pins_set(config->pcfg, false);
662 #endif
663 #if UARTE_BAUDRATE_RETENTION_WORKAROUND
664 nrf_uarte_baudrate_set(uarte,
665 COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE,
666 (data->nrf_baudrate), (config->nrf_baudrate)));
667 #endif
668
669 #ifdef UARTE_ANY_ASYNC
670 if (data->async) {
671 if (HW_RX_COUNTING_ENABLED(config)) {
672 const nrfx_timer_t *timer = &config->timer;
673
674 nrfx_timer_enable(timer);
675
676 for (int i = 0; i < data->async->rx.flush_cnt; i++) {
677 nrfx_timer_increment(timer);
678 }
679 }
680 return;
681 }
682 #endif
683
684 if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) {
685 nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1);
686 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
687 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
688 #if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE)
689 if (data->int_driven && data->int_driven->rx_irq_enabled) {
690 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
691 }
692 #endif
693 }
694 }
695
static void uarte_enable_locked(const struct device *dev, uint32_t act_mask)
697 {
698 struct uarte_nrfx_data *data = dev->data;
699 bool already_active = (data->flags & UARTE_FLAG_LOW_POWER) != 0;
700
701 data->flags |= act_mask;
702 if (already_active) {
/* The other direction is already enabled, so the UARTE is already enabled. */
704 return;
705 }
706
707 uarte_periph_enable(dev);
708 }
709
/* At this point we should have the IRQ locked and any previous transfer completed.
 * The transfer can be started; there is no need to wait for completion.
712 */
static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
714 {
715 const struct uarte_nrfx_config *config = dev->config;
716 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
717
718 #if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME)
719 enum pm_device_state state;
720
721 (void)pm_device_state_get(dev, &state);
722 if (state != PM_DEVICE_STATE_ACTIVE) {
723 return;
724 }
725 #endif
726
727 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
728 sys_cache_data_flush_range((void *)buf, len);
729 }
730
731 nrf_uarte_tx_buffer_set(uarte, buf, len);
732 if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) {
733 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
734 }
735 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);
736
737 if (LOW_POWER_ENABLED(config)) {
738 uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
739 }
740
741 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
742 }
743
744 #if defined(UARTE_ANY_ASYNC)
745 static void rx_timeout(struct k_timer *timer);
746 static void tx_timeout(struct k_timer *timer);
747
748 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
750
static int uarte_nrfx_rx_counting_init(const struct device *dev)
752 {
753 struct uarte_nrfx_data *data = dev->data;
754 const struct uarte_nrfx_config *cfg = dev->config;
755 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
756 int ret;
757
758 if (HW_RX_COUNTING_ENABLED(cfg)) {
759 nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG(
760 NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg));
761 uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY);
762 uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT);
763
764 tmr_config.mode = NRF_TIMER_MODE_COUNTER;
765 tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32;
766 ret = nrfx_timer_init(&cfg->timer,
767 &tmr_config,
768 timer_handler);
769 if (ret != NRFX_SUCCESS) {
770 LOG_ERR("Timer already initialized");
771 return -EINVAL;
772 } else {
773 nrfx_timer_clear(&cfg->timer);
774 }
775
776 ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi);
777 if (ret != NRFX_SUCCESS) {
778 LOG_ERR("Failed to allocate PPI Channel");
779 nrfx_timer_uninit(&cfg->timer);
780 return -EINVAL;
781 }
782
783 nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr);
784 nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi));
785 } else {
786 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
787 }
788
789 return 0;
790 }
791 #endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */
792
static int uarte_async_init(const struct device *dev)
794 {
795 struct uarte_nrfx_data *data = dev->data;
796 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
797 static const uint32_t rx_int_mask =
798 NRF_UARTE_INT_ENDRX_MASK |
799 NRF_UARTE_INT_RXSTARTED_MASK |
800 NRF_UARTE_INT_ERROR_MASK |
801 NRF_UARTE_INT_RXTO_MASK |
802 ((IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) &&
803 !IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) ? NRF_UARTE_INT_RXDRDY_MASK : 0);
804
805 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
806 int ret = uarte_nrfx_rx_counting_init(dev);
807
808 if (ret != 0) {
809 return ret;
810 }
811 #endif
812
813 nrf_uarte_int_enable(uarte, rx_int_mask);
814
815 k_timer_init(&data->async->rx.timer, rx_timeout, NULL);
816 k_timer_user_data_set(&data->async->rx.timer, (void *)dev);
817 k_timer_init(&data->async->tx.timer, tx_timeout, NULL);
818 k_timer_user_data_set(&data->async->tx.timer, (void *)dev);
819
820 return 0;
821 }
822
/* Attempt to start TX (asynchronous transfer). If the hardware is not ready, the pending
 * flag is set. When the current poll_out completes, the pending transfer is started.
 * The function must be called with interrupts locked.
826 */
static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *data)
828 {
829 nrf_uarte_int_enable(get_uarte_instance(dev), NRF_UARTE_INT_TXSTOPPED_MASK);
830 if (!is_tx_ready(dev)) {
831 /* Active poll out, postpone until it is completed. */
832 data->async->tx.pending = true;
833 } else {
834 data->async->tx.pending = false;
835 data->async->tx.amount = -1;
836 tx_start(dev, data->async->tx.xfer_buf, data->async->tx.xfer_len);
837 }
838 }
839
/* Set up the cache buffer (used for sending data located outside of RAM).
 * During setup, data is copied to the cache buffer and the transfer length is set.
 *
 * @return True if the cache was set, false if there is no more data to put in the cache.
844 */
static bool setup_tx_cache(const struct device *dev)
846 {
847 struct uarte_nrfx_data *data = dev->data;
848 const struct uarte_nrfx_config *config = dev->config;
849 size_t remaining = data->async->tx.len - data->async->tx.cache_offset;
850
851 if (!remaining) {
852 return false;
853 }
854
855 size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE);
856
857 data->async->tx.xfer_len = len;
858 data->async->tx.xfer_buf = config->tx_cache;
859 memcpy(config->tx_cache, &data->async->tx.buf[data->async->tx.cache_offset], len);
860
861 return true;
862 }
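/* For example, with tx.len of 300 bytes and CONFIG_UART_ASYNC_TX_CACHE_SIZE set
 * to 100 (illustrative value), the transfer is performed as three 100-byte chunks,
 * each copied into tx_cache before being handed to EasyDMA.
 */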
863
static bool has_hwfc(const struct device *dev)
865 {
866 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
867 struct uarte_nrfx_data *data = dev->data;
868
869 return data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS;
870 #else
871 const struct uarte_nrfx_config *config = dev->config;
872
873 return config->hw_config.hwfc == NRF_UARTE_HWFC_ENABLED;
874 #endif
875 }
876
static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
878 size_t len,
879 int32_t timeout)
880 {
881 struct uarte_nrfx_data *data = dev->data;
882 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
883
884 unsigned int key = irq_lock();
885
886 if (data->async->tx.len) {
887 irq_unlock(key);
888 return -EBUSY;
889 }
890
891 data->async->tx.len = len;
892 data->async->tx.buf = buf;
893
894 if (nrf_dma_accessible_check(uarte, buf)) {
895 data->async->tx.xfer_buf = buf;
896 data->async->tx.xfer_len = len;
897 } else {
898 data->async->tx.cache_offset = 0;
899 (void)setup_tx_cache(dev);
900 }
901
902 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
903 if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
/* If the instance does not support PM from ISR, the device shall
905 * already be turned on.
906 */
907 enum pm_device_state state;
908 int err;
909
910 err = pm_device_state_get(dev, &state);
911 (void)err;
912 __ASSERT_NO_MSG(err == 0);
if (state != PM_DEVICE_STATE_ACTIVE) {
/* Drop the pending TX state and release the lock taken above before bailing out. */
data->async->tx.len = 0;
data->async->tx.buf = NULL;
irq_unlock(key);
return -ENOTSUP;
}
916 }
917 pm_device_runtime_get(dev);
918 }
919
920 start_tx_locked(dev, data);
921
922 irq_unlock(key);
923
924 if (has_hwfc(dev) && timeout != SYS_FOREVER_US) {
925 k_timer_start(&data->async->tx.timer, K_USEC(timeout), K_NO_WAIT);
926 }
927 return 0;
928 }
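/* Usage sketch (application side, illustration only): the asynchronous TX path is
 * reached through uart_tx() after a callback has been registered, e.g.
 *
 *   static const uint8_t msg[] = "hello";
 *
 *   uart_callback_set(uart_dev, app_uart_cb, NULL);
 *   uart_tx(uart_dev, msg, sizeof(msg) - 1, 100 * USEC_PER_MSEC);
 *
 * app_uart_cb is a hypothetical application callback that receives UART_TX_DONE or
 * UART_TX_ABORTED. The timeout is only armed when HW flow control is enabled.
 */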
929
static int uarte_nrfx_tx_abort(const struct device *dev)
931 {
932 struct uarte_nrfx_data *data = dev->data;
933 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
934
935 if (data->async->tx.buf == NULL) {
936 return -EFAULT;
937 }
938
939 data->async->tx.pending = false;
940 k_timer_stop(&data->async->tx.timer);
941 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
942
943 return 0;
944 }
945
static void user_callback(const struct device *dev, struct uart_event *evt)
947 {
948 struct uarte_nrfx_data *data = dev->data;
949
950 if (data->async->user_callback) {
951 data->async->user_callback(dev, evt, data->async->user_data);
952 }
953 }
954
static void notify_uart_rx_rdy(const struct device *dev, size_t len)
956 {
957 struct uarte_nrfx_data *data = dev->data;
958 struct uart_event evt = {
959 .type = UART_RX_RDY,
960 .data.rx.buf = data->async->rx.buf,
961 .data.rx.len = len,
962 .data.rx.offset = data->async->rx.offset
963 };
964
965 user_callback(dev, &evt);
966 }
967
static void rx_buf_release(const struct device *dev, uint8_t *buf)
969 {
970 struct uart_event evt = {
971 .type = UART_RX_BUF_RELEASED,
972 .data.rx_buf.buf = buf,
973 };
974
975 user_callback(dev, &evt);
976 }
977
static void notify_rx_disable(const struct device *dev)
979 {
980 struct uart_event evt = {
981 .type = UART_RX_DISABLED,
982 };
983
984 user_callback(dev, (struct uart_event *)&evt);
985
986 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
987 pm_device_runtime_put_async(dev, K_NO_WAIT);
988 }
989 }
990
991 #ifdef UARTE_HAS_FRAME_TIMEOUT
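/* Convert a microsecond timeout into a number of bit periods at the given baudrate,
 * e.g. 1000 us at 115200 baud gives 115 bit periods (illustrative values), clamped
 * to the FRAMETIMEOUT counter range.
 */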
static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout)
993 {
994 uint64_t bauds = (uint64_t)baudrate * timeout / 1000000;
995
996 return MIN((uint32_t)bauds, UARTE_FRAMETIMEOUT_COUNTERTOP_Msk);
997 }
998 #endif
999
static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
1001 size_t len,
1002 int32_t timeout)
1003 {
1004 struct uarte_nrfx_data *data = dev->data;
1005 struct uarte_async_rx *async_rx = &data->async->rx;
1006 const struct uarte_nrfx_config *cfg = dev->config;
1007 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1008
1009 if (cfg->disable_rx) {
1010 __ASSERT(false, "TX only UARTE instance");
1011 return -ENOTSUP;
1012 }
1013
1014 /* Signal error if RX is already enabled or if the driver is waiting
1015 * for the RXTO event after a call to uart_rx_disable() to discard
1016 * data from the UARTE internal RX FIFO.
1017 */
1018 if (async_rx->enabled || async_rx->discard_fifo) {
1019 return -EBUSY;
1020 }
1021
1022 #ifdef CONFIG_HAS_NORDIC_DMM
1023 uint8_t *dma_buf;
1024 int ret = 0;
1025
1026 ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf);
1027 if (ret < 0) {
1028 return ret;
1029 }
1030
1031 async_rx->usr_buf = buf;
1032 buf = dma_buf;
1033 #endif
1034
1035 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1036 #ifdef UARTE_HAS_FRAME_TIMEOUT
1037 if (timeout != SYS_FOREVER_US) {
1038 uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE,
1039 (data->uart_config.baudrate), (cfg->baudrate));
1040
1041 async_rx->timeout = K_USEC(timeout);
1042 nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout));
1043 nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX);
1044 } else {
1045 async_rx->timeout = K_NO_WAIT;
1046 }
1047 #else
1048 async_rx->timeout = (timeout == SYS_FOREVER_US) ?
1049 K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV);
1050 async_rx->idle_cnt = 0;
1051 #endif /* UARTE_HAS_FRAME_TIMEOUT */
1052 #else
1053 async_rx->timeout_us = timeout;
1054 async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV;
1055 #endif
1056
1057 async_rx->buf = buf;
1058 async_rx->buf_len = len;
1059 async_rx->offset = 0;
1060 async_rx->next_buf = NULL;
1061 async_rx->next_buf_len = 0;
1062
1063 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1064 if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
/* If the instance does not support PM from ISR, the device shall
1066 * already be turned on.
1067 */
1068 enum pm_device_state state;
1069 int err;
1070
1071 err = pm_device_state_get(dev, &state);
1072 (void)err;
1073 __ASSERT_NO_MSG(err == 0);
1074 if (state != PM_DEVICE_STATE_ACTIVE) {
1075 return -ENOTSUP;
1076 }
1077 }
1078 pm_device_runtime_get(dev);
1079 }
1080
1081 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(cfg)) {
1082 if (async_rx->flush_cnt) {
1083 int cpy_len = MIN(len, async_rx->flush_cnt);
1084
1085 if (IS_ENABLED(UARTE_ANY_CACHE) &&
1086 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1087 sys_cache_data_invd_range(cfg->rx_flush_buf, cpy_len);
1088 }
1089
1090 memcpy(buf, cfg->rx_flush_buf, cpy_len);
1091
1092 if (IS_ENABLED(UARTE_ANY_CACHE) &&
1093 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1094 sys_cache_data_flush_range(buf, cpy_len);
1095 }
1096
1097 buf += cpy_len;
1098 len -= cpy_len;
1099
/* If the flushed content filled the whole new buffer, trigger the interrupt
 * to notify about the received data and disable RX from there.
1102 */
1103 if (!len) {
1104 async_rx->flush_cnt -= cpy_len;
1105 memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len],
1106 async_rx->flush_cnt);
1107 if (IS_ENABLED(UARTE_ANY_CACHE) &&
1108 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1109 sys_cache_data_flush_range(cfg->rx_flush_buf,
1110 async_rx->flush_cnt);
1111 }
1112 atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO);
1113 NRFX_IRQ_PENDING_SET(nrfx_get_irq_number(uarte));
1114 return 0;
1115 } else {
1116 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1117 if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) {
1118 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1119 k_timer_start(&async_rx->timer, async_rx->timeout,
1120 K_NO_WAIT);
1121 }
1122 #endif
1123 }
1124 }
1125 }
1126
1127 nrf_uarte_rx_buffer_set(uarte, buf, len);
1128
1129 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1130 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1131
1132 async_rx->enabled = true;
1133
1134 if (LOW_POWER_ENABLED(cfg)) {
1135 unsigned int key = irq_lock();
1136
1137 uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX);
1138 irq_unlock(key);
1139 }
1140
1141 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1142
1143 return 0;
1144 }
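/* Usage sketch (application side, illustration only): reception is started with
 * uart_rx_enable() and further buffers are supplied in response to the
 * UART_RX_BUF_REQUEST event, e.g.
 *
 *   static uint8_t rx_bufs[2][64];
 *
 *   uart_rx_enable(uart_dev, rx_bufs[0], sizeof(rx_bufs[0]), 10 * USEC_PER_MSEC);
 *   (on UART_RX_BUF_REQUEST) uart_rx_buf_rsp(uart_dev, rx_bufs[1], sizeof(rx_bufs[1]));
 *
 * Buffers are assumed to reside in memory accessible by EasyDMA.
 */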
1145
static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
1147 size_t len)
1148 {
1149 struct uarte_nrfx_data *data = dev->data;
1150 struct uarte_async_rx *async_rx = &data->async->rx;
1151 int err;
1152 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1153 unsigned int key = irq_lock();
1154
1155 if (async_rx->buf == NULL) {
1156 err = -EACCES;
1157 } else if (async_rx->next_buf == NULL) {
1158 #ifdef CONFIG_HAS_NORDIC_DMM
1159 uint8_t *dma_buf;
1160 const struct uarte_nrfx_config *config = dev->config;
1161
1162 err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf);
1163 if (err < 0) {
1164 return err;
1165 }
1166 async_rx->next_usr_buf = buf;
1167 buf = dma_buf;
1168 #endif
1169 async_rx->next_buf = buf;
1170 async_rx->next_buf_len = len;
1171 nrf_uarte_rx_buffer_set(uarte, buf, len);
/* If the buffer is shorter than the RX FIFO then there is a risk that, due
 * to interrupt handling latency, the ENDRX event is not handled on time
 * and, because of the ENDRX_STARTRX short, data starts to be overwritten.
 * In that case the short is not enabled and the ENDRX event handler will
 * manually start RX for that buffer. Thanks to the RX FIFO there is a
 * 5-byte time window for doing that. If the interrupt latency is higher and
 * there is no HWFC, in both cases data will be lost or corrupted.
1179 */
1180 if (len >= UARTE_HW_RX_FIFO_SIZE) {
1181 nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1182 }
1183 err = 0;
1184 } else {
1185 err = -EBUSY;
1186 }
1187
1188 irq_unlock(key);
1189
1190 return err;
1191 }
1192
static int uarte_nrfx_callback_set(const struct device *dev,
1194 uart_callback_t callback,
1195 void *user_data)
1196 {
1197 struct uarte_nrfx_data *data = dev->data;
1198
1199 if (!data->async) {
1200 return -ENOTSUP;
1201 }
1202
1203 data->async->user_callback = callback;
1204 data->async->user_data = user_data;
1205
1206 return 0;
1207 }
1208
static int uarte_nrfx_rx_disable(const struct device *dev)
1210 {
1211 struct uarte_nrfx_data *data = dev->data;
1212 struct uarte_async_rx *async_rx = &data->async->rx;
1213 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1214 int key;
1215
1216 if (async_rx->buf == NULL) {
1217 return -EFAULT;
1218 }
1219
1220 k_timer_stop(&async_rx->timer);
1221
1222 key = irq_lock();
1223
1224 if (async_rx->next_buf != NULL) {
1225 nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1226 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1227 }
1228
1229 async_rx->enabled = false;
1230 async_rx->discard_fifo = true;
1231
1232 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1233 irq_unlock(key);
1234
1235 return 0;
1236 }
1237
static void tx_timeout(struct k_timer *timer)
1239 {
1240 const struct device *dev = k_timer_user_data_get(timer);
1241 (void) uarte_nrfx_tx_abort(dev);
1242 }
1243
1244 /**
 * The whole timeout is divided by RX_TIMEOUT_DIV into smaller units, and rx_timeout
 * is executed periodically every rx_timeout_slab us. If data was received between
 * executions, the countdown is restarted from the full timeout; if not,
 * rx_timeout_slab is subtracted from rx_timeout_left.
 * If rx_timeout_left is less than rx_timeout_slab it means that receiving has
 * timed out and the user should be notified about that.
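 * For example, with a 5000 us timeout and RX_TIMEOUT_DIV of 5 (illustrative
 * values), rx_timeout runs every 1000 us and reports a timeout after roughly
 * 5000 us without new data.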
1251 */
static void rx_timeout(struct k_timer *timer)
1253 {
1254 const struct device *dev = k_timer_user_data_get(timer);
1255
1256 #if CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1257 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1258
1259 #ifdef UARTE_HAS_FRAME_TIMEOUT
1260 if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
1261 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1262 }
1263 return;
1264 #else /* UARTE_HAS_FRAME_TIMEOUT */
1265 struct uarte_nrfx_data *data = dev->data;
1266 struct uarte_async_rx *async_rx = &data->async->rx;
1267
1268 if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
1269 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1270 async_rx->idle_cnt = 0;
1271 } else {
1272 async_rx->idle_cnt++;
/* We compare against RX_TIMEOUT_DIV - 1 to get a timeout that is rather too early
 * than too late. idle_cnt is reset when the last RX activity (RXDRDY event) is
 * detected, after which RX may stay inactive for the whole
 * RX timeout period (this is the case when the transmission is short compared
 * to the timeout, for example the timeout is 50 ms and the transmission of a few
 * bytes takes less than 1 ms). In that case, if we compared against RX_TIMEOUT_DIV,
 * the RX notification would come after (RX_TIMEOUT_DIV + 1) * timeout.
1280 */
1281 if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) {
1282 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1283 return;
1284 }
1285 }
1286
1287 k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT);
1288 #endif /* UARTE_HAS_FRAME_TIMEOUT */
1289 #else /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1290 const struct uarte_nrfx_config *cfg = dev->config;
1291 struct uarte_nrfx_data *data = dev->data;
1292 struct uarte_async_rx *async_rx = &data->async->rx;
1293 uint32_t read;
1294
1295 if (async_rx->is_in_irq) {
1296 return;
1297 }
1298
/* Disable the ENDRX interrupt; if an ENDRX event is generated, it will be
 * handled after the rx_timeout routine completes.
1301 */
1302 nrf_uarte_int_disable(get_uarte_instance(dev),
1303 NRF_UARTE_INT_ENDRX_MASK);
1304
1305 if (HW_RX_COUNTING_ENABLED(cfg)) {
1306 read = nrfx_timer_capture(&cfg->timer, 0);
1307 } else {
1308 read = async_rx->cnt.cnt;
1309 }
1310
1311 /* Check if data was received since last function call */
1312 if (read != async_rx->total_byte_cnt) {
1313 async_rx->total_byte_cnt = read;
1314 async_rx->timeout_left = async_rx->timeout_us;
1315 }
1316
/* Check if there is data that was not sent to the user yet.
 * Note though that 'len' is a count of data bytes received, but not
 * necessarily the amount available in the current buffer.
1320 */
1321 int32_t len = async_rx->total_byte_cnt - async_rx->total_user_byte_cnt;
1322
1323 if (!HW_RX_COUNTING_ENABLED(cfg) &&
1324 (len < 0)) {
/* Prevent a too low value of rx_cnt.cnt which may occur due to
 * latencies in handling of the RXDRDY interrupt.
1327 * At this point, the number of received bytes is at least
1328 * equal to what was reported to the user.
1329 */
1330 async_rx->cnt.cnt = async_rx->total_user_byte_cnt;
1331 len = 0;
1332 }
1333
/* Check for the current buffer being full.
 * If the UART receives characters before the ENDRX is handled
 * and the 'next' buffer is set up, then the SHORT between ENDRX and
 * STARTRX will mean that data will be going into the 'next' buffer
1338 * until the ENDRX event gets a chance to be handled.
1339 */
1340 bool clipped = false;
1341
1342 if (len + async_rx->offset > async_rx->buf_len) {
1343 len = async_rx->buf_len - async_rx->offset;
1344 clipped = true;
1345 }
1346
1347 if (len > 0) {
1348 if (clipped || (async_rx->timeout_left < async_rx->timeout_slab)) {
1349 /* rx_timeout us elapsed since last receiving */
1350 if (async_rx->buf != NULL) {
1351 notify_uart_rx_rdy(dev, len);
1352 async_rx->offset += len;
1353 async_rx->total_user_byte_cnt += len;
1354 }
1355 } else {
1356 async_rx->timeout_left -= async_rx->timeout_slab;
1357 }
1358
1359 /* If there's nothing left to report until the buffers are
1360 * switched then the timer can be stopped
1361 */
1362 if (clipped) {
1363 k_timer_stop(&async_rx->timer);
1364 }
1365 }
1366
1367 nrf_uarte_int_enable(get_uarte_instance(dev),
1368 NRF_UARTE_INT_ENDRX_MASK);
1369 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1370 }
1371
1372 #define UARTE_ERROR_FROM_MASK(mask) \
1373 ((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN \
1374 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY \
1375 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING \
1376 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK \
1377 : 0)
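/* When several error bits are set at once, only the highest-priority one is
 * reported, in the order: overrun, parity, framing, break.
 */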
1378
static void error_isr(const struct device *dev)
1380 {
1381 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1382 uint32_t err = nrf_uarte_errorsrc_get(uarte);
1383 struct uart_event evt = {
1384 .type = UART_RX_STOPPED,
1385 .data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err),
1386 };
1387
1388 /* For VPR cores read and write may be reordered - barrier needed. */
1389 nrf_barrier_r();
1390 nrf_uarte_errorsrc_clear(uarte, err);
1391
1392 user_callback(dev, &evt);
1393 (void) uarte_nrfx_rx_disable(dev);
1394 }
1395
static void rxstarted_isr(const struct device *dev)
1397 {
1398 struct uart_event evt = {
1399 .type = UART_RX_BUF_REQUEST,
1400 };
1401
1402 #ifndef UARTE_HAS_FRAME_TIMEOUT
1403 struct uarte_nrfx_data *data = dev->data;
1404 struct uarte_async_rx *async_rx = &data->async->rx;
1405
1406 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1407 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1408
1409 if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) {
1410 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
1411 }
1412 #else
1413 if (async_rx->timeout_us != SYS_FOREVER_US) {
1414 k_timeout_t timeout = K_USEC(async_rx->timeout_slab);
1415
1416 async_rx->timeout_left = async_rx->timeout_us;
1417 k_timer_start(&async_rx->timer, timeout, timeout);
1418 }
1419 #endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */
1420 #endif /* !UARTE_HAS_FRAME_TIMEOUT */
1421 user_callback(dev, &evt);
1422 }
1423
static void endrx_isr(const struct device *dev)
1425 {
1426 struct uarte_nrfx_data *data = dev->data;
1427 struct uarte_async_rx *async_rx = &data->async->rx;
1428 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1429
1430 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1431 async_rx->is_in_irq = true;
1432 #endif
1433
1434 /* ensure rx timer is stopped - it will be restarted in RXSTARTED
1435 * handler if needed
1436 */
1437 k_timer_stop(&async_rx->timer);
1438
1439 /* this is the amount that the EasyDMA controller has copied into the
1440 * buffer
1441 */
1442 const int rx_amount = nrf_uarte_rx_amount_get(uarte) + async_rx->flush_cnt;
1443
1444 #ifdef CONFIG_HAS_NORDIC_DMM
1445 const struct uarte_nrfx_config *config = dev->config;
1446 int err =
1447 dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, rx_amount, async_rx->buf);
1448
1449 (void)err;
1450 __ASSERT_NO_MSG(err == 0);
1451 async_rx->buf = async_rx->usr_buf;
1452 #endif
1453 async_rx->flush_cnt = 0;
1454
/* The 'rx_offset' can be bigger than 'rx_amount', so the length
 * of data we report back to the user may need to be clipped.
 * This can happen because the 'rx_offset' count derives from RXDRDY
 * events, which can already occur for the next buffer before we are
 * here to handle this buffer. (The next buffer is already active
 * because of the ENDRX_STARTRX shortcut.)
1461 */
1462 int rx_len = rx_amount - async_rx->offset;
1463
1464 if (rx_len < 0) {
1465 rx_len = 0;
1466 }
1467
1468 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1469 async_rx->total_user_byte_cnt += rx_len;
1470 #endif
1471
1472 /* Only send the RX_RDY event if there is something to send */
1473 if (rx_len > 0) {
1474 notify_uart_rx_rdy(dev, rx_len);
1475 }
1476
1477 rx_buf_release(dev, async_rx->buf);
1478 async_rx->buf = async_rx->next_buf;
1479 async_rx->buf_len = async_rx->next_buf_len;
1480 #ifdef CONFIG_HAS_NORDIC_DMM
1481 async_rx->usr_buf = async_rx->next_usr_buf;
1482 #endif
1483 async_rx->next_buf = NULL;
1484 async_rx->next_buf_len = 0;
1485 async_rx->offset = 0;
1486
1487 if (async_rx->enabled) {
1488 /* If there is a next buffer, then STARTRX will have already been
1489 * invoked by the short (the next buffer will be filling up already)
 * and here we just swap which buffer the driver is following;
 * the next rx_timeout() will update the rx_offset.
1492 */
1493 unsigned int key = irq_lock();
1494
1495 if (async_rx->buf) {
/* The check is based on the assumption that the ISR handles
 * ENDRX before RXSTARTED, so if the short was set on time, the RXSTARTED
 * event will be set.
1499 */
1500 if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
1501 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1502 }
1503 /* Remove the short until the subsequent next buffer is setup */
1504 nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
1505 } else {
1506 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
1507 }
1508
1509 irq_unlock(key);
1510 }
1511
1512 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1513 async_rx->is_in_irq = false;
1514 #endif
1515 }
1516
1517 /** @brief RX FIFO flushing
1518 *
 * Due to a HW bug, the RX.AMOUNT register is not updated when the FIFO was empty,
 * so a workaround is applied which checks the RXSTARTED event. If that event is set,
 * it means that the FIFO was not empty.
1522 *
1523 * @param dev Device.
1524 *
 * @return Number of bytes flushed from the FIFO.
1526 */
static uint8_t rx_flush(const struct device *dev)
1528 {
1529 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1530 const struct uarte_nrfx_config *config = dev->config;
1531 uint32_t rx_amount;
1532
1533 nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE);
1534 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX);
1535 while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1536 /* empty */
1537 }
1538 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1539
1540 if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) {
1541 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1542 rx_amount = nrf_uarte_rx_amount_get(uarte);
1543 } else if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
1544 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1545 rx_amount = nrf_uarte_rx_amount_get(uarte);
1546 } else {
1547 rx_amount = 0;
1548 }
1549
1550 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE) &&
1551 rx_amount) {
1552 sys_cache_data_invd_range(config->rx_flush_buf, rx_amount);
1553 }
1554
1555 return rx_amount;
1556 }
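/* Bytes flushed here are kept in rx_flush_buf and are copied into the next user
 * buffer when RX is re-enabled (see uarte_nrfx_rx_enable()).
 */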
1557
/* This handler is called when the receiver is stopped. If RX was aborted,
 * data from the FIFO is flushed.
1560 */
static void rxto_isr(const struct device *dev)
1562 {
1563 const struct uarte_nrfx_config *config = dev->config;
1564 struct uarte_nrfx_data *data = dev->data;
1565 struct uarte_async_rx *async_rx = &data->async->rx;
1566
1567 if (async_rx->buf) {
1568 #ifdef CONFIG_HAS_NORDIC_DMM
1569 (void)dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, 0, async_rx->buf);
1570 async_rx->buf = async_rx->usr_buf;
1571 #endif
1572 rx_buf_release(dev, async_rx->buf);
1573 async_rx->buf = NULL;
1574 }
1575
1576 /* This point can be reached in two cases:
1577 * 1. RX is disabled because all provided RX buffers have been filled.
1578 * 2. RX was explicitly disabled by a call to uart_rx_disable().
1579 * In both cases, the rx_enabled flag is cleared, so that RX can be
1580 * enabled again.
1581 * In the second case, additionally, data from the UARTE internal RX
1582 * FIFO need to be discarded.
1583 */
1584 async_rx->enabled = false;
1585 if (async_rx->discard_fifo) {
1586 async_rx->discard_fifo = false;
1587 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1588 if (HW_RX_COUNTING_ENABLED(config)) {
/* It needs to be included because the TIMER+PPI setup got RXDRDY events
1590 * and counted those flushed bytes.
1591 */
1592 async_rx->total_user_byte_cnt += rx_flush(dev);
1593 }
1594 #endif
1595 } else if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config)) {
1596 async_rx->flush_cnt = rx_flush(dev);
1597 }
1598
1599 #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
1600 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1601 #ifdef UARTE_HAS_FRAME_TIMEOUT
1602 nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX);
1603 #endif
1604 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
1605 #endif
1606
1607 if (LOW_POWER_ENABLED(config)) {
1608 uint32_t key = irq_lock();
1609
1610 uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX);
1611 irq_unlock(key);
1612 }
1613
1614 notify_rx_disable(dev);
1615 }
1616
static void txstopped_isr(const struct device *dev)
1618 {
1619 const struct uarte_nrfx_config *config = dev->config;
1620 struct uarte_nrfx_data *data = dev->data;
1621 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1622 unsigned int key;
1623
1624 key = irq_lock();
1625
1626 size_t amount = (data->async->tx.amount >= 0) ?
1627 data->async->tx.amount : nrf_uarte_tx_amount_get(uarte);
1628
1629 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1630 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1631 if (data->flags & UARTE_FLAG_POLL_OUT) {
1632 pm_device_runtime_put_async(dev, K_NO_WAIT);
1633 data->flags &= ~UARTE_FLAG_POLL_OUT;
1634 }
1635 } else if (LOW_POWER_ENABLED(config)) {
1636 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1637 uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX);
1638 }
1639
1640 irq_unlock(key);
1641
1642 if (!data->async->tx.buf) {
1643 return;
1644 }
1645
/* If there is a pending TX request, it means that uart_tx()
 * was called while a uart_poll_out was ongoing. Handling the
 * TXSTOPPED interrupt means that uart_poll_out has completed.
1649 */
1650 if (data->async->tx.pending) {
1651 key = irq_lock();
1652 start_tx_locked(dev, data);
1653 irq_unlock(key);
1654 return;
1655 }
1656
1657 /* Cache buffer is used because tx_buf wasn't in RAM. */
1658 if (data->async->tx.buf != data->async->tx.xfer_buf) {
/* In that case set up the next chunk. If that was the last chunk,
 * fall back to reporting TX_DONE.
1661 */
1662 if (amount == data->async->tx.xfer_len) {
1663 data->async->tx.cache_offset += amount;
1664 if (setup_tx_cache(dev)) {
1665 key = irq_lock();
1666 start_tx_locked(dev, data);
1667 irq_unlock(key);
1668 return;
1669 }
1670
1671 /* Amount is already included in cache_offset. */
1672 amount = data->async->tx.cache_offset;
1673 } else {
1674 /* TX was aborted, include cache_offset in amount. */
1675 amount += data->async->tx.cache_offset;
1676 }
1677 }
1678
1679 k_timer_stop(&data->async->tx.timer);
1680
1681 struct uart_event evt = {
1682 .data.tx.buf = data->async->tx.buf,
1683 .data.tx.len = amount,
1684 };
1685 if (amount == data->async->tx.len) {
1686 evt.type = UART_TX_DONE;
1687 } else {
1688 evt.type = UART_TX_ABORTED;
1689 }
1690
1691 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1692 data->async->tx.buf = NULL;
1693 data->async->tx.len = 0;
1694
1695 user_callback(dev, &evt);
1696
1697 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1698 pm_device_runtime_put_async(dev, K_NO_WAIT);
1699 }
1700 }
1701
static void rxdrdy_isr(const struct device *dev)
1703 {
1704 #if !defined(UARTE_HAS_FRAME_TIMEOUT)
1705 struct uarte_nrfx_data *data = dev->data;
1706
1707 #if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
1708 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1709
1710 data->async->rx.idle_cnt = 0;
1711 k_timer_start(&data->async->rx.timer, data->async->rx.timeout, K_NO_WAIT);
1712 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
1713 #else
1714 data->async->rx.cnt.cnt++;
1715 #endif
1716 #endif /* !UARTE_HAS_FRAME_TIMEOUT */
1717 }
1718
static bool event_check_clear(NRF_UARTE_Type *uarte, nrf_uarte_event_t event,
1720 uint32_t int_mask, uint32_t int_en_mask)
1721 {
1722 if (nrf_uarte_event_check(uarte, event) && (int_mask & int_en_mask)) {
1723 nrf_uarte_event_clear(uarte, event);
1724 return true;
1725 }
1726
1727 return false;
1728 }
1729
static void uarte_nrfx_isr_async(const void *arg)
1731 {
1732 const struct device *dev = arg;
1733 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1734 const struct uarte_nrfx_config *config = dev->config;
1735 struct uarte_nrfx_data *data = dev->data;
1736 struct uarte_async_rx *async_rx = &data->async->rx;
1737 uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX);
1738
1739 if (!(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT))
1740 && event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) {
1741 rxdrdy_isr(dev);
1742
1743 }
1744
1745 if (event_check_clear(uarte, NRF_UARTE_EVENT_ERROR, NRF_UARTE_INT_ERROR_MASK, imask)) {
1746 error_isr(dev);
1747 }
1748
1749 if (event_check_clear(uarte, NRF_UARTE_EVENT_ENDRX, NRF_UARTE_INT_ENDRX_MASK, imask)) {
1751 endrx_isr(dev);
1752 }
1753
1754 /* RXSTARTED must be handled after ENDRX because it starts the RX timeout,
1755  * and if the order were swapped then ENDRX would stop this timeout.
1756  * Skip it if ENDRX is also set: that means ENDRX occurred after the ENDRX
1757  * check above, which may happen when the UARTE interrupt got preempted.
1758  * The events are left uncleared, so the ISR will be called again and
1759  * ENDRX will be handled first.
1760  */
1761 if ((imask & NRF_UARTE_INT_RXSTARTED_MASK) &&
1762 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) &&
1763 !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1764 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
1765 rxstarted_isr(dev);
1766 }
1767
1768 /* RXTO must be handled after ENDRX, which notifies about the buffer.
1769  * Skip it if ENDRX is also set: that means ENDRX occurred after the ENDRX
1770  * check above, which may happen when the UARTE interrupt got preempted.
1771  * The events are left uncleared, so the ISR will be called again and
1772  * ENDRX will be handled first.
1773  */
1774 if ((imask & NRF_UARTE_INT_RXTO_MASK) &&
1775 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) &&
1776 !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1777 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
1778 rxto_isr(dev);
1779 }
1780
1781 if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) &&
1782 (imask & NRF_UARTE_INT_ENDTX_MASK) &&
1783 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
1784 endtx_isr(dev);
1785 }
1786
1787 if ((imask & NRF_UARTE_INT_TXSTOPPED_MASK) &&
1788 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
1789 txstopped_isr(dev);
1790 }
1791
1792 if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) {
1793 #ifdef CONFIG_HAS_NORDIC_DMM
1794 int ret;
1795
1796 ret = dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, async_rx->buf_len,
1797 async_rx->buf);
1798
1799 (void)ret;
1800 __ASSERT_NO_MSG(ret == 0);
1801 async_rx->buf = async_rx->usr_buf;
1802 #endif
1803 notify_uart_rx_rdy(dev, async_rx->buf_len);
1804 rx_buf_release(dev, async_rx->buf);
1805 async_rx->buf_len = 0;
1806 async_rx->buf = NULL;
1807 notify_rx_disable(dev);
1808 }
1809 }
1810
1811 #endif /* UARTE_ANY_ASYNC */
1812
1813 /**
1814 * @brief Poll the device for input.
1815 *
1816 * @param dev UARTE device struct
1817 * @param c Pointer to character
1818 *
1819  * @return 0 if a character arrived, -1 if no data is available, -ENOTSUP if the async API is used.
1820 */
1821 static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c)
1822 {
1823 const struct uarte_nrfx_config *config = dev->config;
1824 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1825
1826 #ifdef UARTE_ANY_ASYNC
1827 struct uarte_nrfx_data *data = dev->data;
1828
1829 if (data->async) {
1830 return -ENOTSUP;
1831 }
1832 #endif
1833
1834 if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1835 return -1;
1836 }
1837
1838 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1839 sys_cache_data_invd_range(config->poll_in_byte, 1);
1840 }
1841
1842 *c = *config->poll_in_byte;
1843
1844 /* clear the interrupt */
1845 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1846 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1847
1848 return 0;
1849 }
1850
1851 /**
1852 * @brief Output a character in polled mode.
1853 *
1854 * @param dev UARTE device struct
1855 * @param c Character to send
1856 */
1857 static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c)
1858 {
1859 const struct uarte_nrfx_config *config = dev->config;
1860 bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
1861 struct uarte_nrfx_data *data = dev->data;
1862 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1863 unsigned int key;
1864
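/* Transmission uses a dedicated single-byte RAM buffer (config->poll_out_byte)
 * so that the character is accessible to EasyDMA. Interrupts stay locked from
 * the moment the transmitter is ready until TX has been started below.
 */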
1865 if (isr_mode) {
1866 while (1) {
1867 key = irq_lock();
1868 if (is_tx_ready(dev)) {
1869 #if UARTE_ANY_ASYNC
1870 if (data->async && data->async->tx.len &&
1871 data->async->tx.amount < 0) {
1872 data->async->tx.amount = nrf_uarte_tx_amount_get(uarte);
1873 }
1874 #endif
1875 break;
1876 }
1877
1878 irq_unlock(key);
1879 Z_SPIN_DELAY(3);
1880 }
1881 } else {
1882 key = wait_tx_ready(dev);
1883 }
1884
1885 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1886 if (!IS_PM_ISR_SAFE(dev) && k_is_in_isr()) {
1887 /* If the instance does not support PM from ISR, the device must
1888  * already be turned on.
1889  */
1890 enum pm_device_state state;
1891 int err;
1892
1893 err = pm_device_state_get(dev, &state);
1894 (void)err;
1895 __ASSERT_NO_MSG(err == 0);
1896 if (state != PM_DEVICE_STATE_ACTIVE) {
1897 irq_unlock(key);
1898 return;
1899 }
1900 }
1901
1902 if (!(data->flags & UARTE_FLAG_POLL_OUT)) {
1903 data->flags |= UARTE_FLAG_POLL_OUT;
1904 pm_device_runtime_get(dev);
1905 }
1906 }
1907
1908 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config)) {
1909 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1910 }
1911
1912 *config->poll_out_byte = c;
1913 tx_start(dev, config->poll_out_byte, 1);
1914
1915 irq_unlock(key);
1916 }
1917
1918
1919 #ifdef UARTE_INTERRUPT_DRIVEN
1920 /** Interrupt driven FIFO fill function */
1921 static int uarte_nrfx_fifo_fill(const struct device *dev,
1922 const uint8_t *tx_data,
1923 int len)
1924 {
1925 struct uarte_nrfx_data *data = dev->data;
1926
1927 len = MIN(len, data->int_driven->tx_buff_size);
1928 if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) {
1929 return 0;
1930 }
1931
1932 /* Copy data to RAM buffer for EasyDMA transfer */
1933 memcpy(data->int_driven->tx_buffer, tx_data, len);
1934
1935 unsigned int key = irq_lock();
1936
1937 if (!is_tx_ready(dev)) {
1938 data->int_driven->fifo_fill_lock = 0;
1939 len = 0;
1940 } else {
1941 tx_start(dev, data->int_driven->tx_buffer, len);
1942 }
1943
1944 irq_unlock(key);
1945
1946 return len;
1947 }
1948
1949 /** Interrupt driven FIFO read function */
1950 static int uarte_nrfx_fifo_read(const struct device *dev,
1951 uint8_t *rx_data,
1952 const int size)
1953 {
1954 int num_rx = 0;
1955 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1956 const struct uarte_nrfx_config *config = dev->config;
1957
1958 if (size > 0 && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
1959 /* Clear the interrupt */
1960 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
1961
1962 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1963 sys_cache_data_invd_range(config->poll_in_byte, 1);
1964 }
1965
1966 /* Receive a character */
1967 rx_data[num_rx++] = *config->poll_in_byte;
1968
1969 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
1970 }
1971
1972 return num_rx;
1973 }
1974
1975 /** Interrupt driven transfer enabling function */
1976 static void uarte_nrfx_irq_tx_enable(const struct device *dev)
1977 {
1978 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1979 struct uarte_nrfx_data *data = dev->data;
1980
1981 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1982 pm_device_runtime_get(dev);
1983 }
1984
1985 unsigned int key = irq_lock();
1986
1987 data->int_driven->disable_tx_irq = false;
1988 data->int_driven->tx_irq_enabled = true;
1989 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
1990
1991 irq_unlock(key);
1992 }
1993
1994 /** Interrupt driven transfer disabling function */
1995 static void uarte_nrfx_irq_tx_disable(const struct device *dev)
1996 {
1997 struct uarte_nrfx_data *data = dev->data;
1998 /* TX IRQ will be disabled after current transmission is finished */
1999 data->int_driven->disable_tx_irq = true;
2000 data->int_driven->tx_irq_enabled = false;
2001 }
2002
2003 /** Interrupt driven transfer ready function */
2004 static int uarte_nrfx_irq_tx_ready_complete(const struct device *dev)
2005 {
2006 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2007 struct uarte_nrfx_data *data = dev->data;
2008
2009 /* The TXSTOPPED event is kept set whenever TX is idle, so the ISR fires as
2010  * soon as the TXSTOPPED interrupt is enabled. Because of that we have to
2011  * explicitly check whether the TX IRQ was enabled, otherwise this function
2012  * would always return true no matter what was the source of the interrupt.
2013  */
2014 bool ready = data->int_driven->tx_irq_enabled &&
2015 nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED);
2016
2017 if (ready) {
2018 data->int_driven->fifo_fill_lock = 0;
2019 }
2020
2021 return ready ? data->int_driven->tx_buff_size : 0;
2022 }
2023
2024 static int uarte_nrfx_irq_rx_ready(const struct device *dev)
2025 {
2026 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2027
2028 return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX);
2029 }
2030
2031 /** Interrupt driven receiver enabling function */
2032 static void uarte_nrfx_irq_rx_enable(const struct device *dev)
2033 {
2034 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2035
2036 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2037 }
2038
2039 /** Interrupt driven receiver disabling function */
2040 static void uarte_nrfx_irq_rx_disable(const struct device *dev)
2041 {
2042 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2043
2044 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2045 }
2046
2047 /** Interrupt driven error enabling function */
2048 static void uarte_nrfx_irq_err_enable(const struct device *dev)
2049 {
2050 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2051
2052 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ERROR_MASK);
2053 }
2054
2055 /** Interrupt driven error disabling function */
2056 static void uarte_nrfx_irq_err_disable(const struct device *dev)
2057 {
2058 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2059
2060 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ERROR_MASK);
2061 }
2062
2063 /** Interrupt driven pending status function */
2064 static int uarte_nrfx_irq_is_pending(const struct device *dev)
2065 {
2066 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2067
2068 return ((nrf_uarte_int_enable_check(uarte,
2069 NRF_UARTE_INT_TXSTOPPED_MASK) &&
2070 uarte_nrfx_irq_tx_ready_complete(dev))
2071 ||
2072 (nrf_uarte_int_enable_check(uarte,
2073 NRF_UARTE_INT_ENDRX_MASK) &&
2074 uarte_nrfx_irq_rx_ready(dev)));
2075 }
2076
2077 /** Interrupt driven interrupt update function */
2078 static int uarte_nrfx_irq_update(const struct device *dev)
2079 {
2080 return 1;
2081 }
2082
2083 /** Set the callback function */
2084 static void uarte_nrfx_irq_callback_set(const struct device *dev,
2085 uart_irq_callback_user_data_t cb,
2086 void *cb_data)
2087 {
2088 struct uarte_nrfx_data *data = dev->data;
2089
2090 data->int_driven->cb = cb;
2091 data->int_driven->cb_data = cb_data;
2092 }
2093 #endif /* UARTE_INTERRUPT_DRIVEN */
2094
2095 static DEVICE_API(uart, uart_nrfx_uarte_driver_api) = {
2096 .poll_in = uarte_nrfx_poll_in,
2097 .poll_out = uarte_nrfx_poll_out,
2098 .err_check = uarte_nrfx_err_check,
2099 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
2100 .configure = uarte_nrfx_configure,
2101 .config_get = uarte_nrfx_config_get,
2102 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
2103 #ifdef UARTE_ANY_ASYNC
2104 .callback_set = uarte_nrfx_callback_set,
2105 .tx = uarte_nrfx_tx,
2106 .tx_abort = uarte_nrfx_tx_abort,
2107 .rx_enable = uarte_nrfx_rx_enable,
2108 .rx_buf_rsp = uarte_nrfx_rx_buf_rsp,
2109 .rx_disable = uarte_nrfx_rx_disable,
2110 #endif /* UARTE_ANY_ASYNC */
2111 #ifdef UARTE_INTERRUPT_DRIVEN
2112 .fifo_fill = uarte_nrfx_fifo_fill,
2113 .fifo_read = uarte_nrfx_fifo_read,
2114 .irq_tx_enable = uarte_nrfx_irq_tx_enable,
2115 .irq_tx_disable = uarte_nrfx_irq_tx_disable,
2116 .irq_tx_ready = uarte_nrfx_irq_tx_ready_complete,
2117 .irq_rx_enable = uarte_nrfx_irq_rx_enable,
2118 .irq_rx_disable = uarte_nrfx_irq_rx_disable,
2119 .irq_tx_complete = uarte_nrfx_irq_tx_ready_complete,
2120 .irq_rx_ready = uarte_nrfx_irq_rx_ready,
2121 .irq_err_enable = uarte_nrfx_irq_err_enable,
2122 .irq_err_disable = uarte_nrfx_irq_err_disable,
2123 .irq_is_pending = uarte_nrfx_irq_is_pending,
2124 .irq_update = uarte_nrfx_irq_update,
2125 .irq_callback_set = uarte_nrfx_irq_callback_set,
2126 #endif /* UARTE_INTERRUPT_DRIVEN */
2127 };
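/* Minimal usage sketch of this driver through the generic UART API (for
 * illustration only; the "my_uart" devicetree alias is a hypothetical example):
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_ALIAS(my_uart));
 *
 *   if (device_is_ready(uart)) {
 *       uart_poll_out(uart, '?');   // dispatched to uarte_nrfx_poll_out()
 *   }
 */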
2128
2129 #ifdef UARTE_ENHANCED_POLL_OUT
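/* Allocate a (D)PPI channel and connect the ENDTX event to the STOPTX task so
 * that the transmitter is stopped by hardware after each transfer completes.
 */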
2130 static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte,
2131 struct uarte_nrfx_data *data)
2132 {
2133 nrfx_err_t ret;
2134
2135 ret = nrfx_gppi_channel_alloc(&data->ppi_ch_endtx);
2136 if (ret != NRFX_SUCCESS) {
2137 LOG_ERR("Failed to allocate PPI Channel");
2138 return -EIO;
2139 }
2140
2141 nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx,
2142 nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX),
2143 nrf_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX));
2144 nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx));
2145
2146 return 0;
2147 }
2148 #endif /* UARTE_ENHANCED_POLL_OUT */
2149
2150 /** @brief Pend until TX is stopped.
2151 *
2152 * There are 2 configurations that must be handled:
2153  * - ENDTX->TXSTOPPED short/PPI enabled - just pend until the TXSTOPPED event is set
2154  * - otherwise - disable the ENDTX interrupt, trigger STOPTX manually, then pend for TXSTOPPED
2155 */
2156 static void wait_for_tx_stopped(const struct device *dev)
2157 {
2158 const struct uarte_nrfx_config *config = dev->config;
2159 bool ppi_endtx = (config->flags & UARTE_CFG_FLAG_PPI_ENDTX) ||
2160 IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT);
2161 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2162 bool res;
2163
2164 if (!ppi_endtx) {
2165 /* We assume here that this can be called from any context, including
2166  * one that the UARTE interrupt cannot preempt. Disable the ENDTX
2167  * interrupt to ensure that it will not be triggered (when running in a
2168  * lower priority context) and stop TX manually if necessary.
2169  */
2170 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2171 NRFX_WAIT_FOR(is_tx_ready(dev), 1000, 1, res);
2172 if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
2173 if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) {
2174 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
2175 }
2176 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
2177 }
2178 }
2179
2180 NRFX_WAIT_FOR(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED),
2181 1000, 1, res);
2182
2183 if (!ppi_endtx) {
2184 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2185 }
2186 }
2187
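/* PM resume: apply the default pin configuration and enable the peripheral,
 * unless the legacy low power mode manages enabling on demand.
 */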
2188 static void uarte_pm_resume(const struct device *dev)
2189 {
2190 const struct uarte_nrfx_config *cfg = dev->config;
2191
2192 (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
2193
2194 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || !LOW_POWER_ENABLED(cfg)) {
2195 uarte_periph_enable(dev);
2196 }
2197 }
2198
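/* PM suspend: make sure TX is stopped (asynchronous and polling paths), stop
 * any pending RX, then disable the peripheral and apply the sleep pin state.
 */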
2199 static void uarte_pm_suspend(const struct device *dev)
2200 {
2201 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2202 const struct uarte_nrfx_config *cfg = dev->config;
2203 struct uarte_nrfx_data *data = dev->data;
2204
2205 (void)data;
2206 #ifdef UARTE_ANY_ASYNC
2207 if (data->async) {
2208 /* Entering the inactive state requires that the device has no
2209  * active asynchronous calls.
2210  */
2211 __ASSERT_NO_MSG(!data->async->rx.enabled);
2212 __ASSERT_NO_MSG(!data->async->tx.len);
2213 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
2214 /* If runtime PM is enabled then reference counting ensures that
2215 * suspend will not occur when TX is active.
2216 */
2217 __ASSERT_NO_MSG(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED));
2218 } else {
2219 wait_for_tx_stopped(dev);
2220 }
2221
2222 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
2223 if (data->async && HW_RX_COUNTING_ENABLED(cfg)) {
2224 nrfx_timer_disable(&cfg->timer);
2225 /* Timer/counter value is reset when disabled. */
2226 data->async->rx.total_byte_cnt = 0;
2227 data->async->rx.total_user_byte_cnt = 0;
2228 }
2229 #endif
2230 } else if (IS_ENABLED(UARTE_ANY_NONE_ASYNC))
2231 #endif
2232 {
2233 if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
2234 #if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE)
2235 if (data->int_driven) {
2236 data->int_driven->rx_irq_enabled =
2237 nrf_uarte_int_enable_check(uarte,
2238 NRF_UARTE_INT_ENDRX_MASK);
2239 if (data->int_driven->rx_irq_enabled) {
2240 nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
2241 }
2242 }
2243 #endif
2244 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
2245 while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO)) {
2246 /* Busy wait for event to register */
2247 Z_SPIN_DELAY(2);
2248 }
2249 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
2250 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
2251 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
2252 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
2253 }
2254
2255 wait_for_tx_stopped(dev);
2256 }
2257
2258 #ifdef CONFIG_SOC_NRF54H20_GPD
2259 nrf_gpd_retain_pins_set(cfg->pcfg, true);
2260 #endif
2261
2262 nrf_uarte_disable(uarte);
2263
2264 (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP);
2265 }
2266
2267 static int uarte_nrfx_pm_action(const struct device *dev, enum pm_device_action action)
2268 {
2269 if (action == PM_DEVICE_ACTION_RESUME) {
2270 uarte_pm_resume(dev);
2271 } else if (IS_ENABLED(CONFIG_PM_DEVICE) && (action == PM_DEVICE_ACTION_SUSPEND)) {
2272 uarte_pm_suspend(dev);
2273 } else {
2274 return -ENOTSUP;
2275 }
2276
2277 return 0;
2278 }
2279
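/* One-time initialization of the TX path: optionally wire ENDTX to STOPTX
 * (HW short or (D)PPI) and leave the peripheral with the TXSTOPPED event set,
 * so that "TX idle" can later be detected purely from hardware state.
 */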
2280 static int uarte_tx_path_init(const struct device *dev)
2281 {
2282 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2283 const struct uarte_nrfx_config *cfg = dev->config;
2284 bool auto_endtx = false;
2285
2286 #ifdef UARTE_HAS_ENDTX_STOPTX_SHORT
2287 nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDTX_STOPTX);
2288 auto_endtx = true;
2289 #elif defined(UARTE_ENHANCED_POLL_OUT)
2290 if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) {
2291 struct uarte_nrfx_data *data = dev->data;
2292 int err;
2293
2294 err = endtx_stoptx_ppi_init(uarte, data);
2295 if (err < 0) {
2296 return err;
2297 }
2298 auto_endtx = true;
2299 }
2300 #endif
2301
2302 /* Get to the point where the TXSTOPPED event is set but the TXSTOPPED interrupt
2303  * is disabled. This trick is later used in the TX path to determine, purely from
2304  * hardware state, whether TX is active (TXSTOPPED set means TX is inactive).
2305  *
2306  * The TXSTOPPED event is set by requesting a fake (zero-length) transfer.
2307  * A pointer to a RAM variable is still provided because otherwise such an
2308  * operation may result in a HardFault or RAM corruption.
2309  */
2310 nrf_uarte_enable(uarte);
2311 nrf_uarte_tx_buffer_set(uarte, cfg->poll_out_byte, 0);
2312 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
2313 if (!auto_endtx) {
2314 while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
2315 }
2316 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
2317 nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
2318 nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
2319 }
2320 while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
2321 }
2322 nrf_uarte_disable(uarte);
2323
2324 return 0;
2325 }
2326
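/* Common per-instance initialization: apply the static or runtime
 * configuration, set up the asynchronous context when used, initialize the
 * TX path and hand over to device power management.
 */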
2327 static int uarte_instance_init(const struct device *dev,
2328 uint8_t interrupts_active)
2329 {
2330 int err;
2331 const struct uarte_nrfx_config *cfg = dev->config;
2332
2333 if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
2334 /* For simulation the DT provided peripheral address needs to be corrected */
2335 ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs;
2336 }
2337
2338 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
2339 err = uarte_nrfx_configure(dev, &((struct uarte_nrfx_data *)dev->data)->uart_config);
2340 if (err) {
2341 return err;
2342 }
2343 #else
2344 NRF_UARTE_Type *uarte = get_uarte_instance(dev);
2345
2346 nrf_uarte_baudrate_set(uarte, cfg->nrf_baudrate);
2347 nrf_uarte_configure(uarte, &cfg->hw_config);
2348 #endif
2349
2350 #ifdef UARTE_ANY_ASYNC
2351 struct uarte_nrfx_data *data = dev->data;
2352
2353 if (data->async) {
2354 err = uarte_async_init(dev);
2355 if (err < 0) {
2356 return err;
2357 }
2358 }
2359 #endif
2360
2361 err = uarte_tx_path_init(dev);
2362 if (err) {
2363 return err;
2364 }
2365
2366 return pm_device_driver_init(dev, uarte_nrfx_pm_action);
2367 }
2368
2369 #define UARTE_IRQ_CONFIGURE(idx, isr_handler) \
2370 do { \
2371 IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \
2372 isr_handler, DEVICE_DT_GET(UARTE(idx)), 0); \
2373 irq_enable(DT_IRQN(UARTE(idx))); \
2374 } while (false)
2375
2376 /* Low power mode is used when the receiver is disabled (disable-rx set) or, in
2377  * async mode, when the low power Kconfig option is enabled (not with CONFIG_PM_DEVICE).
2378  */
2379 #define USE_LOW_POWER(idx) \
2380 COND_CODE_1(CONFIG_PM_DEVICE, (0), \
2381 (((!UARTE_PROP(idx, disable_rx) && \
2382 COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \
2383 (!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)),\
2384 (1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER)))
2385
2386 #define UARTE_DISABLE_RX_INIT(node_id) \
2387 .disable_rx = DT_PROP(node_id, disable_rx)
2388
2389 #define UARTE_GET_FREQ(idx) DT_PROP(DT_CLOCKS_CTLR(UARTE(idx)), clock_frequency)
2390
2391 #define UARTE_GET_BAUDRATE_DIV(idx) \
2392 COND_CODE_1(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \
2393 ((UARTE_GET_FREQ(idx) / NRF_UARTE_BASE_FREQUENCY_16MHZ)), (1))
2394
2395 /* When calculating the baudrate register value, high speed instances must have it
2396  * scaled by the ratio between the UARTE clock frequency and 16 MHz.
2397  */
2398 #define UARTE_GET_BAUDRATE(idx) \
2399 (NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) / UARTE_GET_BAUDRATE_DIV(idx))
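/* Worked example (hypothetical values): an instance clocked at 32 MHz with
 * current-speed = 115200 gets UARTE_GET_BAUDRATE_DIV == 2, so the value written
 * to the BAUDRATE register is NRF_BAUDRATE(115200) / 2.
 */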
2400
2401
2402 /* Macro for setting nRF specific configuration structures. */
2403 #define UARTE_NRF_CONFIG(idx) { \
2404 .hwfc = (UARTE_PROP(idx, hw_flow_control) == \
2405 UART_CFG_FLOW_CTRL_RTS_CTS) ? \
2406 NRF_UARTE_HWFC_ENABLED : NRF_UARTE_HWFC_DISABLED, \
2407 .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ? \
2408 NRF_UARTE_PARITY_INCLUDED : NRF_UARTE_PARITY_EXCLUDED, \
2409 IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\
2410 IF_ENABLED(UARTE_ODD_PARITY_ALLOWED, \
2411 (.paritytype = NRF_UARTE_PARITYTYPE_EVEN,)) \
2412 IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \
2413 (.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN,)) \
2414 }
2415
2416 /* Macro for setting zephyr specific configuration structures. */
2417 #define UARTE_CONFIG(idx) { \
2418 .baudrate = UARTE_PROP(idx, current_speed), \
2419 .data_bits = UART_CFG_DATA_BITS_8, \
2420 .stop_bits = UART_CFG_STOP_BITS_1, \
2421 .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) \
2422 ? UART_CFG_PARITY_EVEN \
2423 : UART_CFG_PARITY_NONE, \
2424 .flow_ctrl = UARTE_PROP(idx, hw_flow_control) \
2425 ? UART_CFG_FLOW_CTRL_RTS_CTS \
2426 : UART_CFG_FLOW_CTRL_NONE, \
2427 }
2428
2429 #define UART_NRF_UARTE_DEVICE(idx) \
2430 NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(UARTE(idx)); \
2431 UARTE_INT_DRIVEN(idx); \
2432 PINCTRL_DT_DEFINE(UARTE(idx)); \
2433 IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \
2434 static uint8_t \
2435 uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \
2436 DMM_MEMORY_SECTION(UARTE(idx)); \
2437 static uint8_t uarte##idx##_flush_buf[UARTE_HW_RX_FIFO_SIZE] \
2438 DMM_MEMORY_SECTION(UARTE(idx)); \
2439 struct uarte_async_cb uarte##idx##_async;)) \
2440 static uint8_t uarte##idx##_poll_out_byte DMM_MEMORY_SECTION(UARTE(idx));\
2441 static uint8_t uarte##idx##_poll_in_byte DMM_MEMORY_SECTION(UARTE(idx)); \
2442 static struct uarte_nrfx_data uarte_##idx##_data = { \
2443 IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE, \
2444 (.uart_config = UARTE_CONFIG(idx),)) \
2445 IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \
2446 (.async = &uarte##idx##_async,)) \
2447 IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \
2448 (.int_driven = &uarte##idx##_int_driven,)) \
2449 }; \
2450 COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (), \
2451 (BUILD_ASSERT(NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) > 0,\
2452 "Unsupported baudrate");)) \
2453 static const struct uarte_nrfx_config uarte_##idx##z_config = { \
2454 COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, \
2455 (IF_ENABLED(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \
2456 (.clock_freq = UARTE_GET_FREQ(idx),))), \
2457 (IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \
2458 (.baudrate = UARTE_PROP(idx, current_speed),)) \
2459 .nrf_baudrate = UARTE_GET_BAUDRATE(idx), \
2460 .hw_config = UARTE_NRF_CONFIG(idx),)) \
2461 .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \
2462 .uarte_regs = _CONCAT(NRF_UARTE, idx), \
2463 IF_ENABLED(CONFIG_HAS_NORDIC_DMM, \
2464 (.mem_reg = DMM_DEV_TO_REG(UARTE(idx)),)) \
2465 .flags = \
2466 (IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \
2467 UARTE_CFG_FLAG_PPI_ENDTX : 0) | \
2468 (IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ? \
2469 UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) | \
2470 (!IS_ENABLED(CONFIG_HAS_NORDIC_DMM) ? 0 : \
2471 (UARTE_IS_CACHEABLE(idx) ? \
2472 UARTE_CFG_FLAG_CACHEABLE : 0)) | \
2473 USE_LOW_POWER(idx), \
2474 UARTE_DISABLE_RX_INIT(UARTE(idx)), \
2475 .poll_out_byte = &uarte##idx##_poll_out_byte, \
2476 .poll_in_byte = &uarte##idx##_poll_in_byte, \
2477 IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \
2478 (.tx_cache = uarte##idx##_tx_cache, \
2479 .rx_flush_buf = uarte##idx##_flush_buf,)) \
2480 IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \
2481 (.timer = NRFX_TIMER_INSTANCE( \
2482 CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \
2483 }; \
2484 static int uarte_##idx##_init(const struct device *dev) \
2485 { \
2486 COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \
2487 (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_async);), \
2488 (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_int);)) \
2489 return uarte_instance_init( \
2490 dev, \
2491 IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN)); \
2492 } \
2493 \
2494 PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action, \
2495 COND_CODE_1(INSTANCE_IS_FAST(_, /*empty*/, idx, _),\
2496 (0), (PM_DEVICE_ISR_SAFE))); \
2497 \
2498 DEVICE_DT_DEFINE(UARTE(idx), \
2499 uarte_##idx##_init, \
2500 PM_DEVICE_DT_GET(UARTE(idx)), \
2501 &uarte_##idx##_data, \
2502 &uarte_##idx##z_config, \
2503 PRE_KERNEL_1, \
2504 CONFIG_SERIAL_INIT_PRIORITY, \
2505 &uart_nrfx_uarte_driver_api)
2506
2507 #define UARTE_INT_DRIVEN(idx) \
2508 IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \
2509 (static uint8_t uarte##idx##_tx_buffer \
2510 [MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE, \
2511 BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))] \
2512 DMM_MEMORY_SECTION(UARTE(idx)); \
2513 static struct uarte_nrfx_int_driven \
2514 uarte##idx##_int_driven = { \
2515 .tx_buffer = uarte##idx##_tx_buffer, \
2516 .tx_buff_size = sizeof(uarte##idx##_tx_buffer),\
2517 };))
2518
2519 #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \
2520 IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);))
2521
2522 UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ())
2523