/*
 * Copyright (c) 2015 - 2025, Nordic Semiconductor ASA
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <nrfx.h>

#if NRFX_CHECK(NRFX_UARTE_ENABLED)

#if !NRFX_FEATURE_PRESENT(NRFX_UARTE, _ENABLED)
#error "No enabled UARTE instances. Check <nrfx_config.h>."
#endif

#include <nrfx_uarte.h>
#include "prs/nrfx_prs.h"
#include <haly/nrfy_gpio.h>
#include <string.h>
#include <soc/nrfx_coredep.h>

#define NRFX_LOG_MODULE UARTE
#include <nrfx_log.h>

#define UARTEX_LENGTH_VALIDATE(periph_name, prefix, i, drv_inst_idx, len1, len2) \
    (((drv_inst_idx) == NRFX_CONCAT(NRFX_, periph_name, prefix, i, _INST_IDX)) && \
     NRFX_EASYDMA_LENGTH_VALIDATE(NRFX_CONCAT(periph_name, prefix, i), len1, len2))

#define UARTE_LENGTH_VALIDATE(drv_inst_idx, len)    \
        (NRFX_FOREACH_ENABLED(UARTE, UARTEX_LENGTH_VALIDATE, (||), (0), drv_inst_idx, len, 0))
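// For example, with only the UARTE0 instance enabled, UARTE_LENGTH_VALIDATE(idx, len)
// reduces, roughly, to a single check that idx is the UARTE0 instance index and that
// len fits within the EasyDMA transfer length supported by UARTE0.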

#if NRFX_CHECK(NRFX_UARTE_CONFIG_RX_CACHE_ENABLED)
// An internal cache buffer is used if buffers provided by the user cannot be used for
// DMA. This is a HW limitation on some platforms, but for testing purposes it can be
// emulated on any platform.
#define RX_CACHE_SUPPORTED 1
#else
#define RX_CACHE_SUPPORTED 0
#endif

#define MIN_RX_CACHE_SIZE 8
// There is a HW bug which results in the RX amount value not being updated when the
// FIFO was empty. It is then hard to determine whether the FIFO contained anything or not.
#define USE_WORKAROUND_FOR_FLUSHRX_ANOMALY 1

// Size of the RX HW FIFO.
#define UARTE_HW_RX_FIFO_SIZE 5

// Set of flags which, when needed, are controlled atomically. The flags maintain the
// current state and configuration.
#define UARTE_FLAG_TX_CNT      12 // Maximum number of TX flags
#define UARTE_FLAG_RX_CNT      12 // Maximum number of RX flags
#define UARTE_FLAG_MISC_CNT    8  // Maximum number of miscellaneous flags

#define UARTE_FLAG_TX_OFFSET   0
#define UARTE_FLAG_RX_OFFSET   UARTE_FLAG_TX_CNT
#define UARTE_FLAG_MISC_OFFSET (UARTE_FLAG_RX_OFFSET + UARTE_FLAG_RX_CNT)

#define UARTE_FLAG(type, i) \
    NRFX_BIT(NRFX_CONCAT(UARTE_FLAG_,type,_OFFSET) + i)

// Mask with all RX flags
#define UARTE_RX_FLAGS (NRFX_BIT_MASK(UARTE_FLAG_RX_CNT) << UARTE_FLAG_RX_OFFSET)
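
// The resulting 32-bit flag word is laid out as follows:
// bits 0-11 - TX flags, bits 12-23 - RX flags, bits 24-31 - miscellaneous flags.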

// Flag set when there is a PPI connection set up between the ENDTX event and the
// STOPTX task.
#define UARTE_FLAG_TX_STOP_ON_END          UARTE_FLAG(TX, 0)

// Flag is set when the internal TX buffer is used as a cache/proxy buffer. It is used
// when the user buffer is in memory that cannot be used by the DMA.
#define UARTE_FLAG_TX_USE_CACHE            UARTE_FLAG(TX, 1)

// Flag is used to indicate that an asynchronous TX transfer request occurred during a
// blocking transfer. The pending asynchronous transfer is started immediately after the
// current blocking transfer is completed.
#define UARTE_FLAG_TX_PENDING              UARTE_FLAG(TX, 2)

// Flag indicates that TX abort is in progress.
#define UARTE_FLAG_TX_ABORTED              UARTE_FLAG(TX, 3)

// Flag indicates that TX transfers are linked (by an ENDTX-STARTTX PPI connection set
// up by the user).
#define UARTE_FLAG_TX_LINKED               UARTE_FLAG(TX, 4)

// Flag is set when the receiver is active.
#define UARTE_FLAG_RX_ENABLED              UARTE_FLAG(RX, 0)

// Flag is set if the receiver is enabled with the flag NRFX_UARTE_RX_ENABLE_STOP_ON_END.
#define UARTE_FLAG_RX_STOP_ON_END          UARTE_FLAG(RX, 1)

// Flag is set if the receiver is enabled with the flag NRFX_UARTE_RX_ENABLE_CONT.
#define UARTE_FLAG_RX_CONT                 UARTE_FLAG(RX, 2)

// Flag is set if the receiver is enabled with the flag NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT.
#define UARTE_FLAG_RX_KEEP_FIFO_CONTENT    UARTE_FLAG(RX, 3)

// Flag indicates that the RX abort was done to seamlessly switch the DMA buffer and not
// to abort the transfer.
#define UARTE_FLAG_RX_RESTARTED            UARTE_FLAG(RX, 4)

// Flag is set when the internal RX buffer is used as a cache/proxy buffer. It is used
// when the user buffer is in memory that cannot be used by the DMA.
#define UARTE_FLAG_RX_USE_CACHE            UARTE_FLAG(RX, 5)

// Flag indicates that RX was aborted.
#define UARTE_FLAG_RX_ABORTED              UARTE_FLAG(RX, 6)

// Flag indicates that there are new bytes from the flushed buffer copied to the user buffer.
#define UARTE_FLAG_RX_FROM_FLUSH           UARTE_FLAG(RX, 7)

// Flag indicates that the user explicitly aborted RX.
#define UARTE_FLAG_RX_FORCED_ABORT         UARTE_FLAG(RX, 8)

// Flag is set if the instance was configured to control the PSEL pins during the initialization.
#define UARTE_FLAG_PSEL_UNINIT             UARTE_FLAG(MISC, 0)

// Flag is set if the instance was configured to control the GPIO pins during the initialization.
#define UARTE_FLAG_GPIO_UNINIT             UARTE_FLAG(MISC, 1)

// Flag is atomically set when nrfx_uarte_int_trigger is called.
#define UARTE_FLAG_TRIGGER                 UARTE_FLAG(MISC, 2)

// Flag indicates that HWFC pins are being configured.
#define UARTE_FLAG_HWFC_PINS               UARTE_FLAG(MISC, 3)

typedef struct
{
    /* User provided buffers. */
    nrfy_uarte_buffer_t     curr;
    nrfy_uarte_buffer_t     next;
    nrfy_uarte_buffer_t     flush;
    nrfx_uarte_rx_cache_t * p_cache;
    size_t                  off;
} uarte_rx_data_t;

typedef struct
{
    nrfy_uarte_buffer_t curr;
    nrfy_uarte_buffer_t next;
    nrfy_uarte_buffer_t cache;
    size_t              off;
    int                 amount;
} uarte_tx_data_t;

typedef struct
{
    void                     * p_context;
    nrfx_uarte_event_handler_t handler;
    uarte_rx_data_t            rx;
    uarte_tx_data_t            tx;
    nrfx_drv_state_t           state;
    nrfx_atomic_t              flags;
} uarte_control_block_t;

static uarte_control_block_t m_cb[NRFX_UARTE_ENABLED_COUNT];

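// Set of interrupts used while reception is active. They are enabled when RX is
// started with an event handler and disabled when reception is stopped.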
static const uint32_t rx_int_mask = NRF_UARTE_INT_ERROR_MASK |
                                    NRF_UARTE_INT_ENDRX_MASK |
                                    NRF_UARTE_INT_RXTO_MASK |
                                    NRF_UARTE_INT_RXSTARTED_MASK;

static void apply_workaround_for_enable_anomaly(nrfx_uarte_t const * p_instance);

static void uarte_configure(nrfx_uarte_t        const * p_instance,
                            nrfx_uarte_config_t const * p_config)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_GPIO_CONFIG) &&
        (p_config->skip_gpio_cfg == false))
    {
        p_cb->flags |= UARTE_FLAG_GPIO_UNINIT;
        if (p_config->txd_pin != NRF_UARTE_PSEL_DISCONNECTED)
        {
            nrfy_gpio_pin_set(p_config->txd_pin);
            nrfy_gpio_cfg_output(p_config->txd_pin);
#if NRF_GPIO_HAS_CLOCKPIN && defined(NRF_UARTE_CLOCKPIN_TXD_NEEDED)
            nrfy_gpio_pin_clock_set(p_config->txd_pin, true);
#endif
        }
        if (p_config->rxd_pin != NRF_UARTE_PSEL_DISCONNECTED)
        {
            nrfy_gpio_cfg_input(p_config->rxd_pin, NRF_GPIO_PIN_NOPULL);
        }
    }

    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_GPIO_CONFIG) &&
        (p_config->config.hwfc == NRF_UARTE_HWFC_ENABLED) && (!p_config->skip_gpio_cfg))
    {
        p_cb->flags |= UARTE_FLAG_HWFC_PINS;
        if (p_config->cts_pin != NRF_UARTE_PSEL_DISCONNECTED)
        {
            nrfy_gpio_cfg_input(p_config->cts_pin, NRF_GPIO_PIN_NOPULL);
        }
        if (p_config->rts_pin != NRF_UARTE_PSEL_DISCONNECTED)
        {
            nrfy_gpio_pin_set(p_config->rts_pin);
            nrfy_gpio_cfg_output(p_config->rts_pin);
        }
    }

    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_PSEL_CONFIG) && !p_config->skip_psel_cfg)
    {
        p_cb->flags |= UARTE_FLAG_PSEL_UNINIT;
    }

    nrfy_uarte_config_t nrfy_config =
    {
        .pins =
        {
            .txd_pin = p_config->txd_pin,
            .rxd_pin = p_config->rxd_pin,
            .rts_pin = p_config->rts_pin,
            .cts_pin = p_config->cts_pin
        },
        .skip_psel_cfg = NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_PSEL_CONFIG) ?
            true : p_config->skip_psel_cfg
    };
    nrfy_config.config = p_config->config;

#if defined(LUMOS_XXAA)
    uint32_t base_frequency = NRF_UARTE_BASE_FREQUENCY_GET(p_instance->p_reg);
    if (base_frequency != NRF_UARTE_BASE_FREQUENCY_16MHZ)
    {
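        // The BAUDRATE register value assumes a 16 MHz base clock. For example, with a
        // 32 MHz base frequency the factor is 2 and the requested value is halved, so
        // the effective baud rate stays as requested.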
        uint32_t baudrate_factor = base_frequency / NRF_UARTE_BASE_FREQUENCY_16MHZ;
        nrfy_config.baudrate =
            (nrf_uarte_baudrate_t)((uint32_t)p_config->baudrate / baudrate_factor);
    }
    else
#endif
    {
        nrfy_config.baudrate = p_config->baudrate;
    }

    nrfy_uarte_periph_configure(p_instance->p_reg, &nrfy_config);

    apply_workaround_for_enable_anomaly(p_instance);

    nrfy_uarte_int_init(p_instance->p_reg,
                        NRFY_EVENT_TO_INT_BITMASK(NRF_UARTE_EVENT_ENDRX) |
                        NRFY_EVENT_TO_INT_BITMASK(NRF_UARTE_EVENT_ENDTX) |
                        NRFY_EVENT_TO_INT_BITMASK(NRF_UARTE_EVENT_ERROR) |
                        NRFY_EVENT_TO_INT_BITMASK(NRF_UARTE_EVENT_RXTO)  |
                        NRFY_EVENT_TO_INT_BITMASK(NRF_UARTE_EVENT_TXSTOPPED),
                        p_config->interrupt_priority,
                        false);
}

static void pins_to_default(nrfx_uarte_t const * p_instance)
{
    uarte_control_block_t const * p_cb = &m_cb[p_instance->drv_inst_idx];
    nrfy_uarte_pins_t pins;

    // Need to read the pins before they are reset.
    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_GPIO_CONFIG))
    {
        nrfy_uarte_pins_get(p_instance->p_reg, &pins);
    }

    // Reset the pins to their default states.
    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_PSEL_CONFIG) &&
        (p_cb->flags & UARTE_FLAG_PSEL_UNINIT))
    {
        nrfy_uarte_pins_disconnect(p_instance->p_reg);
    }

    if (!NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_SKIP_GPIO_CONFIG))
    {
        if (p_cb->flags & UARTE_FLAG_GPIO_UNINIT)
        {
            if (pins.txd_pin != NRF_UARTE_PSEL_DISCONNECTED)
            {
                nrfy_gpio_cfg_default(pins.txd_pin);
            }
            if (pins.rxd_pin != NRF_UARTE_PSEL_DISCONNECTED)
            {
                nrfy_gpio_cfg_default(pins.rxd_pin);
            }

            if (p_cb->flags & UARTE_FLAG_HWFC_PINS)
            {
                if (pins.cts_pin != NRF_UARTE_PSEL_DISCONNECTED)
                {
                    nrfy_gpio_cfg_default(pins.cts_pin);
                }
                if (pins.rts_pin != NRF_UARTE_PSEL_DISCONNECTED)
                {
                    nrfy_gpio_cfg_default(pins.rts_pin);
                }
            }
        }
    }
}

static void apply_workaround_for_enable_anomaly(nrfx_uarte_t const * p_instance)
{
#if defined(NRF53_SERIES) || defined(NRF91_SERIES)
    // Apply workaround for anomalies:
    // - nRF91 - anomaly 23
    // - nRF53 - anomaly 44
    volatile uint32_t const * rxenable_reg =
        (volatile uint32_t *)(((uint32_t)p_instance->p_reg) + 0x564);
    volatile uint32_t const * txenable_reg =
        (volatile uint32_t *)(((uint32_t)p_instance->p_reg) + 0x568);

    if (*txenable_reg == 1)
    {
        nrfy_uarte_task_trigger(p_instance->p_reg, NRF_UARTE_TASK_STOPTX);
    }

    if (*rxenable_reg == 1)
    {
        nrfy_uarte_enable(p_instance->p_reg);
        nrfy_uarte_task_trigger(p_instance->p_reg, NRF_UARTE_TASK_STOPRX);

        bool workaround_succeeded;
        // The UARTE is able to receive up to four bytes after the STOPRX task has been triggered.
        // At the lowest supported baud rate (1200 baud), with a parity bit and two stop bits
        // configured (resulting in 12 bits per data byte sent), this may take up to 40 ms.
        NRFX_WAIT_FOR(*rxenable_reg == 0, 40000, 1, workaround_succeeded);
        if (!workaround_succeeded)
        {
            NRFX_LOG_ERROR("Failed to apply workaround for instance with base address: %p.",
                           (void *)p_instance->p_reg);
        }

        (void)nrfy_uarte_errorsrc_get_and_clear(p_instance->p_reg);
        nrfy_uarte_disable(p_instance->p_reg);
    }
#else
    (void)(p_instance);
#endif // defined(NRF53_SERIES) || defined(NRF91_SERIES)
}

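/* Disable all currently enabled UARTE interrupts and return the mask of interrupts
 * that were enabled, so that uarte_int_unlock() can restore them afterwards.
 */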
static uint32_t uarte_int_lock(NRF_UARTE_Type * p_uarte)
{
    uint32_t int_enabled = nrfy_uarte_int_enable_check(p_uarte, UINT32_MAX);

    nrfy_uarte_int_disable(p_uarte, int_enabled);

    return int_enabled;
}

static void uarte_int_unlock(NRF_UARTE_Type * p_uarte, uint32_t int_mask)
{
    nrfy_uarte_int_enable(p_uarte, int_mask);
}

/* Function returns true if a new transfer can be started. Since TXSTOPPED
 * (and ENDTX) is cleared before triggering a new transfer, TX is ready for a new
 * transfer if either event is set.
 *
 * @param stop_on_end STOPTX is PPIed with ENDTX. Check only TXSTOPPED.
 */
static bool is_tx_ready(NRF_UARTE_Type * p_uarte, bool stop_on_end)
{
    return nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
        (!stop_on_end && nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ENDTX));
}

static bool prepare_rx(NRF_UARTE_Type * p_uarte)
{
    /**
     * Stop any currently running RX operations. This can occur when a
     * bootloader sets up the UART hardware and does not clean it up
     * before jumping to the next application.
     */
    if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXSTARTED))
    {
        bool res;

        nrfy_uarte_enable(p_uarte);
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPRX);

        NRFX_WAIT_FOR(nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXTO) ||
                      nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ERROR), 100, 10, res);

        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXTO);
        nrfy_uarte_disable(p_uarte);

        return res;
    }
    return true;
}

static bool prepare_tx(NRF_UARTE_Type * p_uarte, bool stop_on_end)
{
    (void)stop_on_end;
    uint8_t dummy = 0;

    nrfy_uarte_enable(p_uarte);

    /* Set the TXSTOPPED event by requesting a fake (zero-length) transfer.
     * A pointer to a RAM variable is set because otherwise such an operation
     * may result in a HardFault or RAM corruption.
     */
    nrfy_uarte_tx_buffer_set(p_uarte, &dummy, 0);
    nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTTX);

    /* Switch off the transmitter to save energy. */
    nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPTX);

    bool res;

    NRFX_WAIT_FOR(is_tx_ready(p_uarte, true), 10, 1, res);

    if (!res)
    {
        return false;
    }

    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDTX);
    nrfy_uarte_disable(p_uarte);
    return true;
}

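/* A minimal usage sketch (assuming UARTE instance 0 and the default configuration
 * macros from nrfx_uarte.h; the pin numbers and the event handler are placeholders):
 *
 *     nrfx_uarte_t uarte = NRFX_UARTE_INSTANCE(0);
 *     nrfx_uarte_config_t config = NRFX_UARTE_DEFAULT_CONFIG(TX_PIN, RX_PIN);
 *     nrfx_err_t err = nrfx_uarte_init(&uarte, &config, user_event_handler);
 */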
nrfx_err_t nrfx_uarte_init(nrfx_uarte_t const *        p_instance,
                           nrfx_uarte_config_t const * p_config,
                           nrfx_uarte_event_handler_t  event_handler)
{
    NRFX_ASSERT(p_config);
    uint32_t inst_idx = p_instance->drv_inst_idx;
    uarte_control_block_t * p_cb = &m_cb[inst_idx];
    nrfx_err_t err_code;

    if (p_cb->state != NRFX_DRV_STATE_UNINITIALIZED)
    {
#if NRFX_API_VER_AT_LEAST(3, 2, 0)
        err_code = NRFX_ERROR_ALREADY;
#else
        err_code = NRFX_ERROR_INVALID_STATE;
#endif
        NRFX_LOG_WARNING("Function: %s, error code: %s.",
                         __func__,
                         NRFX_LOG_ERROR_STRING_GET(err_code));
        return err_code;
    }

#if NRFX_CHECK(NRFX_PRS_ENABLED)
    static nrfx_irq_handler_t const irq_handlers[NRFX_UARTE_ENABLED_COUNT] = {
        NRFX_INSTANCE_IRQ_HANDLERS_LIST(UARTE, uarte)
    };
    if (nrfx_prs_acquire(p_instance->p_reg, irq_handlers[inst_idx]) != NRFX_SUCCESS)
    {
        err_code = NRFX_ERROR_BUSY;
        NRFX_LOG_WARNING("Function: %s, error code: %s.",
                         __func__,
                         NRFX_LOG_ERROR_STRING_GET(err_code));
        return err_code;
    }
#endif // NRFX_CHECK(NRFX_PRS_ENABLED)

    memset(p_cb, 0, sizeof(uarte_control_block_t));

    p_cb->p_context = p_config->p_context;
    p_cb->tx.cache.p_buffer = p_config->tx_cache.p_buffer;
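    // The last byte of the TX cache buffer is reserved for single-byte poll_out()
    // transfers, so the usable cache length is one byte shorter than provided.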
    if (p_config->tx_cache.length == 0)
    {
        p_cb->tx.cache.length = 0;
    }
    else
    {
        p_cb->tx.cache.length = p_config->tx_cache.length - 1;
    }
    if (p_config->rx_cache.length >= UARTE_HW_RX_FIFO_SIZE)
    {
        p_cb->rx.flush.p_buffer = p_config->rx_cache.p_buffer;
        p_cb->rx.flush.length = 0;
        if (RX_CACHE_SUPPORTED && p_config->p_rx_cache_scratch)
        {
            if (p_config->rx_cache.length < (UARTE_HW_RX_FIFO_SIZE + MIN_RX_CACHE_SIZE))
            {
                return NRFX_ERROR_INVALID_PARAM;
            }
            size_t cache_len = p_config->rx_cache.length - UARTE_HW_RX_FIFO_SIZE;
            size_t buf_len = cache_len / 2;

            p_cb->rx.p_cache = p_config->p_rx_cache_scratch;

            memset(p_cb->rx.p_cache, 0, sizeof(*p_cb->rx.p_cache));
            // Split the provided cache space into two equal buffers. Cache buffers can
            // overlap with the flush buffer as they are not used simultaneously.
            p_cb->rx.p_cache->cache_len = buf_len;
            p_cb->rx.p_cache->cache[0].p_buffer =
                &p_config->rx_cache.p_buffer[UARTE_HW_RX_FIFO_SIZE];
            p_cb->rx.p_cache->cache[1].p_buffer =
                &p_config->rx_cache.p_buffer[UARTE_HW_RX_FIFO_SIZE + buf_len];
        }
    }

#if NRF_UARTE_HAS_ENDTX_STOPTX_SHORT
    uint32_t tx_int_mask = 0;

    nrfy_uarte_shorts_enable(p_instance->p_reg, NRF_UARTE_SHORT_ENDTX_STOPTX);
#else
    uint32_t tx_int_mask = (!event_handler || p_config->tx_stop_on_end) ?
                               0 : NRF_UARTE_INT_ENDTX_MASK;

    if (p_config->tx_stop_on_end)
    {
        p_cb->flags |= UARTE_FLAG_TX_STOP_ON_END;
    }
#endif

    p_cb->handler = event_handler;
    p_cb->state   = NRFX_DRV_STATE_INITIALIZED;

    // Handle the case when another user (e.g. a bootloader) left RX in an active state.
    if (!prepare_rx(p_instance->p_reg))
    {
#if NRFX_CHECK(NRFX_PRS_ENABLED)
        nrfx_prs_release(p_instance->p_reg);
#endif
        return NRFX_ERROR_INTERNAL;
    }

    uarte_configure(p_instance, p_config);

    if (!prepare_tx(p_instance->p_reg, p_config->tx_stop_on_end))
    {
#if NRFX_CHECK(NRFX_PRS_ENABLED)
        nrfx_prs_release(p_instance->p_reg);
#endif
        return NRFX_ERROR_INTERNAL;
    }

    uint32_t int_mask = tx_int_mask | ((event_handler) ? rx_int_mask : 0);

    nrfy_uarte_int_enable(p_instance->p_reg, int_mask);

    return NRFX_SUCCESS;
}

nrfx_err_t nrfx_uarte_reconfigure(nrfx_uarte_t const *        p_instance,
                                  nrfx_uarte_config_t const * p_config)
{
    NRFX_ASSERT(p_config);

    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    if (p_cb->state == NRFX_DRV_STATE_UNINITIALIZED)
    {
        return NRFX_ERROR_INVALID_STATE;
    }
    if (nrfx_uarte_tx_in_progress(p_instance))
    {
        return NRFX_ERROR_BUSY;
    }
    nrfy_uarte_disable(p_instance->p_reg);
    if (p_cb->handler)
    {
        p_cb->p_context = p_config->p_context;
    }
    uarte_configure(p_instance, p_config);

    if (prepare_tx(p_instance->p_reg, p_cb->flags & UARTE_FLAG_TX_STOP_ON_END))
    {
        return NRFX_SUCCESS;
    }
    else
    {
        return NRFX_ERROR_INTERNAL;
    }
}

void nrfx_uarte_uninit(nrfx_uarte_t const * p_instance)
{
    nrfx_err_t err;
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;

    NRFX_ASSERT(p_cb->state != NRFX_DRV_STATE_UNINITIALIZED);

    nrfy_uarte_int_disable(p_uarte,
                           NRF_UARTE_INT_ENDRX_MASK |
                           NRF_UARTE_INT_ENDTX_MASK |
                           NRF_UARTE_INT_ERROR_MASK |
                           NRF_UARTE_INT_RXTO_MASK  |
                           NRF_UARTE_INT_RXSTARTED_MASK  |
                           NRF_UARTE_INT_TXSTOPPED_MASK);
    nrfy_uarte_int_uninit(p_uarte);

#if NRF_UARTE_HAS_ENDTX_STOPTX_SHORT
    nrfy_uarte_shorts_disable(p_uarte, NRF_UARTE_SHORT_ENDTX_STOPTX);
#endif

#if NRFX_CHECK(NRFX_PRS_ENABLED)
    nrfx_prs_release(p_uarte);
#endif

    err = nrfx_uarte_rx_abort(p_instance, true, true);
    (void)err;

    err = nrfx_uarte_tx_abort(p_instance, true);
    (void)err;

    pins_to_default(p_instance);

    p_cb->flags   = 0;
    p_cb->state   = NRFX_DRV_STATE_UNINITIALIZED;
    p_cb->handler = NULL;
    NRFX_LOG_INFO("Instance uninitialized: %d.", p_instance->drv_inst_idx);
}

bool nrfx_uarte_init_check(nrfx_uarte_t const * p_instance)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    return (p_cb->state != NRFX_DRV_STATE_UNINITIALIZED);
}

static void tx_start(NRF_UARTE_Type * p_uarte, const uint8_t * buf, size_t len, bool en_int)
{
    nrfy_uarte_tx_buffer_set(p_uarte, buf, len);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDTX);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_TXSTOPPED);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_TXSTARTED);

    nrfy_uarte_enable(p_uarte);

    if (en_int)
    {
        nrfy_uarte_int_enable(p_uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
    }

    nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTTX);
}

static bool is_rx_active(uarte_control_block_t * p_cb)
{
    return (p_cb->flags & UARTE_FLAG_RX_ENABLED) ? true : false;
}

/* Must be called with interrupts locked. */
static void disable_hw_from_tx(NRF_UARTE_Type *        p_uarte,
                               uarte_control_block_t * p_cb)
{
    if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED) && !is_rx_active(p_cb))
    {
        nrfy_uarte_disable(p_uarte);
    }
}

/* Block until the transfer is completed. Disable the UARTE if RX is not active. */
static void block_on_tx(NRF_UARTE_Type *        p_uarte,
                        uarte_control_block_t * p_cb)
{
    bool stop_on_end = p_cb->flags & UARTE_FLAG_TX_STOP_ON_END;
    bool do_disable = true;

    while (!is_tx_ready(p_uarte, stop_on_end))
    {}

    NRFX_CRITICAL_SECTION_ENTER();

    if (!stop_on_end)
    {
        if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ENDTX))
        {
            nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPTX);
            while (!is_tx_ready(p_uarte, true))
            {}
        }
        else
        {
            do_disable = false;
        }
    }
    else if (!nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED))
    {
        do_disable = false;
    }

    if (do_disable)
    {
        disable_hw_from_tx(p_uarte, p_cb);
    }
    NRFX_CRITICAL_SECTION_EXIT();
}

static nrfx_err_t wait_for_endtx(NRF_UARTE_Type * p_uarte,
                                 uint8_t const *  p_buf,
                                 uint32_t         length,
                                 bool             stop_on_end)
{
    const uint8_t * p_tx;
    bool ready;
    uint32_t amount;
    nrfx_err_t err;

    do {
        // Pend until TX is ready again or the TX buffer pointer is replaced with a new
        // address, which indicates that the current context got preempted and a new
        // request was started from a higher priority context.
        ready = is_tx_ready(p_uarte, stop_on_end);
        amount = nrfy_uarte_tx_amount_get(p_uarte);
        p_tx = nrfy_uarte_tx_buffer_get(p_uarte);

        if (ready || (p_tx != p_buf))
        {
            break;
        }
#if defined(CONFIG_SOC_SERIES_BSIM_NRFXX)
        nrfx_coredep_delay_us(3);
#endif
    } while (true);

    // Check if the transfer got aborted. Note that an aborted transfer can only be
    // detected if a new transfer is not started.
    err = ((p_tx == p_buf) && (length > amount)) ? NRFX_ERROR_FORBIDDEN : NRFX_SUCCESS;

    if ((err == NRFX_SUCCESS) && !stop_on_end)
    {
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPTX);
    }

    return err;
}

static nrfx_err_t poll_out(nrfx_uarte_t const * p_instance, uint8_t const * p_byte, bool early_ret)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;
    NRFX_ASSERT(p_cb->state == NRFX_DRV_STATE_INITIALIZED);
    bool use_cache;
    nrfx_err_t err = NRFX_ERROR_BUSY;
    uint8_t const * p_buf;

    if (p_cb->tx.cache.p_buffer == NULL)
    {
        if (!nrf_dma_accessible_check(p_uarte, p_byte))
        {
            return NRFX_ERROR_INVALID_PARAM;
        }
        use_cache = false;
    }
    else
    {
        use_cache = true;
    }

    NRFX_CRITICAL_SECTION_ENTER();
    bool tx_ready = is_tx_ready(p_uarte, p_cb->flags & UARTE_FLAG_TX_STOP_ON_END);

    if (tx_ready)
    {
        if (p_cb->tx.amount < 0)
        {
            p_cb->tx.amount = (int)nrfy_uarte_tx_amount_get(p_uarte);
        }
        if (use_cache)
        {
            // The last byte in the cache buffer is used for polling out.
            p_cb->tx.cache.p_buffer[p_cb->tx.cache.length] = *p_byte;
            p_buf = &p_cb->tx.cache.p_buffer[p_cb->tx.cache.length];
        }
        else
        {
            p_buf = p_byte;
        }
        tx_start(p_uarte, p_buf, 1, early_ret);
        err = NRFX_SUCCESS;
    }
    NRFX_CRITICAL_SECTION_EXIT();

    if ((err == NRFX_SUCCESS) && !early_ret)
    {
        err = wait_for_endtx(p_uarte, p_buf, 1, p_cb->flags & UARTE_FLAG_TX_STOP_ON_END);

        NRFX_CRITICAL_SECTION_ENTER();
        disable_hw_from_tx(p_uarte, p_cb);
        NRFX_CRITICAL_SECTION_EXIT();
    }

    return err;
}

static bool tx_prepare_start(NRF_UARTE_Type *        p_uarte,
                             uarte_control_block_t * p_cb,
                             bool                    use_cache)
{
    if (!is_tx_ready(p_uarte, p_cb->flags & UARTE_FLAG_TX_STOP_ON_END))
    {
        // Transmitter is busy, set the pending flag.
        p_cb->flags |= UARTE_FLAG_TX_PENDING;
        nrfy_uarte_int_enable(p_uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
        return false;
    }

    uint8_t const * p_buf;
    uint32_t xfer_len;

    if (use_cache)
    {
        uint32_t chunk_len = NRFX_MIN(p_cb->tx.cache.length, p_cb->tx.curr.length - p_cb->tx.off);

        memcpy(p_cb->tx.cache.p_buffer, &p_cb->tx.curr.p_buffer[p_cb->tx.off], chunk_len);
        p_buf = p_cb->tx.cache.p_buffer;
        xfer_len = chunk_len;
    }
    else
    {
        p_buf = p_cb->tx.curr.p_buffer;
        xfer_len = p_cb->tx.curr.length;
    }
    p_cb->tx.amount = -1;
    tx_start(p_uarte, p_buf, xfer_len, true);

    return true;
}

static nrfx_err_t blocking_tx(nrfx_uarte_t const * p_instance,
                              uint8_t const *      p_buffer,
                              uint32_t             length,
                              uint32_t             flags)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    bool early_ret = p_cb->handler && (flags & NRFX_UARTE_TX_EARLY_RETURN);
    nrfx_err_t err = NRFX_SUCCESS;

    if ((early_ret && !p_cb->tx.cache.p_buffer) || (p_cb->flags & UARTE_FLAG_TX_LINKED))
    {
        return NRFX_ERROR_FORBIDDEN;
    }

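    // Send the data one byte at a time. poll_out() handles buffers that are not
    // DMA accessible and detects preemption by a transfer started from a higher
    // priority context.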
    for (uint32_t i = 0; i < length; i++)
    {
        do {
            err = poll_out(p_instance, &p_buffer[i], early_ret);
            if (err == NRFX_SUCCESS)
            {
                break;
            }
            if (err != NRFX_ERROR_BUSY)
            {
                // TX aborted or other error.
                return err;
            }
#if defined(CONFIG_SOC_SERIES_BSIM_NRFXX)
            nrfx_coredep_delay_us(3);
#endif
        } while (true);

        if (!p_cb->handler && (p_cb->flags & UARTE_FLAG_TX_ABORTED))
        {
            NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_FLAG_TX_ABORTED);
            err = NRFX_ERROR_FORBIDDEN;
            break;
        }
    }
    return err;
}

nrfx_err_t nrfx_uarte_tx(nrfx_uarte_t const * p_instance,
                         uint8_t const *      p_data,
                         size_t               length,
                         uint32_t             flags)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;

    NRFX_ASSERT(p_cb->state == NRFX_DRV_STATE_INITIALIZED);
    NRFX_ASSERT(UARTE_LENGTH_VALIDATE(p_instance->drv_inst_idx, length));
    NRFX_ASSERT(p_data);
    NRFX_ASSERT(length > 0);

    nrfx_err_t err_code = NRFX_SUCCESS;
    bool use_cache;

    if (length == 0)
    {
        return NRFX_ERROR_INVALID_LENGTH;
    }

    if (!p_cb->handler && (flags == 0))
    {
        if (p_cb->tx.curr.length == 0)
        {
            p_cb->tx.curr.length = length;
        }
        else
        {
            return NRFX_ERROR_BUSY;
        }

        err_code = blocking_tx(p_instance, p_data, length, 0);
        p_cb->tx.curr.length = 0;
        return err_code;
    }

    // Handle the case when the transfer is blocking.
    if (flags & (NRFX_UARTE_TX_EARLY_RETURN | NRFX_UARTE_TX_BLOCKING))
    {
        return blocking_tx(p_instance, p_data, length, flags);
    }

    // EasyDMA requires that transfer buffers are placed in DataRAM;
    // signal an error if they are not.
    if (!nrf_dma_accessible_check(p_uarte, p_data))
    {
        if (!p_cb->tx.cache.p_buffer ||
            (NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_TX_LINK) && (flags & NRFX_UARTE_TX_LINK)))
        {
            err_code = NRFX_ERROR_INVALID_ADDR;
        }

        use_cache = true;
    }
    else
    {
        if ((p_cb->flags & UARTE_FLAG_TX_STOP_ON_END) &&
            (NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_TX_LINK) && (flags & NRFX_UARTE_TX_LINK)))
        {
            // The STOPTX-on-ENDTX connection cannot be used together with linking.
            err_code = NRFX_ERROR_FORBIDDEN;
        }

        use_cache = false;
    }

    if (err_code != NRFX_SUCCESS)
    {
        return err_code;
    }

    NRFX_CRITICAL_SECTION_ENTER();
    if (p_cb->tx.curr.length == 0)
    {
        p_cb->tx.curr.length = length;

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

        p_cb->tx.curr.p_buffer = (uint8_t *)p_data;

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

        if (use_cache)
        {
            p_cb->flags |= UARTE_FLAG_TX_USE_CACHE;
        }

        tx_prepare_start(p_uarte, p_cb, use_cache);
    }
    else if (NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_TX_LINK) &&
             !p_cb->tx.next.length && (flags & NRFX_UARTE_TX_LINK))
    {
        if (nrf_dma_accessible_check(p_uarte, p_cb->tx.curr.p_buffer))
        {
            bool res;

            p_cb->flags |= UARTE_FLAG_TX_LINKED;

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

            p_cb->tx.next.p_buffer = (uint8_t *)p_data;

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

            p_cb->tx.next.length = length;
            NRFX_WAIT_FOR(nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTARTED), 10, 1, res);
            if (res)
            {
#if NRF_UARTE_HAS_ENDTX_STOPTX_SHORT
                nrfy_uarte_int_enable(p_instance->p_reg, NRF_UARTE_INT_ENDTX_MASK);
                nrfy_uarte_shorts_disable(p_uarte, NRF_UARTE_SHORT_ENDTX_STOPTX);
#endif
                nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_TXSTARTED);
                nrfy_uarte_tx_buffer_set(p_uarte, p_data, length);
                err_code = NRFX_SUCCESS;
            }
            else
            {
                err_code = NRFX_ERROR_INTERNAL;
            }
        }
        else
        {
            err_code = NRFX_ERROR_FORBIDDEN;
        }
    }
    else
    {
        err_code = NRFX_ERROR_BUSY;
    }
    NRFX_CRITICAL_SECTION_EXIT();

    return err_code;
}

bool nrfx_uarte_tx_in_progress(nrfx_uarte_t const * p_instance)
{
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state != NRFX_DRV_STATE_UNINITIALIZED);

    return (m_cb[p_instance->drv_inst_idx].tx.curr.length != 0);
}

nrfx_err_t nrfx_uarte_tx_abort(nrfx_uarte_t const * p_instance, bool sync)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;
    uint32_t int_mask;

    int_mask = uarte_int_lock(p_uarte);
    if (p_cb->tx.curr.length == 0)
    {
        uarte_int_unlock(p_uarte, int_mask);
        return NRFX_ERROR_INVALID_STATE;
    }

    NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_TX_ABORTED);

    nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPTX);

    if (sync)
    {
        block_on_tx(p_uarte, p_cb);
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDTX);
        p_cb->tx.curr.length = 0;
    }

    uarte_int_unlock(p_uarte, int_mask);

    NRFX_LOG_INFO("TX transaction aborted.");

    return NRFX_SUCCESS;
}

static void user_handler(uarte_control_block_t * p_cb, nrfx_uarte_evt_type_t type)
{
    nrfx_uarte_event_t event = {
        .type = type
    };

    p_cb->handler(&event, p_cb->p_context);
}

static void user_handler_on_rx_disabled(uarte_control_block_t * p_cb, size_t flush_cnt)
{
    nrfx_uarte_event_t event = {
        .type = NRFX_UARTE_EVT_RX_DISABLED,
        .data = {
            .rx_disabled = {
                .flush_cnt = flush_cnt
            }
        }
    };

    p_cb->handler(&event, p_cb->p_context);
}

static void user_handler_on_error(NRF_UARTE_Type * p_uarte, uarte_control_block_t * p_cb)
{
    nrfx_uarte_event_t event = {
        .type = NRFX_UARTE_EVT_ERROR,
        .data = {
            .error = {
                .error_mask = nrfy_uarte_errorsrc_get_and_clear(p_uarte)
            }
        }
    };

    p_cb->handler(&event, p_cb->p_context);
}

static void user_handler_on_rx_done(uarte_control_block_t * p_cb,
                                    const uint8_t *         p_data,
                                    size_t                  len)
{
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

    nrfx_uarte_event_t event = {
        .type = NRFX_UARTE_EVT_RX_DONE,
        .data = {
            .rx = {
                .p_buffer = (uint8_t *)p_data,
                .length = len
            }
        }
    };

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

    p_cb->handler(&event, p_cb->p_context);
}

static void user_handler_on_tx_done(uarte_control_block_t * p_cb,
                                    const uint8_t *         p_data,
                                    size_t                  len,
                                    bool                    abort)
{
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

    nrfx_uarte_event_t event = {
        .type = NRFX_UARTE_EVT_TX_DONE,
        .data = {
            .tx = {
                .p_buffer = (uint8_t *)p_data,
                .length = len,
                .flags = abort ? NRFX_UARTE_TX_DONE_ABORTED : 0
            }
        }
    };

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

    p_cb->handler(&event, p_cb->p_context);
}

static void release_rx(uarte_control_block_t * p_cb)
{
    /* Clear all RX flags. */
    NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_RX_FLAGS);
}

static bool is_tx_active(NRF_UARTE_Type * p_uarte)
{
    return !nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
        nrfy_uarte_int_enable_check(p_uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
}

static void disable_hw_from_rx(NRF_UARTE_Type * p_uarte)
{
    NRFX_CRITICAL_SECTION_ENTER();

    if (!is_tx_active(p_uarte))
    {
        nrfy_uarte_disable(p_uarte);
    }

    NRFX_CRITICAL_SECTION_EXIT();
}

static void on_rx_disabled(NRF_UARTE_Type        * p_uarte,
                           uarte_control_block_t * p_cb,
                           size_t                  flush_cnt)
{
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXDRDY);
    nrfy_uarte_shorts_disable(p_uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
    nrfy_uarte_int_disable(p_uarte, rx_int_mask);
    disable_hw_from_rx(p_uarte);

    p_cb->rx.curr.p_buffer = NULL;
    p_cb->rx.next.p_buffer = NULL;
    release_rx(p_cb);
    user_handler_on_rx_disabled(p_cb, flush_cnt);
}

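/* Report a completed RX chunk. When the RX cache is in use, received data is first
 * accumulated in the user buffer and the RX_DONE event is generated only once the
 * whole user buffer is filled (or the transfer is aborted).
 */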
static void handler_on_rx_done(uarte_control_block_t * p_cb,
                               uint8_t *               p_data,
                               size_t                  len,
                               bool                    abort)
{
    bool cache_used = RX_CACHE_SUPPORTED && (p_cb->flags & UARTE_FLAG_RX_USE_CACHE);
    nrfx_uarte_rx_cache_t * p_cache = p_cb->rx.p_cache;

    if (!cache_used)
    {
        user_handler_on_rx_done(p_cb, p_data, len);
        return;
    }
    else if (!p_cache->user[0].p_buffer)
    {
        return;
    }

    memcpy(&p_cache->user[0].p_buffer[p_cache->received], p_data, len);
    p_cache->received += len;

    bool user_buf_end = p_cache->user[0].length == p_cache->received;

    if (user_buf_end || abort)
    {
        uint8_t * p_buf = p_cache->user[0].p_buffer;
        size_t buf_len = p_cache->received;

        p_cache->received = 0;
        p_cache->user[0] = p_cache->user[1];
        p_cache->user[1] = (nrfy_uarte_buffer_t){ NULL, 0 };
        if (p_cache->user[0].length)
        {
            p_cache->buf_req = true;
        }
        user_handler_on_rx_done(p_cb, p_buf, buf_len);
    }
}

/* Some data may be left in the flush buffer. It needs to be copied into the RX buffer.
 * If the flushed data fills the whole user buffer, reception is terminated.
 * Returns true when the flushed data did not fill the whole user buffer.
 */
static bool rx_flushed_handler(NRF_UARTE_Type * p_uarte, uarte_control_block_t * p_cb)
{
    if (p_cb->rx.flush.length == 0)
    {
        return true;
    }

    if ((uint32_t)p_cb->rx.flush.length >= p_cb->rx.curr.length)
    {
        uint8_t * p_buf = p_cb->rx.curr.p_buffer;
        size_t len = p_cb->rx.curr.length;

        p_cb->rx.curr.p_buffer = NULL;
        p_cb->rx.curr.length = 0;
        memcpy(p_buf, p_cb->rx.flush.p_buffer, len);
        p_cb->rx.flush.length -= len;
        memmove(p_cb->rx.flush.p_buffer, &p_cb->rx.flush.p_buffer[len], p_cb->rx.flush.length);

        if (p_cb->handler)
        {
            bool stop_on_end = p_cb->flags & UARTE_FLAG_RX_STOP_ON_END;

            if (stop_on_end)
            {
                NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_RX_ABORTED);
            }

            handler_on_rx_done(p_cb, p_buf, len, false);
            if (stop_on_end)
            {
                on_rx_disabled(p_uarte, p_cb, 0);
            }
        }

        return false;
    }
    else
    {
        memcpy(p_cb->rx.curr.p_buffer, p_cb->rx.flush.p_buffer, p_cb->rx.flush.length);
        p_cb->rx.off = p_cb->rx.flush.length;
        p_cb->rx.flush.length = 0;
        NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_RX_FROM_FLUSH);
    }

    return true;
}

nrfx_err_t nrfx_uarte_rx_enable(nrfx_uarte_t const * p_instance, uint32_t flags)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;
    uint32_t prev_flags;

    prev_flags = NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_RX_ENABLED);
    if (prev_flags & UARTE_FLAG_RX_ENABLED)
    {
        return NRFX_ERROR_BUSY;
    }

    if ((flags & NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT) && !p_cb->rx.flush.p_buffer)
    {
        return NRFX_ERROR_FORBIDDEN;
    }

    nrfy_uarte_int_disable(p_uarte, rx_int_mask);
    nrfy_uarte_enable(p_uarte);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXTO);

    uint32_t rt_flags = (flags & NRFX_UARTE_RX_ENABLE_CONT ? UARTE_FLAG_RX_CONT : 0) |
                        (flags & NRFX_UARTE_RX_ENABLE_STOP_ON_END ? UARTE_FLAG_RX_STOP_ON_END : 0) |
                        (flags & NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT ?
                                                UARTE_FLAG_RX_KEEP_FIFO_CONTENT : 0);

    NRFX_ATOMIC_FETCH_OR(&p_cb->flags, rt_flags);

    if ((p_cb->rx.curr.p_buffer == NULL) && p_cb->handler)
    {
        user_handler(p_cb, NRFX_UARTE_EVT_RX_BUF_REQUEST);
    }

    // Expecting to get a buffer set as a response to the request.
    if (p_cb->rx.curr.p_buffer == NULL)
    {
        // If RX is not active then it means that reception is already stopped.
        // It may happen if the RX flush had enough data to fill the user buffer.
        // In that case reception is disabled from the context of the RX buffer set function.
        if (!is_rx_active(p_cb))
        {
            return NRFX_SUCCESS;
        }

        release_rx(p_cb);
        return NRFX_ERROR_NO_MEM;
    }

    if (nrfy_uarte_int_enable_check(p_uarte, NRF_UARTE_INT_RXDRDY_MASK) && p_cb->handler &&
        (p_cb->flags & UARTE_FLAG_RX_FROM_FLUSH))
    {
        NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_FLAG_RX_FROM_FLUSH);
        user_handler(p_cb, NRFX_UARTE_EVT_RX_BYTE);
    }

    /* Check if the instance is still enabled. It might get disabled at some point. */
    if (p_cb->flags & UARTE_FLAG_RX_ENABLED)
    {
        if (!nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXSTARTED))
        {
            /* Manually trigger RX if it was not yet started. */
            nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTRX);
        }

        if (p_cb->handler)
        {
            nrfy_uarte_int_enable(p_uarte, rx_int_mask);
        }
    }

    return NRFX_SUCCESS;
}

static nrfx_err_t rx_buffer_set(NRF_UARTE_Type *        p_uarte,
                                uarte_control_block_t * p_cb,
                                uint8_t *               p_data,
                                size_t                  length)
{
    nrfx_err_t err_code = NRFX_SUCCESS;

    if (p_cb->rx.curr.p_buffer == NULL ||
        (!p_cb->handler && nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ENDRX)))
    {
        if (p_cb->rx.curr.p_buffer)
        {
            nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);
        }

        p_cb->rx.curr.p_buffer = p_data;
        p_cb->rx.curr.length = length;

        if (rx_flushed_handler(p_uarte, p_cb))
        {
            nrfy_uarte_rx_buffer_set(p_uarte,
                                     &p_cb->rx.curr.p_buffer[p_cb->rx.off],
                                     p_cb->rx.curr.length - p_cb->rx.off);
            if (p_cb->flags & UARTE_FLAG_RX_ENABLED)
            {
                NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_FLAG_RX_ABORTED);
                nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTRX);
                if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXTO))
                {
                    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXTO);
                }
            }
        }
    }
    else if (p_cb->rx.next.p_buffer == NULL)
    {
        p_cb->rx.next.p_buffer = p_data;
        p_cb->rx.next.length = length;

        nrfy_uarte_rx_buffer_set(p_uarte, p_data, length);
        if (p_cb->flags & UARTE_FLAG_RX_CONT)
        {
            nrfy_uarte_shorts_enable(p_uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
        }
    }
    else
    {
        err_code = NRFX_ERROR_BUSY;
    }

    return err_code;
}

static size_t get_curr_cache_buf_len(size_t cache_len, size_t len, size_t curr)
{
    if (len == 0)
    {
        return 0;
    }

    size_t rem = len - curr;

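    // If more than one cache buffer of data remains, take at most half of the
    // remainder, presumably so that the transfer is split into similarly sized
    // chunks and the final chunk is not left very short.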
    return (rem > cache_len) ? NRFX_MIN(cache_len, rem / 2) : rem;
}

static size_t get_cache_buf_len(nrfx_uarte_rx_cache_t * p_cache)
{
    size_t user_len = p_cache->user[0].length;
    size_t len = get_curr_cache_buf_len(p_cache->cache_len, user_len, p_cache->started);

    if (!len)
    {
        if (p_cache->user[1].length)
        {
            p_cache->started = 0;
            len = get_curr_cache_buf_len(p_cache->cache_len, p_cache->user[1].length, 0);
        }
    }

    p_cache->started += len;

    return len;
}

nrfx_err_t nrfx_uarte_rx_buffer_set(nrfx_uarte_t const * p_instance,
                                    uint8_t *            p_data,
                                    size_t               length)
{
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state == NRFX_DRV_STATE_INITIALIZED);
    NRFX_ASSERT(UARTE_LENGTH_VALIDATE(p_instance->drv_inst_idx, length));
    NRFX_ASSERT(p_data);
    NRFX_ASSERT(length > 0);

    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;
    bool cont = false;
    uint32_t int_enabled;
    nrfx_err_t err = NRFX_SUCCESS;

    int_enabled = uarte_int_lock(p_uarte);

    if (p_cb->flags & UARTE_FLAG_RX_ABORTED)
    {
        err = NRFX_ERROR_INVALID_STATE;
    }
    else if (!nrf_dma_accessible_check(p_uarte, p_data))
    {
        // If no cache buffer is provided or blocking mode is used, the transfer
        // cannot be handled.
        if (!RX_CACHE_SUPPORTED || !p_cb->rx.p_cache || !p_cb->handler)
        {
            err = NRFX_ERROR_INVALID_ADDR;
        }
        else
        {
            nrfx_uarte_rx_cache_t * p_cache = p_cb->rx.p_cache;

            if (!p_cache->user[0].p_buffer)
            {
                p_cache->started = 0;
                p_cache->received = 0;
                p_cache->user[0].p_buffer = p_data;
                p_cache->user[0].length = length;
                p_data = p_cache->cache[0].p_buffer;
                length = get_cache_buf_len(p_cache);
                p_cache->idx = 1;
                p_cache->buf_req = true;
                NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_RX_USE_CACHE);
                cont = true;
            }
            else if (!p_cache->user[1].p_buffer)
            {
                p_cache->user[1].p_buffer = p_data;
                p_cache->user[1].length = length;
                err = NRFX_SUCCESS;
                if (!p_cb->rx.next.p_buffer)
                {
                    length = get_cache_buf_len(p_cache);
                    p_data = p_cache->cache[p_cache->idx++ & 0x1].p_buffer;
                    cont = true;
                }
            }
            else
            {
                err = NRFX_ERROR_BUSY;
            }
        }
    }
    else if (RX_CACHE_SUPPORTED && (p_cb->flags & UARTE_FLAG_RX_USE_CACHE))
    {
        // The cache was used for the first buffer. It is expected that the following
        // buffer will also be cached.
        err = NRFX_ERROR_FORBIDDEN;
    }
    else
    {
        cont = true;
    }

    if (cont)
    {
        err = rx_buffer_set(p_uarte, p_cb, p_data, length);
    }

    uarte_int_unlock(p_uarte, int_enabled);

    return err;
}
1524 
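/*
 * Illustrative usage sketch (not part of the driver): continuous reception can
 * be sustained by answering NRFX_UARTE_EVT_RX_BUF_REQUEST with a fresh buffer,
 * so the driver always has a next buffer queued. The buffer array, index, and
 * handler names below are hypothetical, and it is assumed that the instance
 * pointer was registered as the handler context.
 *
 *   static uint8_t m_rx_bufs[2][64];
 *   static uint8_t m_rx_buf_idx;
 *
 *   static void uarte_handler(nrfx_uarte_event_t const * p_event, void * p_context)
 *   {
 *       nrfx_uarte_t const * p_inst = p_context;
 *
 *       if (p_event->type == NRFX_UARTE_EVT_RX_BUF_REQUEST)
 *       {
 *           uint8_t * p_buf = m_rx_bufs[m_rx_buf_idx++ & 0x1];
 *
 *           (void)nrfx_uarte_rx_buffer_set(p_inst, p_buf, sizeof(m_rx_bufs[0]));
 *       }
 *   }
 */
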
static void rx_flush(NRF_UARTE_Type * p_uarte, uarte_control_block_t * p_cb)
{
    if (!(p_cb->flags & UARTE_FLAG_RX_KEEP_FIFO_CONTENT))
    {
        p_cb->rx.flush.length = 0;
        return;
    }

    if (USE_WORKAROUND_FOR_FLUSHRX_ANOMALY)
    {
        /* There is a HW bug which results in the RX amount value not being updated
         * when the FIFO was empty. However, the RXSTARTED event is only set if there
         * was any data in the FIFO, so it can be used to detect that case. */
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
    }

    nrfy_uarte_rx_buffer_set(p_uarte, p_cb->rx.flush.p_buffer, UARTE_HW_RX_FIFO_SIZE);
    /* The final part of handling the RXTO event is in the ENDRX interrupt
     * handler. ENDRX is generated as a result of the FLUSHRX task.
     */
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);
    nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_FLUSHRX);
    while (!nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ENDRX))
    {
        /* empty */
    }
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);

    if (USE_WORKAROUND_FOR_FLUSHRX_ANOMALY)
    {
        p_cb->rx.flush.length = nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXSTARTED) ?
                                nrfy_uarte_rx_amount_get(p_uarte) : 0;
    }
    else
    {
        p_cb->rx.flush.length = nrfy_uarte_rx_amount_get(p_uarte);
    }
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
}

static void wait_for_rx_completion(NRF_UARTE_Type *        p_uarte,
                                   uarte_control_block_t * p_cb)
{
    while (!nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXTO))
    {}

    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDRX);
    nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXTO);

    rx_flush(p_uarte, p_cb);
    disable_hw_from_rx(p_uarte);

    p_cb->rx.curr.p_buffer = NULL;
    p_cb->rx.next.p_buffer = NULL;
    release_rx(p_cb);
}

static nrfx_err_t rx_abort(NRF_UARTE_Type *        p_uarte,
                           uarte_control_block_t * p_cb,
                           bool                    disable_all,
                           bool                    sync)
{
    uint32_t flag;
    bool endrx_startrx = nrfy_uarte_shorts_get(p_uarte, NRF_UARTE_SHORT_ENDRX_STARTRX) != 0;
    uint32_t int_enabled;

    // We need to ensure that the operation is not interrupted by the UARTE interrupt,
    // since we are changing state flags. Otherwise, the interrupt might run with the
    // RX_ABORTED flag set but before the STOPRX task is triggered, which could lead to
    // unexpected behavior.
    if (!((p_cb->flags & (UARTE_FLAG_RX_ENABLED | UARTE_FLAG_RX_ABORTED)) ==
        UARTE_FLAG_RX_ENABLED))
    {
        // If we are late with the abort, we still want to discard the flushed data,
        // since an explicit abort indicates that we are not interested in the data
        // that ended up in the FIFO.
        if (disable_all)
        {
            p_cb->rx.flush.length = 0;
        }

        return NRFX_ERROR_INVALID_STATE;
    }

    int_enabled = uarte_int_lock(p_uarte);

    if (disable_all || !endrx_startrx)
    {
        nrfy_uarte_shorts_disable(p_uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
        flag = UARTE_FLAG_RX_STOP_ON_END | UARTE_FLAG_RX_ABORTED | UARTE_FLAG_RX_FORCED_ABORT;
    }
    else
    {
        flag = UARTE_FLAG_RX_RESTARTED;
    }

    NRFX_ATOMIC_FETCH_OR(&p_cb->flags, flag);

    if (sync || !p_cb->handler)
    {
        nrfy_uarte_int_disable(p_uarte, rx_int_mask);
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPRX);
        wait_for_rx_completion(p_uarte, p_cb);
        int_enabled &= ~rx_int_mask;
    }
    else
    {
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPRX);
    }

    uarte_int_unlock(p_uarte, int_enabled);

    return NRFX_SUCCESS;
}

nrfx_err_t nrfx_uarte_rx_abort(nrfx_uarte_t const * p_instance, bool disable_all, bool sync)
{
    NRFX_ASSERT(p_instance);
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state == NRFX_DRV_STATE_INITIALIZED);

    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRF_UARTE_Type * p_uarte = p_instance->p_reg;

    return rx_abort(p_uarte, p_cb, disable_all, sync);
}

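/*
 * Illustrative usage sketch (not part of the driver): stopping an ongoing
 * reception and discarding any data held in the HW FIFO. With disable_all set,
 * the receiver is fully disabled; with sync set, the call returns only after
 * the reception has stopped. The instance name is hypothetical.
 *
 *   nrfx_err_t err = nrfx_uarte_rx_abort(&m_uarte_inst, true, false);
 *
 *   if (err == NRFX_ERROR_INVALID_STATE)
 *   {
 *       // RX was not enabled, or an abort was already in progress.
 *   }
 */
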
nrfx_err_t nrfx_uarte_rx(nrfx_uarte_t const * p_instance,
                         uint8_t *            p_data,
                         size_t               length)
{
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state == NRFX_DRV_STATE_INITIALIZED);
    NRFX_ASSERT(UARTE_LENGTH_VALIDATE(p_instance->drv_inst_idx, length));
    NRFX_ASSERT(p_data);
    NRFX_ASSERT(length > 0);

    nrfx_err_t err_code = nrfx_uarte_rx_buffer_set(p_instance, p_data, length);
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    if (err_code != NRFX_SUCCESS)
    {
        return err_code;
    }

    uint32_t flags = NRFX_UARTE_RX_ENABLE_CONT | NRFX_UARTE_RX_ENABLE_STOP_ON_END;

    err_code = nrfx_uarte_rx_enable(p_instance, flags);
    if (err_code != NRFX_ERROR_BUSY && err_code != NRFX_SUCCESS)
    {
        return err_code;
    }
    err_code = NRFX_SUCCESS;

    if (p_cb->handler == NULL)
    {
        size_t rx_amount = 0;

        do
        {
            err_code = nrfx_uarte_rx_ready(p_instance, &rx_amount);
        } while (err_code == NRFX_ERROR_BUSY);

        if ((err_code == NRFX_ERROR_ALREADY) || (length > rx_amount))
        {
            err_code = NRFX_ERROR_FORBIDDEN;
        }
        else
        {
            err_code = nrfx_uarte_rx_abort(p_instance, true, true);
            NRFX_ASSERT(err_code == NRFX_SUCCESS);
        }
    }

    return err_code;
}

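/*
 * Illustrative usage sketch (not part of the driver): blocking reception of a
 * fixed number of bytes when the driver was initialized without an event
 * handler. The instance and buffer names are hypothetical.
 *
 *   uint8_t buf[16];
 *   nrfx_err_t err = nrfx_uarte_rx(&m_uarte_inst, buf, sizeof(buf));
 *
 *   if (err == NRFX_SUCCESS)
 *   {
 *       // buf now holds sizeof(buf) received bytes.
 *   }
 */
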
nrfx_err_t nrfx_uarte_rx_ready(nrfx_uarte_t const * p_instance, size_t * p_rx_amount)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    NRFX_ASSERT(p_cb->state == NRFX_DRV_STATE_INITIALIZED);

    if (p_cb->handler)
    {
        return NRFX_ERROR_FORBIDDEN;
    }

    if (!nrfy_uarte_enable_check(p_instance->p_reg))
    {
        if (p_rx_amount)
        {
            *p_rx_amount = nrfy_uarte_rx_amount_get(p_instance->p_reg);
        }

        return NRFX_ERROR_ALREADY;
    }
    else if (nrfy_uarte_event_check(p_instance->p_reg, NRF_UARTE_EVENT_ENDRX))
    {
        if (p_rx_amount)
        {
            *p_rx_amount = nrfy_uarte_rx_amount_get(p_instance->p_reg);
        }
        return NRFX_SUCCESS;
    }
    else
    {
        return NRFX_ERROR_BUSY;
    }
}

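/*
 * Illustrative usage sketch (not part of the driver): non-blocking polling in
 * handler-less mode. Instead of spinning on the call, an application can check
 * once per main-loop iteration whether the previously set buffer has been
 * filled. Names are hypothetical.
 *
 *   size_t rx_amount;
 *   nrfx_err_t err = nrfx_uarte_rx_ready(&m_uarte_inst, &rx_amount);
 *
 *   if (err == NRFX_SUCCESS)
 *   {
 *       // rx_amount bytes are available in the buffer provided earlier with
 *       // nrfx_uarte_rx_buffer_set().
 *   }
 */
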
nrfx_err_t nrfx_uarte_int_trigger(nrfx_uarte_t const * p_instance)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    if (!p_cb->handler)
    {
        return NRFX_ERROR_FORBIDDEN;
    }

    if (!(NRFX_ATOMIC_FETCH_OR(&p_cb->flags, UARTE_FLAG_TRIGGER) & UARTE_FLAG_TRIGGER))
    {
        NRFX_IRQ_PENDING_SET(nrfx_get_irq_number((void *)p_instance->p_reg));
    }

    return NRFX_SUCCESS;
}

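/*
 * Illustrative usage sketch (not part of the driver): forcing execution of the
 * event handler from the UARTE interrupt context, e.g. to serialize user code
 * with the driver's interrupt handling. The handler then receives
 * NRFX_UARTE_EVT_TRIGGER. The instance name is hypothetical.
 *
 *   if (nrfx_uarte_int_trigger(&m_uarte_inst) == NRFX_SUCCESS)
 *   {
 *       // NRFX_UARTE_EVT_TRIGGER will be reported from the IRQ handler.
 *   }
 */
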
uint32_t nrfx_uarte_errorsrc_get(nrfx_uarte_t const * p_instance)
{
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state != NRFX_DRV_STATE_UNINITIALIZED);
    /* Function must be used in blocking mode only. */
    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].handler == NULL);

    nrfy_uarte_event_clear(p_instance->p_reg, NRF_UARTE_EVENT_ERROR);
    return nrfy_uarte_errorsrc_get_and_clear(p_instance->p_reg);
}

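/*
 * Illustrative usage sketch (not part of the driver): reading and clearing the
 * accumulated error sources after a blocking reception. The returned value is
 * a bitmask of hardware error sources. The instance name is hypothetical.
 *
 *   uint32_t errorsrc = nrfx_uarte_errorsrc_get(&m_uarte_inst);
 *
 *   if (errorsrc != 0)
 *   {
 *       // Inspect the bitmask (e.g. overrun, parity, framing errors).
 *   }
 */
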
bool nrfx_uarte_rx_new_data_check(nrfx_uarte_t const * p_instance)
{
    uarte_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    bool flushed_data = (NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_FLAG_RX_FROM_FLUSH) &
                        UARTE_FLAG_RX_FROM_FLUSH) != 0;

    if (nrfy_uarte_event_check(p_instance->p_reg, NRF_UARTE_EVENT_RXDRDY) || flushed_data)
    {
        nrfy_uarte_event_clear(p_instance->p_reg, NRF_UARTE_EVENT_RXDRDY);
        return true;
    }
    return false;
}

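/*
 * Illustrative usage sketch (not part of the driver): detecting an idle line
 * by periodically checking whether any new byte has arrived since the previous
 * check. The instance name and the polling period are hypothetical.
 *
 *   if (!nrfx_uarte_rx_new_data_check(&m_uarte_inst))
 *   {
 *       // No byte was received since the last call - the line has been idle
 *       // for at least one polling period.
 *   }
 */
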
static void rxstarted_irq_handler(NRF_UARTE_Type * p_reg, uarte_control_block_t * p_cb)
{
    bool cache_used = RX_CACHE_SUPPORTED && (p_cb->flags & UARTE_FLAG_RX_USE_CACHE);

    // If both buffers are already set up, just leave. This is possible in the
    // following scenario: 1 byte was requested and received. The RX done event
    // is generated, and from that event nrfx_uarte_rx_buffer_set is called to
    // receive the next RX, which sets the current buffer. After the ENDRX event
    // is processed, the RXSTARTED event (from that 1 byte transfer) is processed.
    // From there the RX buffer request is issued and the next buffer is provided,
    // so both buffers are set. The next RX is started, RXSTARTED is triggered,
    // and we end up in a situation where both buffers are already set.
    if ((p_cb->rx.curr.p_buffer != NULL) && (p_cb->rx.next.p_buffer != NULL))
    {
        return;
    }

    if (!cache_used)
    {
        user_handler(p_cb, NRFX_UARTE_EVT_RX_BUF_REQUEST);
        return;
    }

    size_t len = get_cache_buf_len(p_cb->rx.p_cache);
    nrfx_uarte_rx_cache_t * p_cache = p_cb->rx.p_cache;

    if (len)
    {
        uint8_t * p_buf = p_cache->cache[p_cache->idx++ & 0x1].p_buffer;
        nrfx_err_t err = rx_buffer_set(p_reg, p_cb, p_buf, len);

        (void)err;
        NRFX_ASSERT(err == NRFX_SUCCESS);
    }

    if (p_cache->buf_req)
    {
        user_handler(p_cb, NRFX_UARTE_EVT_RX_BUF_REQUEST);
        p_cache->buf_req = false;
    }
}

static void rxto_irq_handler(NRF_UARTE_Type *        p_uarte,
                             uarte_control_block_t * p_cb)
{
    if (p_cb->rx.curr.p_buffer)
    {
        handler_on_rx_done(p_cb, p_cb->rx.curr.p_buffer, 0, true);
        p_cb->rx.curr.p_buffer = NULL;
    }

    if (RX_CACHE_SUPPORTED && (p_cb->flags & UARTE_FLAG_RX_USE_CACHE))
    {
        p_cb->rx.p_cache->user[0] = (nrfy_uarte_buffer_t){ NULL, 0 };
    }

    if (!(p_cb->flags & UARTE_FLAG_RX_FORCED_ABORT))
    {
        rx_flush(p_uarte, p_cb);
    }

    on_rx_disabled(p_uarte, p_cb, p_cb->rx.flush.length);
}

static bool endrx_irq_handler(NRF_UARTE_Type *        p_uarte,
                              uarte_control_block_t * p_cb,
                              bool                    rxstarted)
{
    size_t rx_amount = (size_t)nrfy_uarte_rx_amount_get(p_uarte);
    bool premature = p_cb->flags & (UARTE_FLAG_RX_RESTARTED | UARTE_FLAG_RX_ABORTED);
    bool aborted = false;
    bool late = false;
    uint8_t * p_buf = p_cb->rx.curr.p_buffer;
    size_t len = rx_amount + p_cb->rx.off;

    p_cb->rx.curr = p_cb->rx.next;
    p_cb->rx.next = (nrfy_uarte_buffer_t){ NULL, 0 };
    handler_on_rx_done(p_cb, p_buf, len, premature);
    p_cb->rx.off = 0;

    NRFX_CRITICAL_SECTION_ENTER();

    p_cb->flags &= ~UARTE_FLAG_RX_RESTARTED;
    nrfy_uarte_shorts_disable(p_uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
    if (p_cb->flags & UARTE_FLAG_RX_ABORTED)
    {
        aborted = true;
    }
    else if (p_cb->rx.curr.p_buffer == NULL)
    {
        if (p_cb->flags & UARTE_FLAG_RX_STOP_ON_END)
        {
            nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPRX);
            p_cb->flags |= UARTE_FLAG_RX_ABORTED;
        }
    }
    else if (!((p_cb->flags & UARTE_FLAG_RX_CONT) &&
                (rxstarted || nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXSTARTED))))
    {
        // If a continuous transfer is set, we expect that a new RX has already been
        // started by the short. However, it is possible that the buffer was provided
        // after ENDRX occurred. This can be checked by examining the RXSTARTED event:
        // if it did not occur, the second buffer was set but did not start and must be
        // started manually. Usually RXSTARTED occurs almost immediately after ENDRX
        // (if the short is set), so checking the rxstarted variable seems to be enough,
        // but there are cases when RXSTARTED may be slightly delayed, so if it is not
        // set we need to check the HW register to make sure that it did not occur.
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTRX);
        if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_RXTO))
        {
            late = true;
        }
    }

    NRFX_CRITICAL_SECTION_EXIT();

    if (late)
    {
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXTO);
        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED);
        nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPRX);
        user_handler(p_cb, NRFX_UARTE_EVT_RX_BUF_TOO_LATE);
    }

    return aborted;
}

static void pending_tx_handler(NRF_UARTE_Type *  p_uarte,
                               uarte_tx_data_t * p_tx)
{
    /* If there is a pending TX request, it means that nrfx_uarte_tx()
     * was called during an ongoing blocking transfer. Handling the
     * TXSTOPPED interrupt means that the blocking transfer has completed.
     */
    NRFX_ASSERT(p_tx->next.p_buffer);

    NRFX_CRITICAL_SECTION_ENTER();

    if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED))
    {
        p_tx->curr.p_buffer = p_tx->next.p_buffer;
        p_tx->next.p_buffer = NULL;
        p_tx->amount = -1;
        tx_start(p_uarte, p_tx->curr.p_buffer, p_tx->curr.length, true);
    }

    NRFX_CRITICAL_SECTION_EXIT();
}

static void txstopped_irq_handler(NRF_UARTE_Type *        p_uarte,
                                  uarte_control_block_t * p_cb)
{
    nrfy_uarte_int_disable(p_uarte, NRF_UARTE_INT_TXSTOPPED_MASK);

    NRFX_CRITICAL_SECTION_ENTER();
    disable_hw_from_tx(p_uarte, p_cb);
    NRFX_CRITICAL_SECTION_EXIT();

    // If no length is set, it means that it was a blocking transfer.
    if (p_cb->tx.curr.length == 0)
    {
        return;
    }

    // If p_buffer is NULL, it indicates that the TX setup interrupted a poll out
    // and a TX buffer is pending.
    if (p_cb->tx.curr.p_buffer == NULL)
    {
        pending_tx_handler(p_uarte, &p_cb->tx);
        return;
    }

    size_t amount;
    bool use_cache;
    bool aborted = p_cb->flags & UARTE_FLAG_TX_ABORTED;
    NRFX_CRITICAL_SECTION_ENTER();
    if (p_cb->flags & UARTE_FLAG_TX_PENDING)
    {
        amount = 0;
        use_cache = !nrf_dma_accessible_check(p_uarte, p_cb->tx.curr.p_buffer);
    }
    else
    {
        amount = p_cb->tx.amount >= 0 ? (size_t)p_cb->tx.amount : nrfy_uarte_tx_amount_get(p_uarte);
        use_cache = true;
    }
    NRFX_CRITICAL_SECTION_EXIT();

    p_cb->tx.off += amount;
    if (p_cb->tx.off == p_cb->tx.curr.length || aborted)
    {
        uint32_t off = p_cb->tx.off;

        // Transfer completed.
        p_cb->flags &= ~UARTE_FLAG_TX_ABORTED;
        p_cb->tx.curr.length = 0;
        p_cb->tx.off = 0;
        user_handler_on_tx_done(p_cb, p_cb->tx.curr.p_buffer, off, aborted);
    }
    else
    {
        NRFX_CRITICAL_SECTION_ENTER();
        p_cb->flags &= ~UARTE_FLAG_TX_PENDING;
        tx_prepare_start(p_uarte, p_cb, use_cache);
        NRFX_CRITICAL_SECTION_EXIT();
    }
}

static void error_irq_handler(NRF_UARTE_Type *        p_uarte,
                              uarte_control_block_t * p_cb)
{
    user_handler_on_error(p_uarte, p_cb);
}

static void endtx_irq_handler(NRF_UARTE_Type * p_uarte, uarte_control_block_t * p_cb)
{
    if (NRFX_IS_ENABLED(NRFX_UARTE_CONFIG_TX_LINK) && (p_cb->flags & UARTE_FLAG_TX_LINKED))
    {
        uint8_t const * p_buf = p_cb->tx.curr.p_buffer;
        uint32_t len = p_cb->tx.curr.length;
        bool aborted = p_cb->flags & UARTE_FLAG_TX_ABORTED;

        nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDTX);
        // If we have no PPI connection, start the transfer as soon as possible.
        if (!nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_TXSTARTED) && !aborted)
        {
            nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STARTTX);
        }

#if NRF_UARTE_HAS_ENDTX_STOPTX_SHORT
        nrfy_uarte_int_disable(p_uarte, NRF_UARTE_INT_ENDTX_MASK);
        nrfy_uarte_shorts_enable(p_uarte, NRF_UARTE_SHORT_ENDTX_STOPTX);
#endif
        NRFX_CRITICAL_SECTION_ENTER();
        p_cb->tx.curr = p_cb->tx.next;
        p_cb->tx.next.length = 0;
        p_cb->tx.next.p_buffer = NULL;
        p_cb->flags &= ~UARTE_FLAG_TX_LINKED;
        aborted = p_cb->flags & UARTE_FLAG_TX_ABORTED;
        NRFX_CRITICAL_SECTION_EXIT();

        if (aborted)
        {
            p_cb->tx.amount = 0;
            len = nrfy_uarte_tx_amount_get(p_uarte);
        }

        user_handler_on_tx_done(p_cb, p_buf, len, aborted);
    }
    else
    {
        // Locking, since a blocking transfer can interrupt at any time. In that case
        // we don't want to stop the ongoing blocking transfer.
        NRFX_CRITICAL_SECTION_ENTER();
        if (nrfy_uarte_event_check(p_uarte, NRF_UARTE_EVENT_ENDTX))
        {
            nrfy_uarte_event_clear(p_uarte, NRF_UARTE_EVENT_ENDTX);
            nrfy_uarte_task_trigger(p_uarte, NRF_UARTE_TASK_STOPTX);
        }
        NRFX_CRITICAL_SECTION_EXIT();
    }
}

static void int_trigger_handler(uarte_control_block_t * p_cb)
{
    user_handler(p_cb, NRFX_UARTE_EVT_TRIGGER);
}

static inline bool event_check_and_clear(NRF_UARTE_Type * p_uarte,
                                         nrf_uarte_event_t event,
                                         uint32_t int_mask)
{
    if (nrfy_uarte_event_check(p_uarte, event) && (int_mask & NRFY_EVENT_TO_INT_BITMASK(event)))
    {
        nrfy_uarte_event_clear(p_uarte, event);
        return true;
    }

    return false;
}

static inline bool event_check(NRF_UARTE_Type * p_uarte,
                               nrf_uarte_event_t event,
                               uint32_t int_mask)
{
    return nrfy_uarte_event_check(p_uarte, event) &&
           (int_mask & NRFY_EVENT_TO_INT_BITMASK(event));
}

static void irq_handler(NRF_UARTE_Type * p_uarte, uarte_control_block_t * p_cb)
{
    // ENDTX must be handled before TXSTOPPED, so we read the event status in the reverse
    // order of handling.
    uint32_t int_mask = nrfy_uarte_int_enable_check(p_uarte, UINT32_MAX);
    bool txstopped = event_check(p_uarte, NRF_UARTE_EVENT_TXSTOPPED, int_mask);
    bool endtx = event_check(p_uarte, NRF_UARTE_EVENT_ENDTX, int_mask);

    if (p_cb->handler)
    {
        if (event_check_and_clear(p_uarte, NRF_UARTE_EVENT_ERROR, int_mask))
        {
            error_irq_handler(p_uarte, p_cb);
        }

        // ENDRX must be handled before RXSTARTED, and RXTO must be handled last. We
        // collect the state of all three events before processing to prevent reordering
        // in case of preemption by a higher-priority interrupt. We read the event status
        // in the reverse order of handling.
        bool rxto = event_check_and_clear(p_uarte, NRF_UARTE_EVENT_RXTO, int_mask);
        bool rxstarted = event_check_and_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED, int_mask);
        bool endrx = event_check_and_clear(p_uarte, NRF_UARTE_EVENT_ENDRX, int_mask);
        bool rxdrdy = event_check_and_clear(p_uarte, NRF_UARTE_EVENT_RXDRDY, int_mask);

        if (rxdrdy)
        {
            user_handler(p_cb, NRFX_UARTE_EVT_RX_BYTE);
        }

        if (endrx)
        {
            // If the interrupt was executed exactly when ENDRX occurred, it is possible
            // that RXSTARTED (which is read before ENDRX) reads as false even though it
            // actually occurred (if there is a linked reception). Read it again to be sure.
            if (!rxstarted)
            {
                rxstarted = event_check_and_clear(p_uarte, NRF_UARTE_EVENT_RXSTARTED, int_mask);
            }

            if (endrx_irq_handler(p_uarte, p_cb, rxstarted))
            {
                rxstarted = false;
            }
        }

        if (rxstarted)
        {
            rxstarted_irq_handler(p_uarte, p_cb);
        }

        if (rxto)
        {
            rxto_irq_handler(p_uarte, p_cb);
        }
    }

    if (endtx)
    {
        endtx_irq_handler(p_uarte, p_cb);
    }

    if (txstopped)
    {
        txstopped_irq_handler(p_uarte, p_cb);
    }

    if (NRFX_ATOMIC_FETCH_AND(&p_cb->flags, ~UARTE_FLAG_TRIGGER) & UARTE_FLAG_TRIGGER)
    {
        int_trigger_handler(p_cb);
    }
}

NRFX_INSTANCE_IRQ_HANDLERS(UARTE, uarte)

#endif // NRFX_CHECK(NRFX_UARTE_ENABLED)