Lines Matching +full:hw +full:- +full:rx +full:- +full:buffer +full:- +full:offset

2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
4 * SPDX-License-Identifier: Apache-2.0
72 /* Determine if any instance is using asynchronous API with HW byte counting. */
90 /* The driver supports the case when either all or none of the instances use that HW feature. */
136  * RX timeout is divided into time slabs; this define tells how many divisions
137  * are made. More divisions give higher timeout accuracy at the cost of higher processor usage.
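
A minimal illustration of the slab split controlled by this define (the 10 ms value is an arbitrary example, and RX_TIMEOUT_DIV is assumed to be 5 here):

    /* Illustrative only: how a timeout passed to uart_rx_enable() maps to slabs. */
    int32_t timeout_us   = 10000;                        /* user timeout: 10 ms   */
    int32_t timeout_slab = timeout_us / RX_TIMEOUT_DIV;  /* 2 ms checked per slab */
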
141 /* Size of the hardware FIFO in the RX path. */
165 size_t offset; member
183 /* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
194 struct uarte_async_rx rx; member
283 (_config->flags & UARTE_CFG_FLAG_LOW_POWER))
296 ((dev->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE))), \
311 /* Non-zero for high-speed instances. The baud rate is adjusted by that ratio. */
335 (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false)
339 const struct uarte_nrfx_config *config = dev->config; in get_uarte_instance()
341 return config->uarte_regs; in get_uarte_instance()
359 /** @brief Disable the UARTE peripheral if it is no longer used by RX or TX.
367 * @param dis_mask Mask of the direction (RX or TX) which no longer uses the UARTE instance.
371 struct uarte_nrfx_data *data = dev->data; in uarte_disable_locked()
373 data->flags &= ~dis_mask; in uarte_disable_locked()
374 if (data->flags & UARTE_FLAG_LOW_POWER) { in uarte_disable_locked()
379 const struct uarte_nrfx_config *config = dev->config; in uarte_disable_locked()
381 if (data->async && HW_RX_COUNTING_ENABLED(config)) { in uarte_disable_locked()
382 nrfx_timer_disable(&config->timer); in uarte_disable_locked()
384 data->async->rx.total_byte_cnt = 0; in uarte_disable_locked()
385 data->async->rx.total_user_byte_cnt = 0; in uarte_disable_locked()
390 const struct uarte_nrfx_config *cfg = dev->config; in uarte_disable_locked()
392 nrf_gpd_retain_pins_set(cfg->pcfg, true); in uarte_disable_locked()
408 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_isr_int()
409 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_isr_int()
427 if (data->flags & UARTE_FLAG_POLL_OUT) { in uarte_nrfx_isr_int()
428 data->flags &= ~UARTE_FLAG_POLL_OUT; in uarte_nrfx_isr_int()
435 if (!data->int_driven) in uarte_nrfx_isr_int()
445 if (!data->int_driven) { in uarte_nrfx_isr_int()
450 data->int_driven->fifo_fill_lock = 0; in uarte_nrfx_isr_int()
451 if (!data->int_driven->tx_irq_enabled) { in uarte_nrfx_isr_int()
456 if (data->int_driven->disable_tx_irq) { in uarte_nrfx_isr_int()
457 data->int_driven->disable_tx_irq = false; in uarte_nrfx_isr_int()
469 if (data->int_driven->cb) { in uarte_nrfx_isr_int()
470 data->int_driven->cb(dev, data->int_driven->cb_data); in uarte_nrfx_isr_int()
489 const struct uarte_nrfx_config *config = dev->config; in baudrate_set()
494 return -EINVAL; in baudrate_set()
498 if (config->clock_freq > 0U) { in baudrate_set()
499 nrf_baudrate /= config->clock_freq / NRF_UARTE_BASE_FREQUENCY_16MHZ; in baudrate_set()
503 struct uarte_nrfx_data *data = dev->data; in baudrate_set()
505 data->nrf_baudrate = nrf_baudrate; in baudrate_set()
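
A worked example of the ratio adjustment performed above; the 128 MHz figure is an assumption for illustration, not a value taken from these matches:

    /* Illustrative arithmetic only: for an instance clocked at 128 MHz, the
     * BAUDRATE register value computed for the 16 MHz base is divided by 8.
     */
    uint32_t ratio = 128000000UL / NRF_UARTE_BASE_FREQUENCY_16MHZ; /* == 8 */
    nrf_baudrate /= ratio;                                         /* mirrors line 499 */
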
516 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_configure()
520 switch (cfg->stop_bits) { in uarte_nrfx_configure()
528 return -ENOTSUP; in uarte_nrfx_configure()
531 if (cfg->stop_bits != UART_CFG_STOP_BITS_1) { in uarte_nrfx_configure()
532 return -ENOTSUP; in uarte_nrfx_configure()
536 if (cfg->data_bits != UART_CFG_DATA_BITS_8) { in uarte_nrfx_configure()
537 return -ENOTSUP; in uarte_nrfx_configure()
540 switch (cfg->flow_ctrl) { in uarte_nrfx_configure()
548 return -ENOTSUP; in uarte_nrfx_configure()
554 switch (cfg->parity) { in uarte_nrfx_configure()
568 return -ENOTSUP; in uarte_nrfx_configure()
571 if (baudrate_set(dev, cfg->baudrate) != 0) { in uarte_nrfx_configure()
572 return -ENOTSUP; in uarte_nrfx_configure()
580 data->uart_config = *cfg; in uarte_nrfx_configure()
588 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_config_get()
590 *cfg = data->uart_config; in uarte_nrfx_config_get()
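
For context, a hedged sketch of the application-side call that ends up in uarte_nrfx_configure() above, using the generic Zephyr UART API; the uart0 node label is an assumption for illustration:

    #include <zephyr/drivers/uart.h>

    const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
    struct uart_config cfg = {
        .baudrate  = 115200,
        .parity    = UART_CFG_PARITY_NONE,
        .stop_bits = UART_CFG_STOP_BITS_1,
        .data_bits = UART_CFG_DATA_BITS_8,
        .flow_ctrl = UART_CFG_FLOW_CTRL_NONE,
    };

    /* Returns -ENOTSUP for settings the UARTE hardware cannot provide
     * (e.g. data bit counts other than 8, as checked above).
     */
    int err = uart_configure(uart, &cfg);
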
609 const struct uarte_nrfx_config *config = dev->config; in is_tx_ready()
611 bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX || in is_tx_ready()
655 const struct uarte_nrfx_config *config = dev->config; in uarte_periph_enable()
656 struct uarte_nrfx_data *data = dev->data; in uarte_periph_enable()
661 nrf_gpd_retain_pins_set(config->pcfg, false); in uarte_periph_enable()
666 (data->nrf_baudrate), (config->nrf_baudrate))); in uarte_periph_enable()
670 if (data->async) { in uarte_periph_enable()
672 const nrfx_timer_t *timer = &config->timer; in uarte_periph_enable()
676 for (int i = 0; i < data->async->rx.flush_cnt; i++) { in uarte_periph_enable()
684 if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) { in uarte_periph_enable()
685 nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1); in uarte_periph_enable()
689 if (data->int_driven && data->int_driven->rx_irq_enabled) { in uarte_periph_enable()
698 struct uarte_nrfx_data *data = dev->data; in uarte_enable_locked()
699 bool already_active = (data->flags & UARTE_FLAG_LOW_POWER) != 0; in uarte_enable_locked()
701 data->flags |= act_mask; in uarte_enable_locked()
715 const struct uarte_nrfx_config *config = dev->config; in tx_start()
727 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { in tx_start()
753 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_rx_counting_init()
754 const struct uarte_nrfx_config *cfg = dev->config; in uarte_nrfx_rx_counting_init()
760 NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); in uarte_nrfx_rx_counting_init()
762 uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); in uarte_nrfx_rx_counting_init()
766 ret = nrfx_timer_init(&cfg->timer, in uarte_nrfx_rx_counting_init()
771 return -EINVAL; in uarte_nrfx_rx_counting_init()
773 nrfx_timer_clear(&cfg->timer); in uarte_nrfx_rx_counting_init()
776 ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); in uarte_nrfx_rx_counting_init()
779 nrfx_timer_uninit(&cfg->timer); in uarte_nrfx_rx_counting_init()
780 return -EINVAL; in uarte_nrfx_rx_counting_init()
783 nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr); in uarte_nrfx_rx_counting_init()
784 nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi)); in uarte_nrfx_rx_counting_init()
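
For context: the setup above puts a TIMER instance into counter mode and uses a (D)PPI channel to route a UARTE receive event to the timer's COUNT task, so the timer increments once per received byte regardless of DMA buffer boundaries. A minimal, hedged sketch of reading that count back, as the driver later does with a capture:

    /* Capture CC channel 0 and read the total number of bytes received so far. */
    uint32_t rx_byte_cnt = nrfx_timer_capture(&cfg->timer, 0);
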
795 struct uarte_nrfx_data *data = dev->data; in uarte_async_init()
815 k_timer_init(&data->async->rx.timer, rx_timeout, NULL); in uarte_async_init()
816 k_timer_user_data_set(&data->async->rx.timer, (void *)dev); in uarte_async_init()
817 k_timer_init(&data->async->tx.timer, tx_timeout, NULL); in uarte_async_init()
818 k_timer_user_data_set(&data->async->tx.timer, (void *)dev); in uarte_async_init()
832 data->async->tx.pending = true; in start_tx_locked()
834 data->async->tx.pending = false; in start_tx_locked()
835 data->async->tx.amount = -1; in start_tx_locked()
836 tx_start(dev, data->async->tx.xfer_buf, data->async->tx.xfer_len); in start_tx_locked()
840 /* Set up the cache buffer (used for sending data that is not located in RAM).
841  * During setup, data is copied into the cache buffer and the transfer length is set.
847 struct uarte_nrfx_data *data = dev->data; in setup_tx_cache()
848 const struct uarte_nrfx_config *config = dev->config; in setup_tx_cache()
849 size_t remaining = data->async->tx.len - data->async->tx.cache_offset; in setup_tx_cache()
857 data->async->tx.xfer_len = len; in setup_tx_cache()
858 data->async->tx.xfer_buf = config->tx_cache; in setup_tx_cache()
859 memcpy(config->tx_cache, &data->async->tx.buf[data->async->tx.cache_offset], len); in setup_tx_cache()
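
A simplified, hedged sketch of the chunking idea behind setup_tx_cache(); CACHE_SIZE and fill_tx_cache() are illustrative names, not identifiers from this driver:

    #define CACHE_SIZE 32                 /* illustrative chunk size */
    static uint8_t tx_cache[CACHE_SIZE];

    /* Copy the next chunk of a non-RAM buffer into the RAM cache and return
     * how many bytes the next EasyDMA transfer should move.
     */
    static size_t fill_tx_cache(const uint8_t *buf, size_t len, size_t cache_offset)
    {
        size_t chunk = MIN(len - cache_offset, sizeof(tx_cache));

        memcpy(tx_cache, &buf[cache_offset], chunk);
        return chunk;
    }
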
867 struct uarte_nrfx_data *data = dev->data; in has_hwfc()
869 return data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS; in has_hwfc()
871 const struct uarte_nrfx_config *config = dev->config; in has_hwfc()
873 return config->hw_config.hwfc == NRF_UARTE_HWFC_ENABLED; in has_hwfc()
881 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_tx()
886 if (data->async->tx.len) { in uarte_nrfx_tx()
888 return -EBUSY; in uarte_nrfx_tx()
891 data->async->tx.len = len; in uarte_nrfx_tx()
892 data->async->tx.buf = buf; in uarte_nrfx_tx()
895 data->async->tx.xfer_buf = buf; in uarte_nrfx_tx()
896 data->async->tx.xfer_len = len; in uarte_nrfx_tx()
898 data->async->tx.cache_offset = 0; in uarte_nrfx_tx()
914 return -ENOTSUP; in uarte_nrfx_tx()
925 k_timer_start(&data->async->tx.timer, K_USEC(timeout), K_NO_WAIT); in uarte_nrfx_tx()
932 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_tx_abort()
935 if (data->async->tx.buf == NULL) { in uarte_nrfx_tx_abort()
936 return -EFAULT; in uarte_nrfx_tx_abort()
939 data->async->tx.pending = false; in uarte_nrfx_tx_abort()
940 k_timer_stop(&data->async->tx.timer); in uarte_nrfx_tx_abort()
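
For reference, a hedged sketch of how an application drives the asynchronous TX path implemented above; uart is an assumed device handle and the 100 ms timeout is arbitrary. The buffer must stay valid until UART_TX_DONE (or UART_TX_ABORTED) is delivered to the event callback.

    static const uint8_t msg[] = "hello\r\n";

    /* Ends up in uarte_nrfx_tx(); returns -EBUSY if a transfer is ongoing. */
    int err = uart_tx(uart, msg, sizeof(msg) - 1, 100 * USEC_PER_MSEC);

    /* To cancel an ongoing transfer (completion is then reported as UART_TX_ABORTED): */
    (void)uart_tx_abort(uart);
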
948 struct uarte_nrfx_data *data = dev->data; in user_callback()
950 if (data->async->user_callback) { in user_callback()
951 data->async->user_callback(dev, evt, data->async->user_data); in user_callback()
957 struct uarte_nrfx_data *data = dev->data; in notify_uart_rx_rdy()
960 .data.rx.buf = data->async->rx.buf, in notify_uart_rx_rdy()
961 .data.rx.len = len, in notify_uart_rx_rdy()
962 .data.rx.offset = data->async->rx.offset in notify_uart_rx_rdy()
1004 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_rx_enable()
1005 struct uarte_async_rx *async_rx = &data->async->rx; in uarte_nrfx_rx_enable()
1006 const struct uarte_nrfx_config *cfg = dev->config; in uarte_nrfx_rx_enable()
1009 if (cfg->disable_rx) { in uarte_nrfx_rx_enable()
1011 return -ENOTSUP; in uarte_nrfx_rx_enable()
1014 /* Signal error if RX is already enabled or if the driver is waiting in uarte_nrfx_rx_enable()
1016 * data from the UARTE internal RX FIFO. in uarte_nrfx_rx_enable()
1018 if (async_rx->enabled || async_rx->discard_fifo) { in uarte_nrfx_rx_enable()
1019 return -EBUSY; in uarte_nrfx_rx_enable()
1026 ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); in uarte_nrfx_rx_enable()
1031 async_rx->usr_buf = buf; in uarte_nrfx_rx_enable()
1039 (data->uart_config.baudrate), (cfg->baudrate)); in uarte_nrfx_rx_enable()
1041 async_rx->timeout = K_USEC(timeout); in uarte_nrfx_rx_enable()
1045 async_rx->timeout = K_NO_WAIT; in uarte_nrfx_rx_enable()
1048 async_rx->timeout = (timeout == SYS_FOREVER_US) ? in uarte_nrfx_rx_enable()
1050 async_rx->idle_cnt = 0; in uarte_nrfx_rx_enable()
1053 async_rx->timeout_us = timeout; in uarte_nrfx_rx_enable()
1054 async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV; in uarte_nrfx_rx_enable()
1057 async_rx->buf = buf; in uarte_nrfx_rx_enable()
1058 async_rx->buf_len = len; in uarte_nrfx_rx_enable()
1059 async_rx->offset = 0; in uarte_nrfx_rx_enable()
1060 async_rx->next_buf = NULL; in uarte_nrfx_rx_enable()
1061 async_rx->next_buf_len = 0; in uarte_nrfx_rx_enable()
1075 return -ENOTSUP; in uarte_nrfx_rx_enable()
1082 if (async_rx->flush_cnt) { in uarte_nrfx_rx_enable()
1083 int cpy_len = MIN(len, async_rx->flush_cnt); in uarte_nrfx_rx_enable()
1086 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { in uarte_nrfx_rx_enable()
1087 sys_cache_data_invd_range(cfg->rx_flush_buf, cpy_len); in uarte_nrfx_rx_enable()
1090 memcpy(buf, cfg->rx_flush_buf, cpy_len); in uarte_nrfx_rx_enable()
1093 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { in uarte_nrfx_rx_enable()
1098 len -= cpy_len; in uarte_nrfx_rx_enable()
1100 /* If the flushed content filled the whole new buffer, trigger the interrupt in uarte_nrfx_rx_enable()
1101 * to notify about the received data and disable RX from there. in uarte_nrfx_rx_enable()
1104 async_rx->flush_cnt -= cpy_len; in uarte_nrfx_rx_enable()
1105 memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len], in uarte_nrfx_rx_enable()
1106 async_rx->flush_cnt); in uarte_nrfx_rx_enable()
1108 (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { in uarte_nrfx_rx_enable()
1109 sys_cache_data_flush_range(cfg->rx_flush_buf, in uarte_nrfx_rx_enable()
1110 async_rx->flush_cnt); in uarte_nrfx_rx_enable()
1112 atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO); in uarte_nrfx_rx_enable()
1117 if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { in uarte_nrfx_rx_enable()
1119 k_timer_start(&async_rx->timer, async_rx->timeout, in uarte_nrfx_rx_enable()
1132 async_rx->enabled = true; in uarte_nrfx_rx_enable()
1149 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_rx_buf_rsp()
1150 struct uarte_async_rx *async_rx = &data->async->rx; in uarte_nrfx_rx_buf_rsp()
1155 if (async_rx->buf == NULL) { in uarte_nrfx_rx_buf_rsp()
1156 err = -EACCES; in uarte_nrfx_rx_buf_rsp()
1157 } else if (async_rx->next_buf == NULL) { in uarte_nrfx_rx_buf_rsp()
1160 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_rx_buf_rsp()
1162 err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); in uarte_nrfx_rx_buf_rsp()
1166 async_rx->next_usr_buf = buf; in uarte_nrfx_rx_buf_rsp()
1169 async_rx->next_buf = buf; in uarte_nrfx_rx_buf_rsp()
1170 async_rx->next_buf_len = len; in uarte_nrfx_rx_buf_rsp()
1172 /* If buffer is shorter than RX FIFO then there is a risk that due in uarte_nrfx_rx_buf_rsp()
1176 * manually start RX for that buffer. Thanks to RX FIFO there is in uarte_nrfx_rx_buf_rsp()
1185 err = -EBUSY; in uarte_nrfx_rx_buf_rsp()
1197 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_callback_set()
1199 if (!data->async) { in uarte_nrfx_callback_set()
1200 return -ENOTSUP; in uarte_nrfx_callback_set()
1203 data->async->user_callback = callback; in uarte_nrfx_callback_set()
1204 data->async->user_data = user_data; in uarte_nrfx_callback_set()
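
An end-to-end, hedged sketch of how an application typically uses the asynchronous RX path implemented above (buffer sizes, the 10 ms inactivity timeout and the simple double-buffering are illustrative choices; uart is an assumed device handle):

    #include <zephyr/drivers/uart.h>

    static uint8_t rx_bufs[2][64];

    static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
    {
        static int next = 1;

        switch (evt->type) {
        case UART_RX_RDY:
            /* evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len bytes. */
            break;
        case UART_RX_BUF_REQUEST:
            /* Provide the next buffer so reception continues without gaps. */
            (void)uart_rx_buf_rsp(dev, rx_bufs[next], sizeof(rx_bufs[next]));
            next ^= 1;
            break;
        case UART_RX_DISABLED:
            /* RX fully stopped; uart_rx_enable() may be called again. */
            break;
        default:
            break;
        }
    }

    /* Register the callback, then enable RX: 10000 us of inactivity flushes
     * whatever has been received so far to the application via UART_RX_RDY.
     */
    (void)uart_callback_set(uart, uart_cb, NULL);
    (void)uart_rx_enable(uart, rx_bufs[0], sizeof(rx_bufs[0]), 10 * USEC_PER_MSEC);
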
1211 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_rx_disable()
1212 struct uarte_async_rx *async_rx = &data->async->rx; in uarte_nrfx_rx_disable()
1216 if (async_rx->buf == NULL) { in uarte_nrfx_rx_disable()
1217 return -EFAULT; in uarte_nrfx_rx_disable()
1220 k_timer_stop(&async_rx->timer); in uarte_nrfx_rx_disable()
1224 if (async_rx->next_buf != NULL) { in uarte_nrfx_rx_disable()
1229 async_rx->enabled = false; in uarte_nrfx_rx_disable()
1230 async_rx->discard_fifo = true; in uarte_nrfx_rx_disable()
1265 struct uarte_nrfx_data *data = dev->data; in rx_timeout()
1266 struct uarte_async_rx *async_rx = &data->async->rx; in rx_timeout()
1270 async_rx->idle_cnt = 0; in rx_timeout()
1272 async_rx->idle_cnt++; in rx_timeout()
1273 /* We compare against RX_TIMEOUT_DIV - 1 to get an earlier rather than in rx_timeout()
1274 * a later timeout. idle_cnt is reset when the last RX activity (RXDRDY event) is in rx_timeout()
1275 * detected. It may happen that this occurs while RX is inactive for the whole in rx_timeout()
1276 * RX timeout period (which is the case when the transmission is short compared in rx_timeout()
1279 * then RX notification would come after (RX_TIMEOUT_DIV + 1) * timeout. in rx_timeout()
1281 if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) { in rx_timeout()
1287 k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT); in rx_timeout()
1290 const struct uarte_nrfx_config *cfg = dev->config; in rx_timeout()
1291 struct uarte_nrfx_data *data = dev->data; in rx_timeout()
1292 struct uarte_async_rx *async_rx = &data->async->rx; in rx_timeout()
1295 if (async_rx->is_in_irq) { in rx_timeout()
1306 read = nrfx_timer_capture(&cfg->timer, 0); in rx_timeout()
1308 read = async_rx->cnt.cnt; in rx_timeout()
1312 if (read != async_rx->total_byte_cnt) { in rx_timeout()
1313 async_rx->total_byte_cnt = read; in rx_timeout()
1314 async_rx->timeout_left = async_rx->timeout_us; in rx_timeout()
1319 * necessarily the amount available in the current buffer in rx_timeout()
1321 int32_t len = async_rx->total_byte_cnt - async_rx->total_user_byte_cnt; in rx_timeout()
1330 async_rx->cnt.cnt = async_rx->total_user_byte_cnt; in rx_timeout()
1334 /* Check for current buffer being full. in rx_timeout()
1336 * and the 'next' buffer is set up, then the SHORT between ENDRX and in rx_timeout()
1337 * STARTRX will mean that data will be going into the 'next' buffer in rx_timeout()
1342 if (len + async_rx->offset > async_rx->buf_len) { in rx_timeout()
1343 len = async_rx->buf_len - async_rx->offset; in rx_timeout()
1348 if (clipped || (async_rx->timeout_left < async_rx->timeout_slab)) { in rx_timeout()
1350 if (async_rx->buf != NULL) { in rx_timeout()
1352 async_rx->offset += len; in rx_timeout()
1353 async_rx->total_user_byte_cnt += len; in rx_timeout()
1356 async_rx->timeout_left -= async_rx->timeout_slab; in rx_timeout()
1363 k_timer_stop(&async_rx->timer); in rx_timeout()
1388 /* For VPR cores, reads and writes may be reordered; a barrier is needed. */ in error_isr()
1403 struct uarte_nrfx_data *data = dev->data; in rxstarted_isr()
1404 struct uarte_async_rx *async_rx = &data->async->rx; in rxstarted_isr()
1409 if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { in rxstarted_isr()
1413 if (async_rx->timeout_us != SYS_FOREVER_US) { in rxstarted_isr()
1414 k_timeout_t timeout = K_USEC(async_rx->timeout_slab); in rxstarted_isr()
1416 async_rx->timeout_left = async_rx->timeout_us; in rxstarted_isr()
1417 k_timer_start(&async_rx->timer, timeout, timeout); in rxstarted_isr()
1426 struct uarte_nrfx_data *data = dev->data; in endrx_isr()
1427 struct uarte_async_rx *async_rx = &data->async->rx; in endrx_isr()
1431 async_rx->is_in_irq = true; in endrx_isr()
1434 /* ensure rx timer is stopped - it will be restarted in RXSTARTED in endrx_isr()
1437 k_timer_stop(&async_rx->timer); in endrx_isr()
1440 * buffer in endrx_isr()
1442 const int rx_amount = nrf_uarte_rx_amount_get(uarte) + async_rx->flush_cnt; in endrx_isr()
1445 const struct uarte_nrfx_config *config = dev->config; in endrx_isr()
1447 dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, rx_amount, async_rx->buf); in endrx_isr()
1451 async_rx->buf = async_rx->usr_buf; in endrx_isr()
1453 async_rx->flush_cnt = 0; in endrx_isr()
1458 * events, which can occur already for the next buffer before we are in endrx_isr()
1459 * here to handle this buffer. (The next buffer is now already active in endrx_isr()
1462 int rx_len = rx_amount - async_rx->offset; in endrx_isr()
1469 async_rx->total_user_byte_cnt += rx_len; in endrx_isr()
1477 rx_buf_release(dev, async_rx->buf); in endrx_isr()
1478 async_rx->buf = async_rx->next_buf; in endrx_isr()
1479 async_rx->buf_len = async_rx->next_buf_len; in endrx_isr()
1481 async_rx->usr_buf = async_rx->next_usr_buf; in endrx_isr()
1483 async_rx->next_buf = NULL; in endrx_isr()
1484 async_rx->next_buf_len = 0; in endrx_isr()
1485 async_rx->offset = 0; in endrx_isr()
1487 if (async_rx->enabled) { in endrx_isr()
1488 /* If there is a next buffer, then STARTRX will have already been in endrx_isr()
1489 * invoked by the short (the next buffer will be filling up already) in endrx_isr()
1490 * and here we just do the swap of which buffer the driver is following, in endrx_isr()
1495 if (async_rx->buf) { in endrx_isr()
1503 /* Remove the short until the subsequent next buffer is setup */ in endrx_isr()
1513 async_rx->is_in_irq = false; in endrx_isr()
1517 /** @brief RX FIFO flushing
1519 * Due to the HW bug which does not update the RX.AMOUNT register when the FIFO was empty
1530 const struct uarte_nrfx_config *config = dev->config; in rx_flush()
1533 nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE); in rx_flush()
1550 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE) && in rx_flush()
1552 sys_cache_data_invd_range(config->rx_flush_buf, rx_amount); in rx_flush()
1558 /* This handler is called when the receiver is stopped. If RX was aborted
1563 const struct uarte_nrfx_config *config = dev->config; in rxto_isr()
1564 struct uarte_nrfx_data *data = dev->data; in rxto_isr()
1565 struct uarte_async_rx *async_rx = &data->async->rx; in rxto_isr()
1567 if (async_rx->buf) { in rxto_isr()
1569 (void)dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, 0, async_rx->buf); in rxto_isr()
1570 async_rx->buf = async_rx->usr_buf; in rxto_isr()
1572 rx_buf_release(dev, async_rx->buf); in rxto_isr()
1573 async_rx->buf = NULL; in rxto_isr()
1577 * 1. RX is disabled because all provided RX buffers have been filled. in rxto_isr()
1578 * 2. RX was explicitly disabled by a call to uart_rx_disable(). in rxto_isr()
1579 * In both cases, the rx_enabled flag is cleared, so that RX can be in rxto_isr()
1581 * In the second case, additionally, data from the UARTE internal RX in rxto_isr()
1584 async_rx->enabled = false; in rxto_isr()
1585 if (async_rx->discard_fifo) { in rxto_isr()
1586 async_rx->discard_fifo = false; in rxto_isr()
1592 async_rx->total_user_byte_cnt += rx_flush(dev); in rxto_isr()
1596 async_rx->flush_cnt = rx_flush(dev); in rxto_isr()
1619 const struct uarte_nrfx_config *config = dev->config; in txstopped_isr()
1620 struct uarte_nrfx_data *data = dev->data; in txstopped_isr()
1626 size_t amount = (data->async->tx.amount >= 0) ? in txstopped_isr()
1627 data->async->tx.amount : nrf_uarte_tx_amount_get(uarte); in txstopped_isr()
1631 if (data->flags & UARTE_FLAG_POLL_OUT) { in txstopped_isr()
1633 data->flags &= ~UARTE_FLAG_POLL_OUT; in txstopped_isr()
1642 if (!data->async->tx.buf) { in txstopped_isr()
1650 if (data->async->tx.pending) { in txstopped_isr()
1657 /* Cache buffer is used because tx_buf wasn't in RAM. */ in txstopped_isr()
1658 if (data->async->tx.buf != data->async->tx.xfer_buf) { in txstopped_isr()
1662 if (amount == data->async->tx.xfer_len) { in txstopped_isr()
1663 data->async->tx.cache_offset += amount; in txstopped_isr()
1672 amount = data->async->tx.cache_offset; in txstopped_isr()
1675 amount += data->async->tx.cache_offset; in txstopped_isr()
1679 k_timer_stop(&data->async->tx.timer); in txstopped_isr()
1682 .data.tx.buf = data->async->tx.buf, in txstopped_isr()
1685 if (amount == data->async->tx.len) { in txstopped_isr()
1692 data->async->tx.buf = NULL; in txstopped_isr()
1693 data->async->tx.len = 0; in txstopped_isr()
1705 struct uarte_nrfx_data *data = dev->data; in rxdrdy_isr()
1710 data->async->rx.idle_cnt = 0; in rxdrdy_isr()
1711 k_timer_start(&data->async->rx.timer, data->async->rx.timeout, K_NO_WAIT); in rxdrdy_isr()
1714 data->async->rx.cnt.cnt++; in rxdrdy_isr()
1734 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_isr_async()
1735 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_isr_async()
1736 struct uarte_async_rx *async_rx = &data->async->rx; in uarte_nrfx_isr_async()
1754 /* RXSTARTED must be handled after ENDRX because it starts the RX timeout in uarte_nrfx_isr_async()
1768 /* RXTO must be handled after ENDRX which should notify the buffer. in uarte_nrfx_isr_async()
1792 if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) { in uarte_nrfx_isr_async()
1796 ret = dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, async_rx->buf_len, in uarte_nrfx_isr_async()
1797 async_rx->buf); in uarte_nrfx_isr_async()
1801 async_rx->buf = async_rx->usr_buf; in uarte_nrfx_isr_async()
1803 notify_uart_rx_rdy(dev, async_rx->buf_len); in uarte_nrfx_isr_async()
1804 rx_buf_release(dev, async_rx->buf); in uarte_nrfx_isr_async()
1805 async_rx->buf_len = 0; in uarte_nrfx_isr_async()
1806 async_rx->buf = NULL; in uarte_nrfx_isr_async()
1819 * @return 0 if a character arrived, -1 if the input buffer is empty.
1823 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_poll_in()
1827 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_poll_in()
1829 if (data->async) { in uarte_nrfx_poll_in()
1830 return -ENOTSUP; in uarte_nrfx_poll_in()
1835 return -1; in uarte_nrfx_poll_in()
1838 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { in uarte_nrfx_poll_in()
1839 sys_cache_data_invd_range(config->poll_in_byte, 1); in uarte_nrfx_poll_in()
1842 *c = *config->poll_in_byte; in uarte_nrfx_poll_in()
1859 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_poll_out()
1861 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_poll_out()
1870 if (data->async && data->async->tx.len && in uarte_nrfx_poll_out()
1871 data->async->tx.amount < 0) { in uarte_nrfx_poll_out()
1872 data->async->tx.amount = nrf_uarte_tx_amount_get(uarte); in uarte_nrfx_poll_out()
1902 if (!(data->flags & UARTE_FLAG_POLL_OUT)) { in uarte_nrfx_poll_out()
1903 data->flags |= UARTE_FLAG_POLL_OUT; in uarte_nrfx_poll_out()
1912 *config->poll_out_byte = c; in uarte_nrfx_poll_out()
1913 tx_start(dev, config->poll_out_byte, 1); in uarte_nrfx_poll_out()
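
A brief, hedged sketch of the polling API whose driver side is shown above: echo every pending character back. It assumes the instance is not built with the asynchronous API (in which case uarte_nrfx_poll_in() returns -ENOTSUP, as shown above); uart is an assumed device handle.

    unsigned char c;

    /* uart_poll_in() returns 0 when a character was read and -1 when the
     * single-byte poll_in buffer used by this driver is empty.
     */
    while (uart_poll_in(uart, &c) == 0) {
        uart_poll_out(uart, c);
    }
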
1925 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_fifo_fill()
1927 len = MIN(len, data->int_driven->tx_buff_size); in uarte_nrfx_fifo_fill()
1928 if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) { in uarte_nrfx_fifo_fill()
1932 /* Copy data to RAM buffer for EasyDMA transfer */ in uarte_nrfx_fifo_fill()
1933 memcpy(data->int_driven->tx_buffer, tx_data, len); in uarte_nrfx_fifo_fill()
1938 data->int_driven->fifo_fill_lock = 0; in uarte_nrfx_fifo_fill()
1941 tx_start(dev, data->int_driven->tx_buffer, len); in uarte_nrfx_fifo_fill()
1956 const struct uarte_nrfx_config *config = dev->config; in uarte_nrfx_fifo_read()
1962 if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { in uarte_nrfx_fifo_read()
1963 sys_cache_data_invd_range(config->poll_in_byte, 1); in uarte_nrfx_fifo_read()
1967 rx_data[num_rx++] = *config->poll_in_byte; in uarte_nrfx_fifo_read()
1979 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_irq_tx_enable()
1987 data->int_driven->disable_tx_irq = false; in uarte_nrfx_irq_tx_enable()
1988 data->int_driven->tx_irq_enabled = true; in uarte_nrfx_irq_tx_enable()
1997 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_irq_tx_disable()
1999 data->int_driven->disable_tx_irq = true; in uarte_nrfx_irq_tx_disable()
2000 data->int_driven->tx_irq_enabled = false; in uarte_nrfx_irq_tx_disable()
2007 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_irq_tx_ready_complete()
2014 bool ready = data->int_driven->tx_irq_enabled && in uarte_nrfx_irq_tx_ready_complete()
2018 data->int_driven->fifo_fill_lock = 0; in uarte_nrfx_irq_tx_ready_complete()
2021 return ready ? data->int_driven->tx_buff_size : 0; in uarte_nrfx_irq_tx_ready_complete()
2088 struct uarte_nrfx_data *data = dev->data; in uarte_nrfx_irq_callback_set()
2090 data->int_driven->cb = cb; in uarte_nrfx_irq_callback_set()
2091 data->int_driven->cb_data = cb_data; in uarte_nrfx_irq_callback_set()
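
A hedged sketch of the interrupt-driven API that the fifo_fill/fifo_read and IRQ enable/disable handlers above implement; the 16-byte scratch buffer is an arbitrary illustration and uart is an assumed device handle:

    #include <zephyr/drivers/uart.h>

    static void irq_cb(const struct device *dev, void *user_data)
    {
        uint8_t buf[16];

        while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
            if (uart_irq_rx_ready(dev)) {
                int n = uart_fifo_read(dev, buf, sizeof(buf));

                (void)n; /* process n received bytes here */
            }
            if (uart_irq_tx_ready(dev)) {
                /* Nothing more to send in this sketch, so stop TX interrupts. */
                uart_irq_tx_disable(dev);
            }
        }
    }

    (void)uart_irq_callback_user_data_set(uart, irq_cb, NULL);
    uart_irq_rx_enable(uart);
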
2135 ret = nrfx_gppi_channel_alloc(&data->ppi_ch_endtx); in endtx_stoptx_ppi_init()
2138 return -EIO; in endtx_stoptx_ppi_init()
2141 nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx, in endtx_stoptx_ppi_init()
2144 nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx)); in endtx_stoptx_ppi_init()
2153 * - ENDTX->TXSTOPPED PPI enabled - just pend until TXSTOPPED event is set
2154 * - disable ENDTX interrupt and manually trigger STOPTX, pend for TXSTOPPED
2158 const struct uarte_nrfx_config *config = dev->config; in wait_for_tx_stopped()
2159 bool ppi_endtx = (config->flags & UARTE_CFG_FLAG_PPI_ENDTX) || in wait_for_tx_stopped()
2190 const struct uarte_nrfx_config *cfg = dev->config; in uarte_pm_resume()
2192 (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); in uarte_pm_resume()
2202 const struct uarte_nrfx_config *cfg = dev->config; in uarte_pm_suspend()
2203 struct uarte_nrfx_data *data = dev->data; in uarte_pm_suspend()
2207 if (data->async) { in uarte_pm_suspend()
2211 __ASSERT_NO_MSG(!data->async->rx.enabled); in uarte_pm_suspend()
2212 __ASSERT_NO_MSG(!data->async->tx.len); in uarte_pm_suspend()
2223 if (data->async && HW_RX_COUNTING_ENABLED(cfg)) { in uarte_pm_suspend()
2224 nrfx_timer_disable(&cfg->timer); in uarte_pm_suspend()
2226 data->async->rx.total_byte_cnt = 0; in uarte_pm_suspend()
2227 data->async->rx.total_user_byte_cnt = 0; in uarte_pm_suspend()
2235 if (data->int_driven) { in uarte_pm_suspend()
2236 data->int_driven->rx_irq_enabled = in uarte_pm_suspend()
2239 if (data->int_driven->rx_irq_enabled) { in uarte_pm_suspend()
2259 nrf_gpd_retain_pins_set(cfg->pcfg, true); in uarte_pm_suspend()
2264 (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP); in uarte_pm_suspend()
2274 return -ENOTSUP; in uarte_nrfx_pm_action()
2283 const struct uarte_nrfx_config *cfg = dev->config; in uarte_tx_path_init()
2290 if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { in uarte_tx_path_init()
2291 struct uarte_nrfx_data *data = dev->data; in uarte_tx_path_init()
2304 * using HW if TX is active (TXSTOPPED event set means TX is inactive). in uarte_tx_path_init()
2306 * Set TXSTOPPED event by requesting fake (zero-length) transfer. in uarte_tx_path_init()
2311 nrf_uarte_tx_buffer_set(uarte, cfg->poll_out_byte, 0); in uarte_tx_path_init()
2331 const struct uarte_nrfx_config *cfg = dev->config; in uarte_instance_init()
2335 ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs; in uarte_instance_init()
2339 err = uarte_nrfx_configure(dev, &((struct uarte_nrfx_data *)dev->data)->uart_config); in uarte_instance_init()
2346 nrf_uarte_baudrate_set(uarte, cfg->nrf_baudrate); in uarte_instance_init()
2347 nrf_uarte_configure(uarte, &cfg->hw_config); in uarte_instance_init()
2351 struct uarte_nrfx_data *data = dev->data; in uarte_instance_init()
2353 if (data->async) { in uarte_instance_init()