/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_async_rx.h>

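/* Advance a buffer index by one with wrap-around. The mask form assumes that
 * config->buf_cnt is a power of two.
 */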
static uint8_t inc(struct uart_async_rx *rx_data, uint8_t val)
{
	return (val + 1) & (rx_data->config->buf_cnt - 1);
}

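/* Get the idx-th buffer slot. The user-provided memory is treated as an array
 * of slots, each holding a struct uart_async_rx_buf header followed by buf_len
 * bytes of data.
 */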
static struct uart_async_rx_buf *get_buf(struct uart_async_rx *rx_data, uint8_t idx)
{
	uint8_t *p = rx_data->config->buffer;

	p += idx * (rx_data->buf_len + sizeof(struct uart_async_rx_buf));

	return (struct uart_async_rx_buf *)p;
}

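/* Request a buffer for the UART driver. Returns a pointer to the data area of
 * the next free buffer (e.g. to be handed to the driver on a
 * UART_RX_BUF_REQUEST event) or NULL when no buffer is currently free.
 */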
uint8_t *uart_async_rx_buf_req(struct uart_async_rx *rx_data)
{
	uint8_t *data = NULL;

	if (rx_data->free_buf_cnt != 0) {
		struct uart_async_rx_buf *buf = get_buf(rx_data, rx_data->drv_buf_idx);

		data = buf->buffer;
		rx_data->drv_buf_idx = inc(rx_data, rx_data->drv_buf_idx);

		atomic_dec(&rx_data->free_buf_cnt);
	}

	return data;
}

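/* Account for newly received data. Called when the driver reports (UART_RX_RDY)
 * that it has written 'length' bytes into 'buffer'.
 */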
void uart_async_rx_on_rdy(struct uart_async_rx *rx_data, uint8_t *buffer, size_t length)
{
	/* CONTAINER_OF cannot be used here because its type validation fails
	 * due to the mismatch between uint8_t * and the uint8_t [] member.
	 */
	struct uart_async_rx_buf *rx_buf =
		(struct uart_async_rx_buf *)(buffer - offsetof(struct uart_async_rx_buf, buffer));

	rx_buf->wr_idx += length;
	__ASSERT_NO_MSG(rx_buf->wr_idx <= rx_data->buf_len);

	atomic_add(&rx_data->pending_bytes, length);
}

static void buf_reset(struct uart_async_rx_buf *buf)
{
	buf->wr_idx = 0;
	buf->completed = 0;
}

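/* Return a buffer to the free pool once the reader has consumed all of its
 * data and the driver has released it.
 */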
static void usr_rx_buf_release(struct uart_async_rx *rx_data, struct uart_async_rx_buf *buf)
{
	buf_reset(buf);
	rx_data->rd_idx = 0;
	rx_data->rd_buf_idx = inc(rx_data, rx_data->rd_buf_idx);
	atomic_inc(&rx_data->free_buf_cnt);
	__ASSERT_NO_MSG(rx_data->free_buf_cnt <= rx_data->config->buf_cnt);
}

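/* Handle a buffer released by the driver (UART_RX_BUF_RELEASED). The buffer is
 * only marked as completed here; it returns to the free pool once the reader
 * has consumed all of its data.
 */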
void uart_async_rx_on_buf_rel(struct uart_async_rx *rx_data, uint8_t *buffer)
{
	/* CONTAINER_OF cannot be used here because its type validation fails
	 * due to the mismatch between uint8_t * and the uint8_t [] member.
	 */
	struct uart_async_rx_buf *rx_buf =
		(struct uart_async_rx_buf *)(buffer - offsetof(struct uart_async_rx_buf, buffer));

	rx_buf->completed = 1;
}

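/* Claim received data for processing. Points *data at the oldest unread bytes
 * and returns how many of them, at most 'length', are contiguously available
 * in the current buffer. Claimed data remains valid until it is consumed.
 */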
size_t uart_async_rx_data_claim(struct uart_async_rx *rx_data, uint8_t **data, size_t length)
{
	struct uart_async_rx_buf *buf;
	int rem;

	if ((rx_data->pending_bytes == 0) || (length == 0)) {
		return 0;
	}

	do {
		buf = get_buf(rx_data, rx_data->rd_buf_idx);
		/* Buffers are normally released in the consume phase, but a
		 * release may also be needed here (e.g. the buffer was not yet
		 * completed when its last data was consumed).
		 */
		if ((buf->completed == 1) && (rx_data->rd_idx == buf->wr_idx)) {
			usr_rx_buf_release(rx_data, buf);
		} else {
			break;
		}
	} while (1);

	*data = &buf->buffer[rx_data->rd_idx];
	rem = buf->wr_idx - rx_data->rd_idx;

	return MIN(length, rem);
}

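/* Consume previously claimed data. Advances the read index by 'length',
 * releases the current buffer once it is completed and fully read, and returns
 * true if at least one free buffer remains.
 */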
bool uart_async_rx_data_consume(struct uart_async_rx *rx_data, size_t length)
{
	struct uart_async_rx_buf *buf = get_buf(rx_data, rx_data->rd_buf_idx);

	rx_data->rd_idx += length;
	/* Attempt to release the buffer if it is completed and all data is consumed. */
	if ((buf->completed == 1) && (rx_data->rd_idx == buf->wr_idx)) {
		usr_rx_buf_release(rx_data, buf);
	}

	atomic_sub(&rx_data->pending_bytes, length);

	__ASSERT_NO_MSG(rx_data->rd_idx <= buf->wr_idx);

	return rx_data->free_buf_cnt > 0;
}

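/* Restore the initial state: all buffers free and empty, no pending data. */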
void uart_async_rx_reset(struct uart_async_rx *rx_data)
{
	rx_data->free_buf_cnt = rx_data->config->buf_cnt;
	rx_data->rd_idx = 0;
	rx_data->rd_buf_idx = 0;
	rx_data->drv_buf_idx = 0;
	rx_data->pending_bytes = 0;
	for (uint8_t i = 0; i < rx_data->config->buf_cnt; i++) {
		buf_reset(get_buf(rx_data, i));
	}
}

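/* Initialize the helper. The user-provided memory (config->buffer, config->length
 * bytes) is split into config->buf_cnt equal slots; UART_ASYNC_RX_BUF_OVERHEAD
 * bytes of each slot are used for per-buffer bookkeeping.
 */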
int uart_async_rx_init(struct uart_async_rx *rx_data,
		       const struct uart_async_rx_config *config)
{
	__ASSERT_NO_MSG(config->buf_cnt > 0);
	__ASSERT_NO_MSG(config->length / config->buf_cnt <= UINT8_MAX);
	memset(rx_data, 0, sizeof(*rx_data));
	rx_data->config = config;
	rx_data->buf_len = (config->length / config->buf_cnt) - UART_ASYNC_RX_BUF_OVERHEAD;

	if (rx_data->buf_len >= BIT(7)) {
		return -EINVAL;
	}
	uart_async_rx_reset(rx_data);

	return 0;
}

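/* The sketch below (not part of the original file) illustrates how this helper
 * can be wired into the standard Zephyr UART async event callback and a reader
 * context. Names such as example_uart_cb(), example_read() and the async_rx
 * instance are hypothetical, and uart_async_rx_get_buf_len() is assumed to be
 * provided by uart_async_rx.h. The intended flow is: hand out buffers on
 * UART_RX_BUF_REQUEST, account for data on UART_RX_RDY, mark buffers on
 * UART_RX_BUF_RELEASED, then claim and consume data from the reader side.
 */
#if 0 /* illustration only */
static struct uart_async_rx async_rx;

static void example_uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_RX_BUF_REQUEST: {
		/* Offer the next free buffer to the driver, if one is available. */
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);

		if (buf != NULL) {
			(void)uart_rx_buf_rsp(dev, buf, uart_async_rx_get_buf_len(&async_rx));
		}
		break;
	}
	case UART_RX_RDY:
		/* Record the bytes the driver has written into the buffer. */
		uart_async_rx_on_rdy(&async_rx, evt->data.rx.buf, evt->data.rx.len);
		break;
	case UART_RX_BUF_RELEASED:
		/* The driver no longer uses this buffer; mark it as completed. */
		uart_async_rx_on_buf_rel(&async_rx, evt->data.rx_buf.buf);
		break;
	default:
		break;
	}
}

static void example_read(void)
{
	uint8_t *data;
	size_t len;

	/* Claim contiguous chunks of received data, process them, then consume
	 * them so that the underlying buffers can be recycled.
	 */
	while ((len = uart_async_rx_data_claim(&async_rx, &data, SIZE_MAX)) > 0) {
		/* ... process data[0..len - 1] ... */
		(void)uart_async_rx_data_consume(&async_rx, len);
	}
}
#endif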