/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/drivers/serial/uart_async_to_irq.h>
#include <string.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(UART_ASYNC_TO_IRQ_LOG_NAME, CONFIG_UART_LOG_LEVEL);

/* Internal state flags. */

/* RX interrupt enabled. */
#define A2I_RX_IRQ_ENABLED	BIT(0)

/* TX interrupt enabled. */
#define A2I_TX_IRQ_ENABLED	BIT(1)

/* Error interrupt enabled. */
#define A2I_ERR_IRQ_ENABLED	BIT(2)

/* Receiver to be kept enabled. */
#define A2I_RX_ENABLE		BIT(3)

/* TX busy. */
#define A2I_TX_BUSY		BIT(4)
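/* The bound device is expected to store a pointer to this module's data and
 * config structures at the beginning of its own data and config structures,
 * hence the extra level of indirection in the accessors below.
 */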
static struct uart_async_to_irq_data *get_data(const struct device *dev)
{
	struct uart_async_to_irq_data **data = dev->data;

	return *data;
}

static const struct uart_async_to_irq_config *get_config(const struct device *dev)
{
	const struct uart_async_to_irq_config * const *config = dev->config;

	return *config;
}

/* Calculate the RX inactivity timeout in microseconds: the configured number
 * of bit times (CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT) at the current
 * baudrate.
 */
static uint32_t get_rx_timeout(const struct device *dev)
{
	struct uart_config cfg = { 0 };
	int err;
	uint32_t baudrate;

	err = uart_config_get(dev, &cfg);
	if (err == 0) {
		baudrate = cfg.baudrate;
	} else {
		baudrate = get_config(dev)->baudrate;
	}

	uint32_t us = (CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT * 1000000) / baudrate;

	return us;
}

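/* Enable the receiver with the provided buffer, using the baudrate-derived
 * inactivity timeout.
 */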
static int rx_enable(const struct device *dev,
		     struct uart_async_to_irq_data *data,
		     uint8_t *buf,
		     size_t len)
{
	int err;
	const struct uart_async_to_irq_config *config = get_config(dev);

	err = config->api->rx_enable(dev, buf, len, get_rx_timeout(dev));

	return err;
}

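/* Claim a buffer from the RX buffer pool and enable the receiver with it.
 * Returns -EBUSY if no buffer is currently available.
 */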
static int try_rx_enable(const struct device *dev, struct uart_async_to_irq_data *data)
{
	uint8_t *buf = uart_async_rx_buf_req(&data->rx.async_rx);
	size_t len = uart_async_rx_get_buf_len(&data->rx.async_rx);

	if (buf == NULL) {
		return -EBUSY;
	}

	return rx_enable(dev, data, buf, len);
}

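/* Handle a buffer request from the driver. If a buffer is available it is
 * handed over immediately; otherwise the request is recorded and served later,
 * once consuming data frees a buffer.
 */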
static void on_rx_buf_req(const struct device *dev,
			  const struct uart_async_to_irq_config *config,
			  struct uart_async_to_irq_data *data)
{
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	uint8_t *buf = uart_async_rx_buf_req(async_rx);
	size_t len = uart_async_rx_get_buf_len(async_rx);

	if (buf) {
		int err = config->api->rx_buf_rsp(dev, buf, len);

		if (err < 0) {
			uart_async_rx_on_buf_rel(async_rx, buf);
		}
	} else {
		atomic_inc(&data->rx.pending_buf_req);
	}
}

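/* Handle the RX_DISABLED event. If the receiver is supposed to stay enabled,
 * attempt to re-enable it; otherwise signal that RX is fully stopped.
 */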
static void on_rx_dis(const struct device *dev, struct uart_async_to_irq_data *data)
{
	if (data->flags & A2I_RX_ENABLE) {
		int err;

		if (data->rx.async_rx.pending_bytes == 0) {
			uart_async_rx_reset(&data->rx.async_rx);
		}

		err = try_rx_enable(dev, data);
		if (err == 0) {
			data->rx.pending_buf_req = 0;
		}

		LOG_INST_DBG(get_config(dev)->log, "Reenabling RX from RX_DISABLED (err:%d)", err);
		__ASSERT((err >= 0) || (err == -EBUSY), "err: %d", err);
		return;
	}

	k_sem_give(&data->rx.sem);
}

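/* Event handler registered with the asynchronous API. Translates async UART
 * events into state updates and, when needed, schedules the interrupt-style
 * callback through the trampoline.
 */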
static void uart_async_to_irq_callback(const struct device *dev,
				       struct uart_event *evt,
				       void *user_data)
{
	struct uart_async_to_irq_data *data = (struct uart_async_to_irq_data *)user_data;
	const struct uart_async_to_irq_config *config = get_config(dev);
	bool call_handler = false;

	switch (evt->type) {
	case UART_TX_DONE:
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		call_handler = data->flags & A2I_TX_IRQ_ENABLED;
		break;
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&data->rx.async_rx, evt->data.rx.buf, evt->data.rx.len);
		call_handler = data->flags & A2I_RX_IRQ_ENABLED;
		break;
	case UART_RX_BUF_REQUEST:
		on_rx_buf_req(dev, config, data);
		break;
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&data->rx.async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_STOPPED:
		call_handler = data->flags & A2I_ERR_IRQ_ENABLED;
		break;
	case UART_RX_DISABLED:
		on_rx_dis(dev, data);
		break;
	default:
		break;
	}

	if (data->callback && call_handler) {
		atomic_inc(&data->irq_req);
		config->trampoline(dev);
	}
}

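/** Interrupt driven FIFO fill function */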
int z_uart_async_to_irq_fifo_fill(const struct device *dev, const uint8_t *buf, int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	len = MIN(len, data->tx.len);
	/* Atomically set the TX busy flag; bail out if a transfer is ongoing. */
	if (atomic_or(&data->flags, A2I_TX_BUSY) & A2I_TX_BUSY) {
		return 0;
	}

	/* Copy into the internal buffer so the caller's buffer need not
	 * outlive the asynchronous transfer.
	 */
	memcpy(data->tx.buf, buf, len);

	err = config->api->tx(dev, data->tx.buf, len, SYS_FOREVER_US);
	if (err < 0) {
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		return 0;
	}

	return len;
}

/** Interrupt driven FIFO read function */
int z_uart_async_to_irq_fifo_read(const struct device *dev,
				  uint8_t *buf,
				  const int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	size_t claim_len;
	uint8_t *claim_buf;

	claim_len = uart_async_rx_data_claim(async_rx, &claim_buf, len);
	if (claim_len == 0) {
		return 0;
	}

	memcpy(buf, claim_buf, claim_len);
	bool buf_available = uart_async_rx_data_consume(async_rx, claim_len);

	/* If the driver requested a buffer while none was available, serve the
	 * deferred request now that consuming data has freed one.
	 */
	if (data->rx.pending_buf_req && buf_available) {
		buf = uart_async_rx_buf_req(async_rx);
		__ASSERT_NO_MSG(buf != NULL);
		int err;
		size_t rx_len = uart_async_rx_get_buf_len(async_rx);

		atomic_dec(&data->rx.pending_buf_req);
		err = config->api->rx_buf_rsp(dev, buf, rx_len);
		if (err < 0) {
			/* -EACCES means the receiver got disabled in the
			 * meantime; restart it with the freed buffer.
			 */
			if (err == -EACCES) {
				data->rx.pending_buf_req = 0;
				err = rx_enable(dev, data, buf, rx_len);
			}
			if (err < 0) {
				return err;
			}
		}
	}

	return (int)claim_len;
}

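/* Helpers for atomically setting and clearing interrupt enable flags. Enabling
 * also triggers the trampoline so that a newly enabled interrupt fires
 * immediately if its condition is already met.
 */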
static void dir_disable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_and(&data->flags, ~flag);
}

static void dir_enable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_or(&data->flags, flag);

	atomic_inc(&data->irq_req);
	get_config(dev)->trampoline(dev);
}

/** Interrupt driven transfer enabling function */
void z_uart_async_to_irq_irq_tx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer disabling function */
void z_uart_async_to_irq_irq_tx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer ready function */
int z_uart_async_to_irq_irq_tx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	return (data->flags & A2I_TX_IRQ_ENABLED) && !(data->flags & A2I_TX_BUSY);
}

/** Interrupt driven receiver enabling function */
void z_uart_async_to_irq_irq_rx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven receiver disabling function */
void z_uart_async_to_irq_irq_rx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven transfer complete function */
int z_uart_async_to_irq_irq_tx_complete(const struct device *dev)
{
	return z_uart_async_to_irq_irq_tx_ready(dev);
}

/** Interrupt driven receiver ready function */
int z_uart_async_to_irq_irq_rx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	return (data->flags & A2I_RX_IRQ_ENABLED) && (data->rx.async_rx.pending_bytes > 0);
}

/** Interrupt driven error enabling function */
void z_uart_async_to_irq_irq_err_enable(const struct device *dev)
{
	dir_enable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven error disabling function */
void z_uart_async_to_irq_irq_err_disable(const struct device *dev)
{
	dir_disable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven pending status function */
int z_uart_async_to_irq_irq_is_pending(const struct device *dev)
{
	return z_uart_async_to_irq_irq_tx_ready(dev) || z_uart_async_to_irq_irq_rx_ready(dev);
}

/** Interrupt driven interrupt update function */
int z_uart_async_to_irq_irq_update(const struct device *dev)
{
	return 1;
}

/** Set the irq callback function */
void z_uart_async_to_irq_irq_callback_set(const struct device *dev,
					  uart_irq_callback_user_data_t cb,
					  void *user_data)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	data->callback = cb;
	data->user_data = user_data;
}

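/* Register the event callback and start the receiver. Once enabled, the
 * receiver is kept running: it is re-enabled from the RX_DISABLED event as
 * long as the A2I_RX_ENABLE flag is set.
 */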
int uart_async_to_irq_rx_enable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	err = config->api->callback_set(dev, uart_async_to_irq_callback, data);
	if (err < 0) {
		return err;
	}

	err = try_rx_enable(dev, data);
	if (err == 0) {
		atomic_or(&data->flags, A2I_RX_ENABLE);
	}

	return err;
}

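/* Stop the receiver and wait until it is fully disabled before resetting the
 * RX buffer state. The semaphore is given from the RX_DISABLED event.
 */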
int uart_async_to_irq_rx_disable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	if (atomic_and(&data->flags, ~A2I_RX_ENABLE) & A2I_RX_ENABLE) {
		err = config->api->rx_disable(dev);
		if (err < 0) {
			return err;
		}
		k_sem_take(&data->rx.sem, K_FOREVER);
	}

	uart_async_rx_reset(&data->rx.async_rx);

	return 0;
}

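/* Called from the driver's trampoline context. Drains all pending interrupt
 * requests, calling the user callback once per request.
 */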
void uart_async_to_irq_trampoline_cb(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	do {
		data->callback(dev, data->user_data);
	} while (atomic_dec(&data->irq_req) > 1);
}

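/* Initialize the adaptation layer: bind the TX buffer, prepare the RX
 * semaphore and set up the helper RX buffer management.
 */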
int uart_async_to_irq_init(struct uart_async_to_irq_data *data,
			   const struct uart_async_to_irq_config *config)
{
	data->tx.buf = config->tx_buf;
	data->tx.len = config->tx_len;

	k_sem_init(&data->rx.sem, 0, 1);

	return uart_async_rx_init(&data->rx.async_rx, &config->async_rx);
}
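
/* A minimal sketch (not part of this file) of how a driver might bind to this
 * layer. The my_uart_* names are hypothetical; the key assumptions, visible in
 * get_data()/get_config() above, are that the device's data and config begin
 * with pointers to this module's structures, and that the trampoline
 * eventually runs uart_async_to_irq_trampoline_cb():
 *
 *	struct my_uart_data {
 *		struct uart_async_to_irq_data *a2i_data; // must be first
 *		// driver-specific state...
 *	};
 *
 *	static void my_uart_trampoline(const struct device *dev)
 *	{
 *		// e.g. pend a low-priority IRQ whose handler calls
 *		// uart_async_to_irq_trampoline_cb(dev);
 *	}
 *
 *	// During init: uart_async_to_irq_init(&a2i_data, &a2i_config);
 */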