/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/drivers/serial/uart_async_to_irq.h>
#include <string.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(UART_ASYNC_TO_IRQ_LOG_NAME, CONFIG_UART_LOG_LEVEL);
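
/*
 * This layer implements the interrupt driven UART API (fifo_fill/fifo_read
 * and the irq_* calls) on top of a driver exposing the asynchronous UART
 * API. TX data is copied into a buffer owned by this layer and sent with a
 * single asynchronous transfer; RX data is collected through the
 * uart_async_rx helper. User interrupt handlers are executed in interrupt
 * context via a trampoline supplied by the front-end driver.
 */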

/* Internal state flags. */

/* RX interrupt enabled. */
#define A2I_RX_IRQ_ENABLED BIT(0)

/* TX interrupt enabled. */
#define A2I_TX_IRQ_ENABLED BIT(1)

/* Error interrupt enabled. */
#define A2I_ERR_IRQ_ENABLED BIT(2)

/* Receiver to be kept enabled. */
#define A2I_RX_ENABLE BIT(3)

/* TX busy. */
#define A2I_TX_BUSY BIT(4)

/* Error pending. */
#define A2I_ERR_PENDING BIT(5)

static struct uart_async_to_irq_data *get_data(const struct device *dev)
{
	struct uart_async_to_irq_data **data = dev->data;

	return *data;
}

static const struct uart_async_to_irq_config *get_config(const struct device *dev)
{
	const struct uart_async_to_irq_config * const *config = dev->config;

	return *config;
}

/* Function calculates RX timeout based on baudrate. */
static uint32_t get_rx_timeout(const struct device *dev)
{
	struct uart_config cfg = { 0 };
	int err;
	uint32_t baudrate;

	err = uart_config_get(dev, &cfg);
	if (err == 0) {
		baudrate = cfg.baudrate;
	} else {
		baudrate = get_config(dev)->baudrate;
	}

	uint32_t us = (CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT * 1000000) / baudrate;

	return us;
}
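
/*
 * Example (values for illustration only): with
 * CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT set to 100 and a baudrate of
 * 115200, the timeout is (100 * 1000000) / 115200 ~= 868 us.
 */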

static int rx_enable(const struct device *dev,
		     struct uart_async_to_irq_data *data,
		     uint8_t *buf,
		     size_t len)
{
	int err;
	const struct uart_async_to_irq_config *config = get_config(dev);

	err = config->api->rx_enable(dev, buf, len, get_rx_timeout(dev));

	return err;
}

static int try_rx_enable(const struct device *dev, struct uart_async_to_irq_data *data)
{
	uint8_t *buf = uart_async_rx_buf_req(&data->rx.async_rx);
	size_t len = uart_async_rx_get_buf_len(&data->rx.async_rx);

	if (buf == NULL) {
		return -EBUSY;
	}

	return rx_enable(dev, data, buf, len);
}

static void on_rx_buf_req(const struct device *dev,
			  const struct uart_async_to_irq_config *config,
			  struct uart_async_to_irq_data *data)
{
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	uint8_t *buf = uart_async_rx_buf_req(async_rx);
	size_t len = uart_async_rx_get_buf_len(async_rx);

	if (buf) {
		int err = config->api->rx_buf_rsp(dev, buf, len);

		if (err < 0) {
			uart_async_rx_on_buf_rel(async_rx, buf);
		}
	} else {
		atomic_inc(&data->rx.pending_buf_req);
	}
}

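/* Handle the UART_RX_DISABLED event. If the receiver shall stay enabled
 * (A2I_RX_ENABLE is set) it is re-enabled with a fresh buffer, otherwise
 * the semaphore that uart_async_to_irq_rx_disable() waits on is given.
 */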
static void on_rx_dis(const struct device *dev, struct uart_async_to_irq_data *data)
{
	if (data->flags & A2I_RX_ENABLE) {
		int err;

		if (data->rx.async_rx.pending_bytes == 0) {
			uart_async_rx_reset(&data->rx.async_rx);
		}

		err = try_rx_enable(dev, data);
		if (err == 0) {
			data->rx.pending_buf_req = 0;
		}

		LOG_INST_DBG(get_config(dev)->log, "Reenabling RX from RX_DISABLED (err:%d)", err);
		__ASSERT((err >= 0) || (err == -EBUSY), "err: %d", err);
		return;
	}

	k_sem_give(&data->rx.sem);
}

static void uart_async_to_irq_callback(const struct device *dev,
				       struct uart_event *evt,
				       void *user_data)
{
	struct uart_async_to_irq_data *data = (struct uart_async_to_irq_data *)user_data;
	const struct uart_async_to_irq_config *config = get_config(dev);
	bool call_handler = false;

	switch (evt->type) {
	case UART_TX_DONE:
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		call_handler = data->flags & A2I_TX_IRQ_ENABLED;
		break;
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&data->rx.async_rx, evt->data.rx.buf, evt->data.rx.len);
		call_handler = data->flags & A2I_RX_IRQ_ENABLED;
		break;
	case UART_RX_BUF_REQUEST:
		on_rx_buf_req(dev, config, data);
		break;
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&data->rx.async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_STOPPED:
		atomic_or(&data->flags, A2I_ERR_PENDING);
		call_handler = data->flags & A2I_ERR_IRQ_ENABLED;
		break;
	case UART_RX_DISABLED:
		on_rx_dis(dev, data);
		break;
	default:
		break;
	}

	if (data->callback && call_handler) {
		atomic_inc(&data->irq_req);
		config->trampoline(dev);
	}
}

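/** Interrupt driven FIFO fill function. Data is copied into a buffer owned
 * by this layer since the asynchronous TX API requires the buffer to stay
 * valid until UART_TX_DONE. Only one transfer is active at a time, which
 * is guarded by the A2I_TX_BUSY flag.
 */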
int z_uart_async_to_irq_fifo_fill(const struct device *dev, const uint8_t *buf, int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	len = MIN(len, data->tx.len);
	if (atomic_or(&data->flags, A2I_TX_BUSY) & A2I_TX_BUSY) {
		return 0;
	}

	memcpy(data->tx.buf, buf, len);

	err = config->api->tx(dev, data->tx.buf, len, SYS_FOREVER_US);
	if (err < 0) {
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		return 0;
	}

	return len;
}

/** Interrupt driven FIFO read function */
int z_uart_async_to_irq_fifo_read(const struct device *dev,
				  uint8_t *buf,
				  const int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	size_t claim_len;
	uint8_t *claim_buf;

	claim_len = uart_async_rx_data_claim(async_rx, &claim_buf, len);
	if (claim_len == 0) {
		return 0;
	}

	memcpy(buf, claim_buf, claim_len);
	bool buf_available = uart_async_rx_data_consume(async_rx, claim_len);

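	/* If consuming the data made a buffer available and the driver has an
	 * outstanding buffer request, respond to it now. -EACCES from
	 * rx_buf_rsp() indicates that the receiver was disabled in the
	 * meantime, in which case reception is restarted directly.
	 */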
	if (data->rx.pending_buf_req && buf_available) {
		buf = uart_async_rx_buf_req(async_rx);
		__ASSERT_NO_MSG(buf != NULL);
		int err;
		size_t rx_len = uart_async_rx_get_buf_len(async_rx);

		atomic_dec(&data->rx.pending_buf_req);
		err = config->api->rx_buf_rsp(dev, buf, rx_len);
		if (err < 0) {
			if (err == -EACCES) {
				data->rx.pending_buf_req = 0;
				err = rx_enable(dev, data, buf, rx_len);
			}
			if (err < 0) {
				return err;
			}
		}
	}

	return (int)claim_len;
}

static void dir_disable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_and(&data->flags, ~flag);
}

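/* Set the direction flag and trigger the trampoline so that the user
 * callback is called at least once after enabling, mirroring how a
 * level-triggered TX/RX-ready interrupt fires immediately.
 */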
static void dir_enable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_or(&data->flags, flag);

	atomic_inc(&data->irq_req);
	get_config(dev)->trampoline(dev);
}

/** Interrupt driven transfer enabling function */
void z_uart_async_to_irq_irq_tx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer disabling function */
void z_uart_async_to_irq_irq_tx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer ready function */
int z_uart_async_to_irq_irq_tx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	bool ready = (data->flags & A2I_TX_IRQ_ENABLED) && !(data->flags & A2I_TX_BUSY);

	/* async API handles arbitrary sizes */
	return ready ? data->tx.len : 0;
}

/** Interrupt driven receiver enabling function */
void z_uart_async_to_irq_irq_rx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven receiver disabling function */
void z_uart_async_to_irq_irq_rx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven transfer complete function */
int z_uart_async_to_irq_irq_tx_complete(const struct device *dev)
{
	return z_uart_async_to_irq_irq_tx_ready(dev) > 0 ? 1 : 0;
}

/** Interrupt driven receiver ready function */
int z_uart_async_to_irq_irq_rx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	return (data->flags & A2I_RX_IRQ_ENABLED) && (data->rx.async_rx.pending_bytes > 0);
}

/** Interrupt driven error enabling function */
void z_uart_async_to_irq_irq_err_enable(const struct device *dev)
{
	dir_enable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven error disabling function */
void z_uart_async_to_irq_irq_err_disable(const struct device *dev)
{
	dir_disable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven pending status function */
int z_uart_async_to_irq_irq_is_pending(const struct device *dev)
{
	bool tx_rdy = z_uart_async_to_irq_irq_tx_ready(dev);
	bool rx_rdy = z_uart_async_to_irq_irq_rx_ready(dev);
	struct uart_async_to_irq_data *data = get_data(dev);
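	/* atomic_and() returns the flags prior to clearing, so a pending
	 * error is reported and cleared in one step.
	 */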
	bool err_pending = atomic_and(&data->flags, ~A2I_ERR_PENDING) & A2I_ERR_PENDING;

	return tx_rdy || rx_rdy || err_pending;
}

/** Interrupt driven interrupt update function */
int z_uart_async_to_irq_irq_update(const struct device *dev)
{
	return 1;
}

/** Set the irq callback function */
void z_uart_async_to_irq_irq_callback_set(const struct device *dev,
					  uart_irq_callback_user_data_t cb,
					  void *user_data)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	data->callback = cb;
	data->user_data = user_data;
}

int uart_async_to_irq_rx_enable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	int err;

	err = try_rx_enable(dev, data);
	if (err == 0) {
		atomic_or(&data->flags, A2I_RX_ENABLE);
	}

	return err;
}

int uart_async_to_irq_rx_disable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	if (atomic_and(&data->flags, ~A2I_RX_ENABLE) & A2I_RX_ENABLE) {
		err = config->api->rx_disable(dev);
		if (err < 0) {
			return err;
		}
		k_sem_take(&data->rx.sem, K_FOREVER);
	}

	uart_async_rx_reset(&data->rx.async_rx);

	return 0;
}

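/* Called by the front-end driver from the context established by the
 * trampoline (typically interrupt context). atomic_dec() returns the
 * previous value of irq_req, so the user callback is repeated as long as
 * further requests arrived while it was running.
 */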
void uart_async_to_irq_trampoline_cb(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	do {
		data->callback(dev, data->user_data);
	} while (atomic_dec(&data->irq_req) > 1);
}

int uart_async_to_irq_init(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	data->tx.buf = config->tx_buf;
	data->tx.len = config->tx_len;

	k_sem_init(&data->rx.sem, 0, 1);

	err = config->api->callback_set(dev, uart_async_to_irq_callback, data);
	if (err < 0) {
		return err;
	}

	return uart_async_rx_init(&data->rx.async_rx, &config->async_rx);
}
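
/*
 * Usage sketch (illustrative only; the my_uart_* names below are
 * hypothetical and not part of this API). A front-end driver using this
 * adaptation layer is expected to:
 *
 * - store a pointer to its struct uart_async_to_irq_data as the first
 *   member of dev->data and a pointer to its struct
 *   uart_async_to_irq_config as the first member of dev->config (see
 *   get_data() and get_config()),
 * - provide a trampoline in the configuration which gets execution into
 *   interrupt context (e.g. by pending a dedicated IRQ) and from there
 *   calls uart_async_to_irq_trampoline_cb(),
 * - call uart_async_to_irq_init() during its own initialization, e.g.:
 *
 *	static int my_uart_init(const struct device *dev)
 *	{
 *		int err = uart_async_to_irq_init(dev);
 *
 *		if (err < 0) {
 *			return err;
 *		}
 *
 *		// Keep the receiver running for interrupt driven RX.
 *		return uart_async_to_irq_rx_enable(dev);
 *	}
 */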