/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/drivers/serial/uart_async_to_irq.h>
#include <string.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(UART_ASYNC_TO_IRQ_LOG_NAME, CONFIG_UART_LOG_LEVEL);

/* Internal state flags. */

/* RX interrupt enabled. */
#define A2I_RX_IRQ_ENABLED	BIT(0)

/* TX interrupt enabled. */
#define A2I_TX_IRQ_ENABLED	BIT(1)

/* Error interrupt enabled. */
#define A2I_ERR_IRQ_ENABLED	BIT(2)

/* Receiver to be kept enabled. */
#define A2I_RX_ENABLE		BIT(3)

/* TX busy. */
#define A2I_TX_BUSY		BIT(4)

/* Error pending. */
#define A2I_ERR_PENDING		BIT(5)

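/* Driver data is expected to start with a pointer to the adaptation layer data. */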
static struct uart_async_to_irq_data *get_data(const struct device *dev)
{
	struct uart_async_to_irq_data **data = dev->data;

	return *data;
}

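/* Driver configuration is expected to start with a pointer to the adaptation layer configuration. */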
static const struct uart_async_to_irq_config *get_config(const struct device *dev)
{
	const struct uart_async_to_irq_config * const *config = dev->config;

	return *config;
}

/* Calculate the RX timeout based on the baudrate. */
static uint32_t get_rx_timeout(const struct device *dev)
{
	struct uart_config cfg = { 0 };
	int err;
	uint32_t baudrate;

	err = uart_config_get(dev, &cfg);
	if (err == 0) {
		baudrate = cfg.baudrate;
	} else {
		baudrate = get_config(dev)->baudrate;
	}

	__ASSERT_NO_MSG(baudrate != 0);

	uint32_t us = (CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT * 1000000) / baudrate;

	return us;
}

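/* Enable RX using the timeout derived from the current baudrate. */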
static int rx_enable(const struct device *dev,
		     struct uart_async_to_irq_data *data,
		     uint8_t *buf,
		     size_t len)
{
	int err;
	const struct uart_async_to_irq_config *config = get_config(dev);

	err = config->api->rx_enable(dev, buf, len, get_rx_timeout(dev));

	return err;
}

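/* Request a buffer from the RX helper and enable the receiver with it.
 * Returns -EBUSY if no buffer is currently available.
 */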
static int try_rx_enable(const struct device *dev, struct uart_async_to_irq_data *data)
{
	uint8_t *buf = uart_async_rx_buf_req(&data->rx.async_rx);
	size_t len = uart_async_rx_get_buf_len(&data->rx.async_rx);

	if (buf == NULL) {
		return -EBUSY;
	}

	return rx_enable(dev, data, buf, len);
}

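/* Handle UART_RX_BUF_REQUEST. Respond with the next buffer if one is available,
 * otherwise record a pending request to be served once data gets consumed.
 */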
static void on_rx_buf_req(const struct device *dev,
			  const struct uart_async_to_irq_config *config,
			  struct uart_async_to_irq_data *data)
{
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	uint8_t *buf = uart_async_rx_buf_req(async_rx);
	size_t len = uart_async_rx_get_buf_len(async_rx);

	if (buf) {
		int err = config->api->rx_buf_rsp(dev, buf, len);

		if (err < 0) {
			uart_async_rx_on_buf_rel(async_rx, buf);
		}
	} else {
		atomic_inc(&data->rx.pending_buf_req);
	}
}

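/* Handle UART_RX_DISABLED. Re-enable the receiver if it shall stay active,
 * otherwise signal that RX is fully stopped.
 */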
static void on_rx_dis(const struct device *dev, struct uart_async_to_irq_data *data)
{
	if (data->flags & A2I_RX_ENABLE) {
		int err;

		if (data->rx.async_rx.pending_bytes == 0) {
			uart_async_rx_reset(&data->rx.async_rx);
		}

		err = try_rx_enable(dev, data);
		if (err == 0) {
			data->rx.pending_buf_req = 0;
		}

		LOG_INST_DBG(get_config(dev)->log, "Reenabling RX from RX_DISABLED (err:%d)", err);
		__ASSERT((err >= 0) || (err == -EBUSY), "err: %d", err);
		return;
	}

	k_sem_give(&data->rx.sem);
}

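/* Event callback registered with the underlying asynchronous UART API. */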
static void uart_async_to_irq_callback(const struct device *dev,
					struct uart_event *evt,
					void *user_data)
{
	struct uart_async_to_irq_data *data = (struct uart_async_to_irq_data *)user_data;
	const struct uart_async_to_irq_config *config = get_config(dev);
	bool call_handler = false;

	switch (evt->type) {
	case UART_TX_DONE:
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		call_handler = data->flags & A2I_TX_IRQ_ENABLED;
		break;
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&data->rx.async_rx, evt->data.rx.buf, evt->data.rx.len);
		call_handler = data->flags & A2I_RX_IRQ_ENABLED;
		break;
	case UART_RX_BUF_REQUEST:
		on_rx_buf_req(dev, config, data);
		break;
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&data->rx.async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_STOPPED:
		atomic_or(&data->flags, A2I_ERR_PENDING);
		call_handler = data->flags & A2I_ERR_IRQ_ENABLED;
		break;
	case UART_RX_DISABLED:
		on_rx_dis(dev, data);
		break;
	default:
		break;
	}

	if (data->callback && call_handler) {
		atomic_inc(&data->irq_req);
		config->trampoline(dev);
	}
}

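/** Interrupt driven FIFO fill function */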
int z_uart_async_to_irq_fifo_fill(const struct device *dev, const uint8_t *buf, int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	len = MIN(len, data->tx.len);
	/* Return early if a previous transfer is still in progress. */
	if (atomic_or(&data->flags, A2I_TX_BUSY) & A2I_TX_BUSY) {
		return 0;
	}

	memcpy(data->tx.buf, buf, len);

	err = config->api->tx(dev, data->tx.buf, len, SYS_FOREVER_US);
	if (err < 0) {
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		return 0;
	}

	return len;
}

/** Interrupt driven FIFO read function */
int z_uart_async_to_irq_fifo_read(const struct device *dev,
				uint8_t *buf,
				const int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	size_t claim_len;
	uint8_t *claim_buf;

	claim_len = uart_async_rx_data_claim(async_rx, &claim_buf, len);
	if (claim_len == 0) {
		return 0;
	}

	memcpy(buf, claim_buf, claim_len);
	bool buf_available = uart_async_rx_data_consume(async_rx, claim_len);

	if (data->rx.pending_buf_req && buf_available) {
		buf = uart_async_rx_buf_req(async_rx);
		__ASSERT_NO_MSG(buf != NULL);
		int err;
		size_t rx_len = uart_async_rx_get_buf_len(async_rx);

		atomic_dec(&data->rx.pending_buf_req);
		err = config->api->rx_buf_rsp(dev, buf, rx_len);
		if (err < 0) {
			if (err == -EACCES) {
				/* Receiver is no longer active; re-enable it with the new buffer. */
				data->rx.pending_buf_req = 0;
				err = rx_enable(dev, data, buf, rx_len);
			}
			if (err < 0) {
				return err;
			}
		}
	}

	return (int)claim_len;
}

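/* Clear an interrupt enable flag. */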
static void dir_disable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_and(&data->flags, ~flag);
}

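/* Set an interrupt enable flag and trigger the trampoline so that the handler is called. */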
static void dir_enable(const struct device *dev, uint32_t flag)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	atomic_or(&data->flags, flag);

	atomic_inc(&data->irq_req);
	get_config(dev)->trampoline(dev);
}

/** Interrupt driven transfer enabling function */
void z_uart_async_to_irq_irq_tx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer disabling function */
void z_uart_async_to_irq_irq_tx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_TX_IRQ_ENABLED);
}

/** Interrupt driven transfer ready function */
int z_uart_async_to_irq_irq_tx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	bool ready = (data->flags & A2I_TX_IRQ_ENABLED) && !(data->flags & A2I_TX_BUSY);

	/* async API handles arbitrary sizes */
	return ready ? data->tx.len : 0;
}

/** Interrupt driven receiver enabling function */
void z_uart_async_to_irq_irq_rx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven receiver disabling function */
void z_uart_async_to_irq_irq_rx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_RX_IRQ_ENABLED);
}

/** Interrupt driven transfer complete function */
int z_uart_async_to_irq_irq_tx_complete(const struct device *dev)
{
	return z_uart_async_to_irq_irq_tx_ready(dev) > 0 ? 1 : 0;
}

/** Interrupt driven receiver ready function */
int z_uart_async_to_irq_irq_rx_ready(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	return (data->flags & A2I_RX_IRQ_ENABLED) && (data->rx.async_rx.pending_bytes > 0);
}

/** Interrupt driven error enabling function */
void z_uart_async_to_irq_irq_err_enable(const struct device *dev)
{
	dir_enable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven error disabling function */
void z_uart_async_to_irq_irq_err_disable(const struct device *dev)
{
	dir_disable(dev, A2I_ERR_IRQ_ENABLED);
}

/** Interrupt driven pending status function */
int z_uart_async_to_irq_irq_is_pending(const struct device *dev)
{
	bool tx_rdy = z_uart_async_to_irq_irq_tx_ready(dev);
	bool rx_rdy = z_uart_async_to_irq_irq_rx_ready(dev);
	struct uart_async_to_irq_data *data = get_data(dev);
	bool err_pending = atomic_and(&data->flags, ~A2I_ERR_PENDING) & A2I_ERR_PENDING;

	return tx_rdy || rx_rdy || err_pending;
}

/** Interrupt driven interrupt update function */
int z_uart_async_to_irq_irq_update(const struct device *dev)
{
	return 1;
}

/** Set the irq callback function */
void z_uart_async_to_irq_irq_callback_set(const struct device *dev,
			 uart_irq_callback_user_data_t cb,
			 void *user_data)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	data->callback = cb;
	data->user_data = user_data;
}

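/* Enable the receiver and keep it enabled across buffer rotations. */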
int uart_async_to_irq_rx_enable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	int err;

	err = try_rx_enable(dev, data);
	if (err == 0) {
		atomic_or(&data->flags, A2I_RX_ENABLE);
	}

	return err;
}

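/* Disable the receiver, wait until it is fully stopped and reset the RX helper. */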
int uart_async_to_irq_rx_disable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	if (atomic_and(&data->flags, ~A2I_RX_ENABLE) & A2I_RX_ENABLE) {
		err = config->api->rx_disable(dev);
		if (err < 0) {
			return err;
		}
		k_sem_take(&data->rx.sem, K_FOREVER);
	}

	uart_async_rx_reset(&data->rx.async_rx);

	return 0;
}

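/* Executed in the trampoline context. Calls the user handler until all pending interrupt requests are served. */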
void uart_async_to_irq_trampoline_cb(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	do {
		data->callback(dev, data->user_data);
	} while (atomic_dec(&data->irq_req) > 1);
}

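/* Initialize the adaptation layer: TX buffer, RX semaphore, asynchronous API callback and RX helper. */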
int uart_async_to_irq_init(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	data->tx.buf = config->tx_buf;
	data->tx.len = config->tx_len;

	k_sem_init(&data->rx.sem, 0, 1);

	err = config->api->callback_set(dev, uart_async_to_irq_callback, data);
	if (err < 0) {
		return err;
	}

	return uart_async_rx_init(&data->rx.async_rx, &config->async_rx);
}