/*
 * Copyright (c) 2024 Croxel, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

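/*
 * Virtual UART driver that transports serial data over the Bluetooth LE
 * NUS (Nordic UART Service). TX bytes are staged in a ring buffer and
 * flushed as GATT notifications from a dedicated work queue; RX bytes
 * arrive through the NUS receive callback and are drained via the
 * standard UART polling and interrupt-driven APIs.
 */
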
#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/bluetooth/services/nus.h>

#define DT_DRV_COMPAT zephyr_nus_uart

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uart_nus, CONFIG_UART_LOG_LEVEL);

K_THREAD_STACK_DEFINE(nus_work_queue_stack, CONFIG_UART_BT_WORKQUEUE_STACK_SIZE);
static struct k_work_q nus_work_queue;

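/* Sentinel used by get_max_chunk_size(): no connected peer reported an ATT MTU. */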
#define UART_BT_MTU_INVALID 0xFFFF

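/*
 * Per-instance driver data: the NUS instance and its callbacks, plus the
 * RX/TX ring buffers and the work items used to emulate UART interrupts
 * on top of the shared work queue.
 */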
struct uart_bt_data {
	struct {
		struct bt_nus_inst *inst;
		struct bt_nus_cb cb;
		atomic_t enabled;
	} bt;
	struct {
		struct ring_buf *rx_ringbuf;
		struct ring_buf *tx_ringbuf;
		struct k_work cb_work;
		struct k_work_delayable tx_work;
		bool rx_irq_ena;
		bool tx_irq_ena;
		struct {
			const struct device *dev;
			uart_irq_callback_user_data_t cb;
			void *cb_data;
		} callback;
	} uart;
};

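/*
 * NUS callback: a peer subscribed to or unsubscribed from notifications.
 * Record the new state and, if TX data is already buffered, reschedule
 * the TX work so it is flushed now that someone is listening.
 */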
static void bt_notif_enabled(bool enabled, void *ctx)
{
	__ASSERT_NO_MSG(ctx);

	const struct device *dev = (const struct device *)ctx;
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	(void)atomic_set(&dev_data->bt.enabled, enabled ? 1 : 0);

	LOG_DBG("%s() - %s", __func__, enabled ? "enabled" : "disabled");

	if (!ring_buf_is_empty(dev_data->uart.tx_ringbuf)) {
		k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);
	}
}

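/*
 * NUS callback: data written by a peer. Queue it into the RX ring buffer
 * (excess bytes are dropped with an error log) and submit cb_work, which
 * invokes the user's IRQ callback as if an RX interrupt had fired.
 */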
static void bt_received(struct bt_conn *conn, const void *data, uint16_t len, void *ctx)
{
	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(ctx);
	__ASSERT_NO_MSG(data);
	__ASSERT_NO_MSG(len > 0);

	const struct device *dev = (const struct device *)ctx;
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
	struct ring_buf *ringbuf = dev_data->uart.rx_ringbuf;
	uint32_t put_len;

	LOG_DBG("%s() - len: %d, rx_ringbuf space %d", __func__, len, ring_buf_space_get(ringbuf));
	LOG_HEXDUMP_DBG(data, len, "data");

	put_len = ring_buf_put(ringbuf, (const uint8_t *)data, len);
	if (put_len < len) {
		LOG_ERR("RX Ring buffer full. received: %d, added to queue: %d", len, put_len);
	}

	k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
}

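/*
 * bt_conn_foreach() handler: tracks the smallest unenhanced ATT MTU among
 * all established LE connections, ignoring peers that have not reported
 * an MTU yet.
 */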
static void foreach_conn_handler_get_att_mtu(struct bt_conn *conn, void *data)
{
	uint16_t *min_att_mtu = (uint16_t *)data;
	uint16_t conn_att_mtu = 0;
	struct bt_conn_info conn_info;
	int err;

	err = bt_conn_get_info(conn, &conn_info);
	if (!err && conn_info.state == BT_CONN_STATE_CONNECTED) {
		conn_att_mtu = bt_gatt_get_uatt_mtu(conn);

		if (conn_att_mtu > 0) {
			*min_att_mtu = MIN(*min_att_mtu, conn_att_mtu);
		}
	}
}

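/*
 * Largest notification payload that fits every connected peer. For
 * example, with the default ATT MTU of 23 octets this returns
 * 23 - 3 = 20 bytes.
 */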
static inline uint16_t get_max_chunk_size(void)
{
	uint16_t min_att_mtu = UART_BT_MTU_INVALID;

	bt_conn_foreach(BT_CONN_TYPE_LE, foreach_conn_handler_get_att_mtu, &min_att_mtu);

	if (min_att_mtu == UART_BT_MTU_INVALID) {
		/** Default ATT MTU */
		min_att_mtu = 23;
	}

	/** ATT notification overhead: opcode (1 octet) + attribute handle (2 octets) */
	return (min_att_mtu - 1 - 2);
}

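/*
 * Work item standing in for the UART ISR: invokes the user-registered
 * IRQ callback from work-queue context.
 */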
static void cb_work_handler(struct k_work *work)
{
	struct uart_bt_data *dev_data = CONTAINER_OF(work, struct uart_bt_data, uart.cb_work);

	if (dev_data->uart.callback.cb) {
		dev_data->uart.callback.cb(
			dev_data->uart.callback.dev,
			dev_data->uart.callback.cb_data);
	}
}

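/*
 * Drains the TX ring buffer, sending the data to all subscribed peers in
 * MTU-sized chunks. If buffer space was freed and TX "interrupts" are
 * enabled, cb_work is resubmitted so the user callback can refill it.
 */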
static void tx_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct uart_bt_data *dev_data = CONTAINER_OF(dwork, struct uart_bt_data, uart.tx_work);
	uint8_t *data = NULL;
	size_t len;
	int err;

	__ASSERT_NO_MSG(dev_data);

	uint16_t chunk_size = get_max_chunk_size();

	do {
		/** The chunk size is based on the smallest MTU among all
		 * peers, and the same chunk is sent to everyone. This avoids
		 * managing separate read pointers (one per connection).
		 */
		len = ring_buf_get_claim(dev_data->uart.tx_ringbuf, &data, chunk_size);
		if (len > 0) {
			err = bt_nus_inst_send(NULL, dev_data->bt.inst, data, len);
			if (err) {
				LOG_ERR("Failed to send data over BT: %d", err);
			}
		}

		ring_buf_get_finish(dev_data->uart.tx_ringbuf, len);
	} while (len > 0 && !err);

	if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) {
		k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
	}
}

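/*
 * Emulated TX FIFO: bytes are staged in the TX ring buffer and the TX
 * work is rescheduled immediately, but only once a peer has enabled
 * notifications.
 */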
static int uart_bt_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
	size_t wrote;

	wrote = ring_buf_put(dev_data->uart.tx_ringbuf, tx_data, len);
	if (wrote < len) {
		LOG_WRN("Ring buffer full, drop %zd bytes", len - wrote);
	}

	if (atomic_get(&dev_data->bt.enabled)) {
		k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);
	}

	return wrote;
}

static int uart_bt_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	return ring_buf_get(dev_data->uart.rx_ringbuf, rx_data, size);
}

static int uart_bt_poll_in(const struct device *dev, unsigned char *c)
{
	int err = uart_bt_fifo_read(dev, c, 1);

	return err == 1 ? 0 : -1;
}

static void uart_bt_poll_out(const struct device *dev, unsigned char c)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
	struct ring_buf *ringbuf = dev_data->uart.tx_ringbuf;

	/** If the ring buffer is full, wait for space to free up, unless we
	 * are in ISR context or no peer has notifications enabled, in which
	 * case the byte is discarded.
	 */
	while (!ring_buf_put(ringbuf, &c, 1)) {
		if (k_is_in_isr() || !atomic_get(&dev_data->bt.enabled)) {
			LOG_WRN_ONCE("Ring buffer full, discard %c", c);
			break;
		}

		k_sleep(K_MSEC(1));
	}

	/** Don't flush the data until notifications are enabled. */
	if (atomic_get(&dev_data->bt.enabled)) {
		/** The delay allows buffering several characters before
		 * transmitting, so more than one byte goes into each
		 * notification (e.g. when poll_out is called in a loop).
		 */
		k_work_schedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_MSEC(1));
	}
}

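/*
 * Emulated interrupt-driven API: "TX ready" means the TX ring buffer has
 * free space and "RX ready" means the RX ring buffer holds data, each
 * gated by its enable flag. There is no hardware ISR; cb_work delivers
 * the callback from the shared work queue instead.
 */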
static int uart_bt_irq_tx_ready(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) {
		return 1;
	}

	return 0;
}

static void uart_bt_irq_tx_enable(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	dev_data->uart.tx_irq_ena = true;

	if (uart_bt_irq_tx_ready(dev)) {
		k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
	}
}

static void uart_bt_irq_tx_disable(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	dev_data->uart.tx_irq_ena = false;
}

static int uart_bt_irq_rx_ready(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	if (!ring_buf_is_empty(dev_data->uart.rx_ringbuf) && dev_data->uart.rx_irq_ena) {
		return 1;
	}

	return 0;
}

static void uart_bt_irq_rx_enable(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	dev_data->uart.rx_irq_ena = true;

	k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
}

static void uart_bt_irq_rx_disable(const struct device *dev)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	dev_data->uart.rx_irq_ena = false;
}

static int uart_bt_irq_is_pending(const struct device *dev)
{
	return uart_bt_irq_rx_ready(dev);
}

static int uart_bt_irq_update(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}

static void uart_bt_irq_callback_set(const struct device *dev,
				     uart_irq_callback_user_data_t cb,
				     void *cb_data)
{
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	dev_data->uart.callback.cb = cb;
	dev_data->uart.callback.cb_data = cb_data;
}

static DEVICE_API(uart, uart_bt_driver_api) = {
	.poll_in = uart_bt_poll_in,
	.poll_out = uart_bt_poll_out,
	.fifo_fill = uart_bt_fifo_fill,
	.fifo_read = uart_bt_fifo_read,
	.irq_tx_enable = uart_bt_irq_tx_enable,
	.irq_tx_disable = uart_bt_irq_tx_disable,
	.irq_tx_ready = uart_bt_irq_tx_ready,
	.irq_rx_enable = uart_bt_irq_rx_enable,
	.irq_rx_disable = uart_bt_irq_rx_disable,
	.irq_rx_ready = uart_bt_irq_rx_ready,
	.irq_is_pending = uart_bt_irq_is_pending,
	.irq_update = uart_bt_irq_update,
	.irq_callback_set = uart_bt_irq_callback_set,
};
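
/*
 * Example application usage; a minimal sketch assuming a devicetree node
 * labeled "nus_uart" with this driver's compatible (the label and the
 * transmitted bytes are hypothetical):
 *
 *   const struct device *bt_uart = DEVICE_DT_GET(DT_NODELABEL(nus_uart));
 *
 *   if (device_is_ready(bt_uart)) {
 *       uart_poll_out(bt_uart, 'h');  // buffered until a peer subscribes
 *       uart_poll_out(bt_uart, 'i');
 *   }
 */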

static int uart_bt_workqueue_init(void)
{
	k_work_queue_init(&nus_work_queue);
	k_work_queue_start(&nus_work_queue, nus_work_queue_stack,
			   K_THREAD_STACK_SIZEOF(nus_work_queue_stack),
			   CONFIG_UART_BT_WORKQUEUE_PRIORITY, NULL);

	return 0;
}

/** The work queue is shared across all instances, hence it is initialized separately. */
SYS_INIT(uart_bt_workqueue_init, POST_KERNEL, CONFIG_SERIAL_INIT_PRIORITY);

static int uart_bt_init(const struct device *dev)
{
	int err;
	struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;

	/** Store the device handle so it can be recovered from uart_bt_data
	 * in cb_work_handler.
	 */
	dev_data->uart.callback.dev = dev;

	k_work_init_delayable(&dev_data->uart.tx_work, tx_work_handler);
	k_work_init(&dev_data->uart.cb_work, cb_work_handler);

	err = bt_nus_inst_cb_register(dev_data->bt.inst, &dev_data->bt.cb, (void *)dev);
	if (err) {
		return err;
	}

	return 0;
}

#define UART_BT_RX_FIFO_SIZE(inst) (DT_INST_PROP(inst, rx_fifo_size))
#define UART_BT_TX_FIFO_SIZE(inst) (DT_INST_PROP(inst, tx_fifo_size))

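/*
 * The FIFO sizes come from devicetree. A minimal sketch of a node using
 * this driver (label and sizes are illustrative):
 *
 *   nus_uart: nus_uart {
 *       compatible = "zephyr,nus-uart";
 *       rx-fifo-size = <1024>;
 *       tx-fifo-size = <1024>;
 *   };
 */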
#define UART_BT_INIT(n)                                                        \
                                                                               \
	BT_NUS_INST_DEFINE(bt_nus_inst_##n);                                   \
                                                                               \
	RING_BUF_DECLARE(bt_nus_rx_rb_##n, UART_BT_RX_FIFO_SIZE(n));           \
	RING_BUF_DECLARE(bt_nus_tx_rb_##n, UART_BT_TX_FIFO_SIZE(n));           \
                                                                               \
	static struct uart_bt_data uart_bt_data_##n = {                        \
		.bt = {                                                        \
			.inst = &bt_nus_inst_##n,                              \
			.enabled = ATOMIC_INIT(0),                             \
			.cb = {                                                \
				.notif_enabled = bt_notif_enabled,             \
				.received = bt_received,                       \
			},                                                     \
		},                                                             \
		.uart = {                                                      \
			.rx_ringbuf = &bt_nus_rx_rb_##n,                       \
			.tx_ringbuf = &bt_nus_tx_rb_##n,                       \
		},                                                             \
	};                                                                     \
                                                                               \
	DEVICE_DT_INST_DEFINE(n, uart_bt_init, NULL, &uart_bt_data_##n,        \
			      NULL, PRE_KERNEL_1,                              \
			      CONFIG_SERIAL_INIT_PRIORITY,                     \
			      &uart_bt_driver_api);

DT_INST_FOREACH_STATUS_OKAY(UART_BT_INIT)