/* h4.c - H:4 UART based Bluetooth driver */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>

#include <zephyr/init.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
#include <string.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth.h>

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_driver);

#include "common/bt_str.h"

#include "../util.h"

#define DT_DRV_COMPAT zephyr_bt_hci_uart

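/* Per-instance driver state: the RX reassembly state machine and buffers,
 * the TX queue, and the host's receive callback.
 */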
struct h4_data {
	struct {
		struct net_buf *buf;
		struct k_fifo fifo;

		uint16_t remaining;
		uint16_t discard;

		bool have_hdr;
		bool discardable;

		uint8_t hdr_len;

		uint8_t type;
		union {
			struct bt_hci_evt_hdr evt;
			struct bt_hci_acl_hdr acl;
			struct bt_hci_iso_hdr iso;
			uint8_t hdr[4];
		};
	} rx;

	struct {
		uint8_t type;
		struct net_buf *buf;
		struct k_fifo fifo;
	} tx;

	bt_hci_recv_t recv;
};

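/* Per-instance static configuration: the UART bus device plus the stack
 * and thread objects for the dedicated RX thread.
 */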
struct h4_config {
	const struct device *uart;
	k_thread_stack_t *rx_thread_stack;
	size_t rx_thread_stack_size;
	struct k_thread *rx_thread;
};

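/* Read the H:4 packet type indicator byte and set how many header bytes
 * must follow it.
 */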
static inline void h4_get_type(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	/* Get packet type */
	if (uart_fifo_read(cfg->uart, &h4->rx.type, 1) != 1) {
		LOG_WRN("Unable to read H:4 packet type");
		h4->rx.type = BT_HCI_H4_NONE;
		return;
	}

	switch (h4->rx.type) {
	case BT_HCI_H4_EVT:
		h4->rx.remaining = sizeof(h4->rx.evt);
		h4->rx.hdr_len = h4->rx.remaining;
		break;
	case BT_HCI_H4_ACL:
		h4->rx.remaining = sizeof(h4->rx.acl);
		h4->rx.hdr_len = h4->rx.remaining;
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			h4->rx.remaining = sizeof(h4->rx.iso);
			h4->rx.hdr_len = h4->rx.remaining;
			break;
		}
		__fallthrough;
	default:
		LOG_ERR("Unknown H:4 type 0x%02x", h4->rx.type);
		h4->rx.type = BT_HCI_H4_NONE;
	}
}

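/* Accumulate header bytes from the UART FIFO until rx.remaining reaches
 * zero.
 */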
static void h4_read_hdr(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int bytes_read = h4->rx.hdr_len - h4->rx.remaining;
	int ret;

	ret = uart_fifo_read(cfg->uart, h4->rx.hdr + bytes_read, h4->rx.remaining);
	if (unlikely(ret < 0)) {
		LOG_ERR("Unable to read from UART (ret %d)", ret);
	} else {
		h4->rx.remaining -= ret;
	}
}

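/* Complete the ACL header, then take the payload length from it. */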
static inline void get_acl_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	h4_read_hdr(dev);

	if (!h4->rx.remaining) {
		struct bt_hci_acl_hdr *hdr = &h4->rx.acl;

		h4->rx.remaining = sys_le16_to_cpu(hdr->len);
		LOG_DBG("Got ACL header. Payload %u bytes", h4->rx.remaining);
		h4->rx.have_hdr = true;
	}
}

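/* Complete the ISO header; the 16-bit length field also carries flag
 * bits, hence the bt_iso_hdr_len() conversion.
 */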
static inline void get_iso_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	h4_read_hdr(dev);

	if (!h4->rx.remaining) {
		struct bt_hci_iso_hdr *hdr = &h4->rx.iso;

		h4->rx.remaining = bt_iso_hdr_len(sys_le16_to_cpu(hdr->len));
		LOG_DBG("Got ISO header. Payload %u bytes", h4->rx.remaining);
		h4->rx.have_hdr = true;
	}
}

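/* Complete the event header. LE Meta events pull in one extra byte (the
 * subevent code) so advertising reports can be marked as discardable
 * before a buffer is allocated.
 */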
static inline void get_evt_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	struct bt_hci_evt_hdr *hdr = &h4->rx.evt;

	h4_read_hdr(dev);

	if (h4->rx.hdr_len == sizeof(*hdr) && h4->rx.remaining < sizeof(*hdr)) {
		switch (h4->rx.evt.evt) {
		case BT_HCI_EVT_LE_META_EVENT:
			h4->rx.remaining++;
			h4->rx.hdr_len++;
			break;
#if defined(CONFIG_BT_CLASSIC)
		case BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI:
		case BT_HCI_EVT_EXTENDED_INQUIRY_RESULT:
			h4->rx.discardable = true;
			break;
#endif
		}
	}

	if (!h4->rx.remaining) {
		if (h4->rx.evt.evt == BT_HCI_EVT_LE_META_EVENT &&
		    (h4->rx.hdr[sizeof(*hdr)] == BT_HCI_EVT_LE_ADVERTISING_REPORT)) {
			LOG_DBG("Marking adv report as discardable");
			h4->rx.discardable = true;
		}

		h4->rx.remaining = hdr->len - (h4->rx.hdr_len - sizeof(*hdr));
		LOG_DBG("Got event header. Payload %u bytes", hdr->len);
		h4->rx.have_hdr = true;
	}
}

static inline void copy_hdr(struct h4_data *h4)
{
	net_buf_add_mem(h4->rx.buf, h4->rx.hdr, h4->rx.hdr_len);
}

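/* Return the RX state machine to "waiting for packet type". */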
static void reset_rx(struct h4_data *h4)
{
	h4->rx.type = BT_HCI_H4_NONE;
	h4->rx.remaining = 0U;
	h4->rx.have_hdr = false;
	h4->rx.hdr_len = 0U;
	h4->rx.discardable = false;
}

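/* Allocate a host buffer matching the packet type being received. */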
static struct net_buf *get_rx(struct h4_data *h4, k_timeout_t timeout)
{
	LOG_DBG("type 0x%02x, evt 0x%02x", h4->rx.type, h4->rx.evt.evt);

	switch (h4->rx.type) {
	case BT_HCI_H4_EVT:
		return bt_buf_get_evt(h4->rx.evt.evt, h4->rx.discardable, timeout);
	case BT_HCI_H4_ACL:
		return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			return bt_buf_get_rx(BT_BUF_ISO_IN, timeout);
		}
	}

	return NULL;
}

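/* RX thread: performs the buffer allocations that could not be done in
 * ISR context and passes complete packets to the host via h4->recv().
 */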
static void rx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	struct net_buf *buf;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("started");

	while (1) {
		LOG_DBG("rx.buf %p", h4->rx.buf);

		/* We can only do the allocation if we know the initial
		 * header, since Command Complete/Status events must use the
		 * original command buffer (if available).
		 */
		if (h4->rx.have_hdr && !h4->rx.buf) {
			h4->rx.buf = get_rx(h4, K_FOREVER);
			LOG_DBG("Got rx.buf %p", h4->rx.buf);
			if (h4->rx.remaining > net_buf_tailroom(h4->rx.buf)) {
				LOG_ERR("Not enough space in buffer");
				h4->rx.discard = h4->rx.remaining;
				reset_rx(h4);
			} else {
				copy_hdr(h4);
			}
		}

		/* Let the ISR continue receiving new packets */
		uart_irq_rx_enable(cfg->uart);

		buf = net_buf_get(&h4->rx.fifo, K_FOREVER);
		do {
			uart_irq_rx_enable(cfg->uart);

			LOG_DBG("Calling bt_recv(%p)", buf);
			h4->recv(dev, buf);

			/* Give other threads a chance to run if the ISR
			 * is receiving data so fast that rx.fifo never
			 * or very rarely goes empty.
			 */
			k_yield();

			uart_irq_rx_disable(cfg->uart);
			buf = net_buf_get(&h4->rx.fifo, K_NO_WAIT);
		} while (buf);
	}
}

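/* Drain up to len bytes from the UART FIFO, returning the number of
 * bytes actually discarded.
 */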
static size_t h4_discard(const struct device *uart, size_t len)
{
	uint8_t buf[33];
	int err;

	err = uart_fifo_read(uart, buf, MIN(len, sizeof(buf)));
	if (unlikely(err < 0)) {
		LOG_ERR("Unable to read from UART (err %d)", err);
		return 0;
	}

	return err;
}

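/* Read payload bytes into rx.buf and queue the packet for the RX thread
 * once it is complete.
 */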
static inline void read_payload(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	struct net_buf *buf;
	int read;

	if (!h4->rx.buf) {
		size_t buf_tailroom;

		h4->rx.buf = get_rx(h4, K_NO_WAIT);
		if (!h4->rx.buf) {
			if (h4->rx.discardable) {
				LOG_WRN("Discarding event 0x%02x", h4->rx.evt.evt);
				h4->rx.discard = h4->rx.remaining;
				reset_rx(h4);
				return;
			}

			LOG_WRN("Failed to allocate, deferring to rx_thread");
			uart_irq_rx_disable(cfg->uart);
			return;
		}

		LOG_DBG("Allocated rx.buf %p", h4->rx.buf);

		buf_tailroom = net_buf_tailroom(h4->rx.buf);
		if (buf_tailroom < h4->rx.remaining) {
			LOG_ERR("Not enough space in buffer %u/%zu", h4->rx.remaining,
				buf_tailroom);
			h4->rx.discard = h4->rx.remaining;
			reset_rx(h4);
			return;
		}

		copy_hdr(h4);
	}

	read = uart_fifo_read(cfg->uart, net_buf_tail(h4->rx.buf), h4->rx.remaining);
	if (unlikely(read < 0)) {
		LOG_ERR("Failed to read UART (err %d)", read);
		return;
	}

	net_buf_add(h4->rx.buf, read);
	h4->rx.remaining -= read;

	LOG_DBG("got %d bytes, remaining %u", read, h4->rx.remaining);
	LOG_DBG("Payload (len %u): %s", h4->rx.buf->len,
		bt_hex(h4->rx.buf->data, h4->rx.buf->len));

	if (h4->rx.remaining) {
		return;
	}

	buf = h4->rx.buf;
	h4->rx.buf = NULL;
	if (h4->rx.type == BT_HCI_H4_EVT) {
		bt_buf_set_type(buf, BT_BUF_EVT);
	} else if (IS_ENABLED(CONFIG_BT_ISO) && h4->rx.type == BT_HCI_H4_ISO) {
		/* Keep the ISO type that was assigned at allocation time */
		bt_buf_set_type(buf, BT_BUF_ISO_IN);
	} else {
		bt_buf_set_type(buf, BT_BUF_ACL_IN);
	}

	reset_rx(h4);

	LOG_DBG("Putting buf %p to rx fifo", buf);
	net_buf_put(&h4->rx.fifo, buf);
}

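/* Header-stage dispatch of the RX state machine. */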
static inline void read_header(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	switch (h4->rx.type) {
	case BT_HCI_H4_NONE:
		h4_get_type(dev);
		return;
	case BT_HCI_H4_EVT:
		get_evt_hdr(dev);
		break;
	case BT_HCI_H4_ACL:
		get_acl_hdr(dev);
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			get_iso_hdr(dev);
			break;
		}
		__fallthrough;
	default:
		CODE_UNREACHABLE;
		return;
	}

	if (h4->rx.have_hdr && h4->rx.buf) {
		if (h4->rx.remaining > net_buf_tailroom(h4->rx.buf)) {
			LOG_ERR("Not enough space in buffer");
			h4->rx.discard = h4->rx.remaining;
			reset_rx(h4);
		} else {
			copy_hdr(h4);
		}
	}
}

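/* Send the H:4 type byte and as much of the pending buffer as the UART
 * FIFO accepts; release the buffer once it has been fully written.
 */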
static inline void process_tx(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int bytes;

	if (!h4->tx.buf) {
		h4->tx.buf = net_buf_get(&h4->tx.fifo, K_NO_WAIT);
		if (!h4->tx.buf) {
			LOG_ERR("TX interrupt but no pending buffer!");
			uart_irq_tx_disable(cfg->uart);
			return;
		}
	}

	if (!h4->tx.type) {
		switch (bt_buf_get_type(h4->tx.buf)) {
		case BT_BUF_ACL_OUT:
			h4->tx.type = BT_HCI_H4_ACL;
			break;
		case BT_BUF_CMD:
			h4->tx.type = BT_HCI_H4_CMD;
			break;
		case BT_BUF_ISO_OUT:
			if (IS_ENABLED(CONFIG_BT_ISO)) {
				h4->tx.type = BT_HCI_H4_ISO;
				break;
			}
			__fallthrough;
		default:
			LOG_ERR("Unknown buffer type");
			goto done;
		}

		bytes = uart_fifo_fill(cfg->uart, &h4->tx.type, 1);
		if (bytes != 1) {
			LOG_WRN("Unable to send H:4 type");
			h4->tx.type = BT_HCI_H4_NONE;
			return;
		}
	}

	bytes = uart_fifo_fill(cfg->uart, h4->tx.buf->data, h4->tx.buf->len);
	if (unlikely(bytes < 0)) {
		LOG_ERR("Unable to write to UART (err %d)", bytes);
	} else {
		net_buf_pull(h4->tx.buf, bytes);
	}

	if (h4->tx.buf->len) {
		return;
	}

done:
	h4->tx.type = BT_HCI_H4_NONE;
	net_buf_unref(h4->tx.buf);
	h4->tx.buf = net_buf_get(&h4->tx.fifo, K_NO_WAIT);
	if (!h4->tx.buf) {
		uart_irq_tx_disable(cfg->uart);
	}
}

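/* RX-stage dispatch: discard, header or payload. */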
static inline void process_rx(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	LOG_DBG("remaining %u discard %u have_hdr %u rx.buf %p len %u",
		h4->rx.remaining, h4->rx.discard, h4->rx.have_hdr, h4->rx.buf,
		h4->rx.buf ? h4->rx.buf->len : 0);

	if (h4->rx.discard) {
		h4->rx.discard -= h4_discard(cfg->uart, h4->rx.discard);
		return;
	}

	if (h4->rx.have_hdr) {
		read_payload(dev);
	} else {
		read_header(dev);
	}
}

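/* UART interrupt handler, serving both the TX and RX directions. */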
static void bt_uart_isr(const struct device *uart, void *user_data)
{
	struct device *dev = user_data;

	while (uart_irq_update(uart) && uart_irq_is_pending(uart)) {
		if (uart_irq_tx_ready(uart)) {
			process_tx(dev);
		}

		if (uart_irq_rx_ready(uart)) {
			process_rx(dev);
		}
	}
}

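/* Queue a packet for transmission and enable the TX interrupt. */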
static int h4_send(const struct device *dev, struct net_buf *buf)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	net_buf_put(&h4->tx.fifo, buf);
	uart_irq_tx_enable(cfg->uart);

	return 0;
}

/** Setup the HCI transport, which usually means to reset the Bluetooth IC
 *
 * @param uart The UART device connecting to the Bluetooth IC
 *
 * @return 0 on success, negative error value on failure
 */
int __weak bt_hci_transport_setup(const struct device *uart)
{
	h4_discard(uart, 32);
	return 0;
}

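/* Open the transport: run the (weak) transport setup hook, register the
 * host's receive callback and start the RX thread.
 */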
static int h4_open(const struct device *dev, bt_hci_recv_t recv)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int ret;
	k_tid_t tid;

	LOG_DBG("");

	uart_irq_rx_disable(cfg->uart);
	uart_irq_tx_disable(cfg->uart);

	ret = bt_hci_transport_setup(cfg->uart);
	if (ret < 0) {
		return -EIO;
	}

	h4->recv = recv;

	uart_irq_callback_user_data_set(cfg->uart, bt_uart_isr, (void *)dev);

	tid = k_thread_create(cfg->rx_thread, cfg->rx_thread_stack,
			      cfg->rx_thread_stack_size,
			      rx_thread, (void *)dev, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "bt_rx_thread");

	return 0;
}

#if defined(CONFIG_BT_HCI_SETUP)
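/* Forward HCI setup to the vendor-specific hook described below. */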
static int h4_setup(const struct device *dev, const struct bt_hci_setup_params *params)
{
	const struct h4_config *cfg = dev->config;

	ARG_UNUSED(params);

	/* Extern bt_h4_vnd_setup function.
	 * This function executes a vendor-specific command sequence to
	 * initialize the BT Controller before the BT Host runs its Reset
	 * sequence. bt_h4_vnd_setup must be implemented in a vendor-specific
	 * HCI extension module if CONFIG_BT_HCI_SETUP is enabled.
	 */
	extern int bt_h4_vnd_setup(const struct device *dev);

	return bt_h4_vnd_setup(cfg->uart);
}
#endif

static const struct bt_hci_driver_api h4_driver_api = {
	.open = h4_open,
	.send = h4_send,
#if defined(CONFIG_BT_HCI_SETUP)
	.setup = h4_setup,
#endif
};

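/* Instantiate the driver for every enabled zephyr,bt-hci-uart node. */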
#define BT_UART_DEVICE_INIT(inst) \
	static K_KERNEL_STACK_DEFINE(rx_thread_stack_##inst, CONFIG_BT_DRV_RX_STACK_SIZE); \
	static struct k_thread rx_thread_##inst; \
	static const struct h4_config h4_config_##inst = { \
		.uart = DEVICE_DT_GET(DT_INST_PARENT(inst)), \
		.rx_thread_stack = rx_thread_stack_##inst, \
		.rx_thread_stack_size = K_KERNEL_STACK_SIZEOF(rx_thread_stack_##inst), \
		.rx_thread = &rx_thread_##inst, \
	}; \
	static struct h4_data h4_data_##inst = { \
		.rx = { \
			.fifo = Z_FIFO_INITIALIZER(h4_data_##inst.rx.fifo), \
		}, \
		.tx = { \
			.fifo = Z_FIFO_INITIALIZER(h4_data_##inst.tx.fifo), \
		}, \
	}; \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &h4_data_##inst, &h4_config_##inst, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &h4_driver_api)

DT_INST_FOREACH_STATUS_OKAY(BT_UART_DEVICE_INIT)