/* uart_h5.c - UART based Bluetooth driver */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>

#include <zephyr/kernel.h>

#include <zephyr/init.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/printk.h>
#include <string.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth/hci_driver.h>

#include "../util.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_driver);

static K_KERNEL_STACK_DEFINE(tx_stack, CONFIG_BT_DRV_TX_STACK_SIZE);
static K_KERNEL_STACK_DEFINE(rx_stack, CONFIG_BT_DRV_RX_STACK_SIZE);

static struct k_thread tx_thread_data;
static struct k_thread rx_thread_data;

static struct k_work_delayable ack_work;
static struct k_work_delayable retx_work;

#define HCI_3WIRE_ACK_PKT	0x00
#define HCI_COMMAND_PKT		0x01
#define HCI_ACLDATA_PKT		0x02
#define HCI_SCODATA_PKT		0x03
#define HCI_EVENT_PKT		0x04
#define HCI_ISODATA_PKT		0x05
#define HCI_3WIRE_LINK_PKT	0x0f
#define HCI_VENDOR_PKT		0xff

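/* Reliable H5 packets (HCI commands, ACL data, events and ISO data) carry
 * a sequence number and must be acknowledged by the peer; SCO, link
 * control, pure-ACK and vendor packets are sent unreliably.
 */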
static bool reliable_packet(uint8_t type)
{
	switch (type) {
	case HCI_COMMAND_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_EVENT_PKT:
	case HCI_ISODATA_PKT:
		return true;
	default:
		return false;
	}
}

/* FIXME: Correct timeout */
#define H5_RX_ACK_TIMEOUT	K_MSEC(250)
#define H5_TX_ACK_TIMEOUT	K_MSEC(250)

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

#define H5_RX_ESC	1
#define H5_TX_ACK_PEND	2

#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define H5_SET_SEQ(hdr, seq)	((hdr)[0] |= (seq))
#define H5_SET_ACK(hdr, ack)	((hdr)[0] |= (ack) << 3)
#define H5_SET_RELIABLE(hdr)	((hdr)[0] |= 1 << 7)
#define H5_SET_TYPE(hdr, type)	((hdr)[1] |= (type))
#define H5_SET_LEN(hdr, len)	(((hdr)[1] |= ((len) & 0x0f) << 4), \
				 ((hdr)[2] |= (len) >> 4))
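
/* The macros above encode/decode the 4-byte H5 packet header:
 *
 *   hdr[0]: seq (bits 0-2) | ack (bits 3-5) | data-integrity CRC flag
 *           (bit 6) | reliable flag (bit 7)
 *   hdr[1]: packet type (bits 0-3) | payload length low nibble (bits 4-7)
 *   hdr[2]: payload length high byte
 *   hdr[3]: header checksum, one's complement of hdr[0..2] modulo 256
 *
 * For example (illustrative values), a reliable ACL packet with seq 1,
 * ack 2 and a 5-byte payload encodes as 0x91 0x52 0x00 0x1c.
 */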

static struct h5 {
	struct net_buf		*rx_buf;

	struct k_fifo		tx_queue;
	struct k_fifo		rx_queue;
	struct k_fifo		unack_queue;

	uint8_t			tx_win;
	uint8_t			tx_ack;
	uint8_t			tx_seq;

	uint8_t			rx_ack;

	enum {
		UNINIT,
		INIT,
		ACTIVE,
	}			link_state;

	enum {
		START,
		HEADER,
		PAYLOAD,
		END,
	}			rx_state;
} h5;

static uint8_t unack_queue_len;

static const uint8_t sync_req[] = { 0x01, 0x7e };
static const uint8_t sync_rsp[] = { 0x02, 0x7d };
/* Third byte may change */
static uint8_t conf_req[3] = { 0x03, 0xfc };
static const uint8_t conf_rsp[] = { 0x04, 0x7b };

/* H5 signal buffers pool */
#define MAX_SIG_LEN	3
#define SIGNAL_COUNT	2
#define SIG_BUF_SIZE (BT_BUF_RESERVE + MAX_SIG_LEN)
NET_BUF_POOL_DEFINE(h5_pool, SIGNAL_COUNT, SIG_BUF_SIZE, 0, NULL);

static const struct device *const h5_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_uart));

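/* Drop any partially received packet and restart the RX state machine. */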
static void h5_reset_rx(void)
{
	if (h5.rx_buf) {
		net_buf_unref(h5.rx_buf);
		h5.rx_buf = NULL;
	}

	h5.rx_state = START;
}

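/* Undo SLIP escaping in place: if *byte is SLIP_ESC, busy-read the next
 * byte from the UART FIFO and translate the two-byte escape sequence back
 * to the original value. Returns 0 on success or -EIO on a bad escape.
 */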
static int h5_unslip_byte(uint8_t *byte)
{
	int count;

	if (*byte != SLIP_ESC) {
		return 0;
	}

	do {
		count = uart_fifo_read(h5_dev, byte, sizeof(*byte));
	} while (!count);

	switch (*byte) {
	case SLIP_ESC_DELIM:
		*byte = SLIP_DELIMITER;
		break;
	case SLIP_ESC_ESC:
		*byte = SLIP_ESC;
		break;
	default:
		LOG_ERR("Invalid escape byte %x", *byte);
		return -EIO;
	}

	return 0;
}

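/* Walk back from tx_seq to the peer's last rx_ack to work out how many
 * reliable packets have now been acknowledged, then drop that many
 * buffers from the head of the unack queue.
 */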
static void process_unack(void)
{
	uint8_t next_seq = h5.tx_seq;
	uint8_t number_removed = unack_queue_len;

	if (!unack_queue_len) {
		return;
	}

	LOG_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u", h5.rx_ack, h5.tx_ack, h5.tx_seq,
		unack_queue_len);

	while (unack_queue_len > 0) {
		if (next_seq == h5.rx_ack) {
			/* Next sequence number is the same as last received
			 * ack number
			 */
			break;
		}

		number_removed--;
		/* Similar to (n - 1) % 8 with unsigned conversion */
		next_seq = (next_seq - 1) & 0x07;
	}

	if (next_seq != h5.rx_ack) {
		LOG_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u", h5.rx_ack, h5.tx_seq,
			next_seq);
	}

	LOG_DBG("Need to remove %u packets from the queue", number_removed);

	while (number_removed) {
		struct net_buf *buf = net_buf_get(&h5.unack_queue, K_NO_WAIT);

		if (!buf) {
			LOG_ERR("Unack queue is empty");
			break;
		}

		/* TODO: print or do something with packet */
		LOG_DBG("Remove buf from the unack_queue");

		net_buf_unref(buf);
		unack_queue_len--;
		number_removed--;
	}
}

static void h5_print_header(const uint8_t *hdr, const char *str)
{
	if (H5_HDR_RELIABLE(hdr)) {
		LOG_DBG("%s REL: seq %u ack %u crc %u type %u len %u", str, H5_HDR_SEQ(hdr),
			H5_HDR_ACK(hdr), H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	} else {
		LOG_DBG("%s UNREL: ack %u crc %u type %u len %u", str, H5_HDR_ACK(hdr),
			H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	}
}

#if defined(CONFIG_BT_HCI_DRIVER_LOG_LEVEL_DBG)
static void hexdump(const char *str, const uint8_t *packet, size_t length)
{
	int n = 0;

	if (!length) {
		printk("%s zero-length signal packet\n", str);
		return;
	}

	while (length--) {
		if (n % 16 == 0) {
			printk("%s %08X ", str, n);
		}

		printk("%02X ", *packet++);

		n++;
		if (n % 8 == 0) {
			if (n % 16 == 0) {
				printk("\n");
			} else {
				printk(" ");
			}
		}
	}

	if (n % 16) {
		printk("\n");
	}
}
#else
#define hexdump(str, packet, length)
#endif

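/* SLIP-encode a single byte to the UART: the delimiter and escape bytes
 * are written as two-byte escape sequences, everything else verbatim.
 * Returns the number of bytes written on the wire.
 */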
static uint8_t h5_slip_byte(uint8_t byte)
{
	switch (byte) {
	case SLIP_DELIMITER:
		uart_poll_out(h5_dev, SLIP_ESC);
		uart_poll_out(h5_dev, SLIP_ESC_DELIM);
		return 2;
	case SLIP_ESC:
		uart_poll_out(h5_dev, SLIP_ESC);
		uart_poll_out(h5_dev, SLIP_ESC_ESC);
		return 2;
	default:
		uart_poll_out(h5_dev, byte);
		return 1;
	}
}

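/* Build a 4-byte H5 header for the given payload, piggy-back the current
 * ack number, assign a sequence number to reliable packets, and write the
 * SLIP-framed header and payload out over the UART.
 */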
static void h5_send(const uint8_t *payload, uint8_t type, int len)
{
	uint8_t hdr[4];
	int i;

	hexdump("<= ", payload, len);

	(void)memset(hdr, 0, sizeof(hdr));

	/* Set ACK for outgoing packet and stop delayed work */
	H5_SET_ACK(hdr, h5.tx_ack);
	/* If cancel fails we may ack the same seq number twice, this is OK. */
	(void)k_work_cancel_delayable(&ack_work);

	if (reliable_packet(type)) {
		H5_SET_RELIABLE(hdr);
		H5_SET_SEQ(hdr, h5.tx_seq);
		h5.tx_seq = (h5.tx_seq + 1) % 8;
	}

	H5_SET_TYPE(hdr, type);
	H5_SET_LEN(hdr, len);

	/* Calculate header checksum (one's complement of the byte sum) */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	h5_print_header(hdr, "TX: <");

	uart_poll_out(h5_dev, SLIP_DELIMITER);

	for (i = 0; i < 4; i++) {
		h5_slip_byte(hdr[i]);
	}

	for (i = 0; i < len; i++) {
		h5_slip_byte(payload[i]);
	}

	uart_poll_out(h5_dev, SLIP_DELIMITER);
}

/* Delayed work that retransmits packets which were never acked */
static void retx_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	LOG_DBG("unack_queue_len %u", unack_queue_len);

	if (unack_queue_len) {
		struct k_fifo tmp_queue;
		struct net_buf *buf;

		k_fifo_init(&tmp_queue);

		/* Move pending packets to a temporary queue */
		while ((buf = net_buf_get(&h5.tx_queue, K_NO_WAIT))) {
			net_buf_put(&tmp_queue, buf);
		}

		/* Queue unacked packets to the beginning of the queue */
		while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
			/* include also packet type */
			net_buf_push(buf, sizeof(uint8_t));
			net_buf_put(&h5.tx_queue, buf);
			h5.tx_seq = (h5.tx_seq - 1) & 0x07;
			unack_queue_len--;
		}

		/* Queue saved packets from the temporary queue */
		while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
			net_buf_put(&h5.tx_queue, buf);
		}
	}
}

static void ack_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	LOG_DBG("");

	h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);
}

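/* Handle a fully received and validated packet: record the peer's ack,
 * schedule an ack for reliable packets, release acked TX buffers and
 * dispatch the payload according to its packet type.
 */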
static void h5_process_complete_packet(uint8_t *hdr)
{
	struct net_buf *buf;

	LOG_DBG("");

	/* rx_ack should be in every packet */
	h5.rx_ack = H5_HDR_ACK(hdr);

	if (reliable_packet(H5_HDR_PKT_TYPE(hdr))) {
		/* For reliable packets increment next transmit ack number */
		h5.tx_ack = (h5.tx_ack + 1) % 8;
		/* Submit delayed work to ack the packet */
		k_work_reschedule(&ack_work, H5_RX_ACK_TIMEOUT);
	}

	h5_print_header(hdr, "RX: >");

	process_unack();

	buf = h5.rx_buf;
	h5.rx_buf = NULL;

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_3WIRE_ACK_PKT:
		net_buf_unref(buf);
		break;
	case HCI_3WIRE_LINK_PKT:
		net_buf_put(&h5.rx_queue, buf);
		break;
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_ISODATA_PKT:
		hexdump("=> ", buf->data, buf->len);
		bt_recv(buf);
		break;
	}
}

static inline struct net_buf *get_evt_buf(uint8_t evt)
{
	return bt_buf_get_evt(evt, false, K_NO_WAIT);
}

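/* UART ISR: drains the RX FIFO one byte at a time and feeds a SLIP/H5
 * state machine (START -> HEADER -> PAYLOAD -> END). Complete packets
 * with the expected sequence number are handed to
 * h5_process_complete_packet().
 */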
static void bt_uart_isr(const struct device *unused, void *user_data)
{
	static int remaining;
	uint8_t byte;
	int ret;
	static uint8_t hdr[4];
	size_t buf_tailroom;

	ARG_UNUSED(unused);
	ARG_UNUSED(user_data);

	while (uart_irq_update(h5_dev) &&
	       uart_irq_is_pending(h5_dev)) {

		if (!uart_irq_rx_ready(h5_dev)) {
			if (uart_irq_tx_ready(h5_dev)) {
				LOG_DBG("transmit ready");
			} else {
				LOG_DBG("spurious interrupt");
			}
			/* Only the UART RX path is interrupt-enabled */
			break;
		}

		ret = uart_fifo_read(h5_dev, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5.rx_state) {
		case START:
			if (byte == SLIP_DELIMITER) {
				h5.rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* Handle the case where the closing SLIP delimiter
			 * of the previous packet was mistaken for an
			 * opening one.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			remaining = H5_HDR_LEN(hdr);

			switch (H5_HDR_PKT_TYPE(hdr)) {
			case HCI_EVENT_PKT:
				/* The buffer is allocated only once we know
				 * the exact event type.
				 */
				h5.rx_state = PAYLOAD;
				break;
			case HCI_ACLDATA_PKT:
				h5.rx_buf = bt_buf_get_rx(BT_BUF_ACL_IN,
							  K_NO_WAIT);
				if (!h5.rx_buf) {
					LOG_WRN("No available data buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_ISODATA_PKT:
				h5.rx_buf = bt_buf_get_rx(BT_BUF_ISO_IN,
							  K_NO_WAIT);
				if (!h5.rx_buf) {
					LOG_WRN("No available data buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				h5.rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
				if (!h5.rx_buf) {
					LOG_WRN("No available signal buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			default:
				LOG_ERR("Wrong packet type %u", H5_HDR_PKT_TYPE(hdr));
				h5.rx_state = END;
				break;
			}
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case PAYLOAD:
			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			/* Allocate HCI event buffer now that we know the
			 * exact event type.
			 */
			if (!h5.rx_buf) {
				h5.rx_buf = get_evt_buf(byte);
				if (!h5.rx_buf) {
					LOG_WRN("No available event buffers");
					h5_reset_rx();
					continue;
				}
			}

			buf_tailroom = net_buf_tailroom(h5.rx_buf);
			if (buf_tailroom < sizeof(byte)) {
				LOG_ERR("Not enough space in buffer %zu/%zu", sizeof(byte),
					buf_tailroom);
				h5_reset_rx();
				break;
			}

			net_buf_add_mem(h5.rx_buf, &byte, sizeof(byte));
			remaining--;
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				LOG_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx();
				break;
			}

			LOG_DBG("Received full packet: type %u", H5_HDR_PKT_TYPE(hdr));

			/* The sequence number could already be checked while
			 * parsing the header, but the full packet has to be
			 * received anyway to drain the UART.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5.tx_ack) {
				LOG_ERR("Seq expected %u got %u. Drop packet", h5.tx_ack,
					H5_HDR_SEQ(hdr));
				h5_reset_rx();
				break;
			}

			h5_process_complete_packet(hdr);
			h5.rx_state = START;
			break;
		}
	}
}

static uint8_t h5_get_type(struct net_buf *buf)
{
	return net_buf_pull_u8(buf);
}

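/* hci_driver send callback: prepend the H5 packet type that matches the
 * buffer type and hand the buffer over to the TX thread's queue.
 */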
static int h5_queue(struct net_buf *buf)
{
	uint8_t type;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_CMD:
		type = HCI_COMMAND_PKT;
		break;
	case BT_BUF_ACL_OUT:
		type = HCI_ACLDATA_PKT;
		break;
	case BT_BUF_ISO_OUT:
		type = HCI_ISODATA_PKT;
		break;
	default:
		LOG_ERR("Unknown packet type %u", bt_buf_get_type(buf));
		return -EINVAL;
	}

	memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

	net_buf_put(&h5.tx_queue, buf);

	return 0;
}

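/* TX thread: kicks off link establishment while the link is down and,
 * once ACTIVE, sends queued packets, moving each one to the unack queue
 * until the peer acknowledges it (or the retransmit timer fires).
 */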
static void tx_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	/* FIXME: make periodic sending */
	h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		uint8_t type;

		LOG_DBG("link_state %u", h5.link_state);

		switch (h5.link_state) {
		case UNINIT:
			/* FIXME: send sync */
			k_sleep(K_MSEC(100));
			break;
		case INIT:
			/* FIXME: send conf */
			k_sleep(K_MSEC(100));
			break;
		case ACTIVE:
			buf = net_buf_get(&h5.tx_queue, K_FOREVER);
			type = h5_get_type(buf);

			h5_send(buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			net_buf_put(&h5.unack_queue, buf);
			unack_queue_len++;

			k_work_reschedule(&retx_work, H5_TX_ACK_TIMEOUT);

			break;
		}
	}
}

static void h5_set_txwin(uint8_t *conf)
{
	conf[2] = h5.tx_win & 0x07;
}

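/* RX thread: processes the link-control packets (SYNC and CONFIG
 * exchange) queued by the ISR, advancing link_state from UNINIT through
 * INIT to ACTIVE per the Bluetooth three-wire UART transport layer.
 */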
static void rx_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	while (true) {
		struct net_buf *buf;

		buf = net_buf_get(&h5.rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5_send(sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp));
		} else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5.link_state = INIT;
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			/*
			 * The Host sends Config Response messages without a
			 * Configuration Field.
			 */
			h5_send(conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp));

			/* Then send Config Request with Configuration Field */
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_rsp, 2)) {
			h5.link_state = ACTIVE;
			if (buf->len > 2) {
				/* Configuration field present */
				h5.tx_win = (buf->data[2] & 0x07);
			}

			LOG_DBG("Finished H5 configuration, tx_win %u", h5.tx_win);
		} else {
			LOG_ERR("Not handled yet %x %x", buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}

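/* Set up the queues, spawn the TX and RX threads and initialize the
 * delayed work items used for acking and retransmission.
 */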
static void h5_init(void)
{
	LOG_DBG("");

	h5.link_state = UNINIT;
	h5.rx_state = START;
	h5.tx_win = 4U;

	/* TX thread */
	k_fifo_init(&h5.tx_queue);
	k_thread_create(&tx_thread_data, tx_stack,
			K_KERNEL_STACK_SIZEOF(tx_stack),
			tx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "tx_thread");

	k_fifo_init(&h5.rx_queue);
	k_thread_create(&rx_thread_data, rx_stack,
			K_KERNEL_STACK_SIZEOF(rx_stack),
			rx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "rx_thread");

	/* Unack queue */
	k_fifo_init(&h5.unack_queue);

	/* Init delayed work */
	k_work_init_delayable(&ack_work, ack_timeout);
	k_work_init_delayable(&retx_work, retx_timeout);
}

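/* hci_driver open callback: quiesce the UART, drain any stale bytes,
 * install the ISR and bring up the H5 state machine.
 */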
static int h5_open(void)
{
	LOG_DBG("");

	uart_irq_rx_disable(h5_dev);
	uart_irq_tx_disable(h5_dev);

	bt_uart_drain(h5_dev);

	uart_irq_callback_set(h5_dev, bt_uart_isr);

	h5_init();

	uart_irq_rx_enable(h5_dev);

	return 0;
}

static const struct bt_hci_driver drv = {
	.name		= "H:5",
	.bus		= BT_HCI_DRIVER_BUS_UART,
	.open		= h5_open,
	.send		= h5_queue,
};

static int bt_uart_init(void)
{
	if (!device_is_ready(h5_dev)) {
		return -ENODEV;
	}

	bt_hci_driver_register(&drv);

	return 0;
}

SYS_INIT(bt_uart_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);