/* uart_h5.c - UART based Bluetooth driver */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>

#include <zephyr/kernel.h>

#include <zephyr/init.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/printk.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth.h>

#include "../util.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_driver);

#define DT_DRV_COMPAT zephyr_bt_hci_3wire_uart

#define HCI_3WIRE_ACK_PKT	0x00
#define HCI_COMMAND_PKT		0x01
#define HCI_ACLDATA_PKT		0x02
#define HCI_SCODATA_PKT		0x03
#define HCI_EVENT_PKT		0x04
#define HCI_ISODATA_PKT		0x05
#define HCI_3WIRE_LINK_PKT	0x0f
#define HCI_VENDOR_PKT		0xff
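
/* Reliable H5 packet types carry a sequence number and must be
 * acknowledged by the peer; link control and pure ACK packets are
 * sent unreliably.
 */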
static bool reliable_packet(uint8_t type)
{
	switch (type) {
	case HCI_COMMAND_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_EVENT_PKT:
	case HCI_ISODATA_PKT:
		return true;
	default:
		return false;
	}
}

/* FIXME: Correct timeout */
#define H5_RX_ACK_TIMEOUT	K_MSEC(250)
#define H5_TX_ACK_TIMEOUT	K_MSEC(250)

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
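
/* SLIP framing: within a frame, SLIP_DELIMITER (0xc0) is escaped to
 * 0xdb 0xdc and SLIP_ESC (0xdb) to 0xdb 0xdd, so a bare 0xc0 on the
 * wire always marks a frame boundary.
 */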

#define H5_RX_ESC	1
#define H5_TX_ACK_PEND	2

#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define H5_SET_SEQ(hdr, seq)	((hdr)[0] |= (seq))
#define H5_SET_ACK(hdr, ack)	((hdr)[0] |= (ack) << 3)
#define H5_SET_RELIABLE(hdr)	((hdr)[0] |= 1 << 7)
#define H5_SET_TYPE(hdr, type)	((hdr)[1] |= type)
#define H5_SET_LEN(hdr, len)	(((hdr)[1] |= ((len) & 0x0f) << 4), \
				 ((hdr)[2] |= (len) >> 4))
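
/* Resulting 4-byte packet header layout:
 *
 *   byte 0: seq (bits 0-2), ack (bits 3-5), CRC present (bit 6),
 *           reliable flag (bit 7)
 *   byte 1: packet type (bits 0-3), low nibble of payload length
 *           (bits 4-7)
 *   byte 2: remaining bits of the 12-bit payload length
 *   byte 3: header checksum, computed in h5_send()
 */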

struct h5_data {
	/* Needed for delayed work callbacks */
	const struct device     *dev;

	bt_hci_recv_t           recv;

	struct net_buf		*rx_buf;

	struct k_fifo		tx_queue;
	struct k_fifo		rx_queue;
	struct k_fifo		unack_queue;

	struct k_work_delayable ack_work;
	struct k_work_delayable retx_work;

	uint8_t			tx_win;
	uint8_t			tx_ack;
	uint8_t			tx_seq;

	uint8_t			rx_ack;

	enum {
		UNINIT,
		INIT,
		ACTIVE,
	}			link_state;

	enum {
		START,
		HEADER,
		PAYLOAD,
		END,
	}			rx_state;

	uint8_t unack_queue_len;
};

struct h5_config {
	const struct device *uart;

	k_thread_stack_t *rx_stack;
	size_t rx_stack_size;
	struct k_thread *rx_thread;

	k_thread_stack_t *tx_stack;
	size_t tx_stack_size;
	struct k_thread *tx_thread;
};

static const uint8_t sync_req[] = { 0x01, 0x7e };
static const uint8_t sync_rsp[] = { 0x02, 0x7d };
/* Third byte may change */
static uint8_t conf_req[3] = { 0x03, 0xfc };
static const uint8_t conf_rsp[] = { 0x04, 0x7b };
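
/* Link establishment handshake: the peers exchange SYNC/SYNC_RSP
 * messages (UNINIT -> INIT) followed by CONFIG/CONFIG_RSP messages
 * (INIT -> ACTIVE). The third byte of a Config message is the
 * configuration field carrying the sliding window size (tx_win).
 */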

/* H5 signal buffers pool */
#define MAX_SIG_LEN	3
#define SIGNAL_COUNT	(2 * DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT))
#define SIG_BUF_SIZE (BT_BUF_RESERVE + MAX_SIG_LEN)
NET_BUF_POOL_DEFINE(h5_pool, SIGNAL_COUNT, SIG_BUF_SIZE, 0, NULL);

static void h5_reset_rx(struct h5_data *h5)
{
	if (h5->rx_buf) {
		net_buf_unref(h5->rx_buf);
		h5->rx_buf = NULL;
	}

	h5->rx_state = START;
}

static int h5_unslip_byte(const struct device *uart, uint8_t *byte)
{
	int count;

	if (*byte != SLIP_ESC) {
		return 0;
	}

	/* The byte following the escape marker may not have arrived yet,
	 * so poll the UART FIFO until it does.
	 */
	do {
		count = uart_fifo_read(uart, byte, sizeof(*byte));
	} while (!count);

	switch (*byte) {
	case SLIP_ESC_DELIM:
		*byte = SLIP_DELIMITER;
		break;
	case SLIP_ESC_ESC:
		*byte = SLIP_ESC;
		break;
	default:
		LOG_ERR("Invalid escape byte %x", *byte);
		return -EIO;
	}

	return 0;
}

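/* Drop packets from the unack queue that the last received ack number
 * (rx_ack) confirms. E.g. with tx_seq 5, rx_ack 3 and four packets
 * queued (seqs 1-4), seqs 3 and 4 are still outstanding, so the two
 * oldest packets (seqs 1 and 2) are removed.
 */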
static void process_unack(struct h5_data *h5)
{
	uint8_t next_seq = h5->tx_seq;
	uint8_t number_removed = h5->unack_queue_len;

	if (!h5->unack_queue_len) {
		return;
	}

	LOG_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u", h5->rx_ack, h5->tx_ack,
		h5->tx_seq, h5->unack_queue_len);

	while (h5->unack_queue_len > 0) {
		if (next_seq == h5->rx_ack) {
			/* Next sequence number is the same as last received
			 * ack number
			 */
			break;
		}

		number_removed--;
		/* Similar to (n - 1) % 8 with unsigned conversion */
		next_seq = (next_seq - 1) & 0x07;
	}

	if (next_seq != h5->rx_ack) {
		LOG_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u", h5->rx_ack,
			h5->tx_seq, next_seq);
	}

	LOG_DBG("Need to remove %u packet(s) from the queue", number_removed);

	while (number_removed) {
		struct net_buf *buf = net_buf_get(&h5->unack_queue, K_NO_WAIT);

		if (!buf) {
			LOG_ERR("Unack queue is empty");
			break;
		}

		/* TODO: print or do something with packet */
		LOG_DBG("Remove buf from the unack_queue");

		net_buf_unref(buf);
		h5->unack_queue_len--;
		number_removed--;
	}
}

static void h5_print_header(const uint8_t *hdr, const char *str)
{
	if (H5_HDR_RELIABLE(hdr)) {
		LOG_DBG("%s REL: seq %u ack %u crc %u type %u len %u", str, H5_HDR_SEQ(hdr),
			H5_HDR_ACK(hdr), H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	} else {
		LOG_DBG("%s UNREL: ack %u crc %u type %u len %u", str, H5_HDR_ACK(hdr),
			H5_HDR_CRC(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr));
	}
}

#if defined(CONFIG_BT_HCI_DRIVER_LOG_LEVEL_DBG)
static void hexdump(const char *str, const uint8_t *packet, size_t length)
{
	int n = 0;

	if (!length) {
		printk("%s zero-length signal packet\n", str);
		return;
	}

	while (length--) {
		if (n % 16 == 0) {
			printk("%s %08X ", str, n);
		}

		printk("%02X ", *packet++);

		n++;
		if (n % 8 == 0) {
			if (n % 16 == 0) {
				printk("\n");
			} else {
				printk(" ");
			}
		}
	}

	if (n % 16) {
		printk("\n");
	}
}
#else
#define hexdump(str, packet, length)
#endif

static uint8_t h5_slip_byte(const struct device *uart, uint8_t byte)
{
	switch (byte) {
	case SLIP_DELIMITER:
		uart_poll_out(uart, SLIP_ESC);
		uart_poll_out(uart, SLIP_ESC_DELIM);
		return 2;
	case SLIP_ESC:
		uart_poll_out(uart, SLIP_ESC);
		uart_poll_out(uart, SLIP_ESC_ESC);
		return 2;
	default:
		uart_poll_out(uart, byte);
		return 1;
	}
}

static void h5_send(const struct device *dev, const uint8_t *payload, uint8_t type, int len)
{
	const struct h5_config *cfg = dev->config;
	struct h5_data *h5 = dev->data;
	uint8_t hdr[4];
	int i;

	hexdump("<= ", payload, len);

	(void)memset(hdr, 0, sizeof(hdr));

	/* Set ACK for outgoing packet and stop delayed work */
	H5_SET_ACK(hdr, h5->tx_ack);
	/* If cancel fails we may ack the same seq number twice, this is OK. */
	(void)k_work_cancel_delayable(&h5->ack_work);

	if (reliable_packet(type)) {
		H5_SET_RELIABLE(hdr);
		H5_SET_SEQ(hdr, h5->tx_seq);
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	H5_SET_TYPE(hdr, type);
	H5_SET_LEN(hdr, len);

	/* Calculate header checksum (one's complement of the byte sum) */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	h5_print_header(hdr, "TX: <");

	uart_poll_out(cfg->uart, SLIP_DELIMITER);

	for (i = 0; i < 4; i++) {
		h5_slip_byte(cfg->uart, hdr[i]);
	}

	for (i = 0; i < len; i++) {
		h5_slip_byte(cfg->uart, payload[i]);
	}

	uart_poll_out(cfg->uart, SLIP_DELIMITER);
}
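
/* For example, a pure ACK packet (HCI_3WIRE_ACK_PKT, empty payload)
 * sent while tx_ack == 1 is framed on the wire as:
 *
 *   c0 08 00 00 f7 c0
 *
 * where 0x08 encodes ack 1, 0xf7 is the header checksum and 0xc0 are
 * the SLIP delimiters.
 */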

/* Delayed work that takes care of retransmitting unacknowledged packets */
static void retx_timeout(struct k_work *work)
{
	struct k_work_delayable *delayable = k_work_delayable_from_work(work);
	struct h5_data *h5 = CONTAINER_OF(delayable, struct h5_data, retx_work);

	LOG_DBG("unack_queue_len %u", h5->unack_queue_len);

	if (h5->unack_queue_len) {
		struct k_fifo tmp_queue;
		struct net_buf *buf;

		k_fifo_init(&tmp_queue);

		/* Move the current tx_queue contents to a temporary queue */
		while ((buf = net_buf_get(&h5->tx_queue, K_NO_WAIT))) {
			net_buf_put(&tmp_queue, buf);
		}

		/* Requeue unacked packets at the beginning of tx_queue */
		while ((buf = net_buf_get(&h5->unack_queue, K_NO_WAIT))) {
			/* include also packet type */
			net_buf_push(buf, sizeof(uint8_t));
			net_buf_put(&h5->tx_queue, buf);
			h5->tx_seq = (h5->tx_seq - 1) & 0x07;
			h5->unack_queue_len--;
		}

		/* Queue saved packets from the temporary queue */
		while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
			net_buf_put(&h5->tx_queue, buf);
		}
	}
}

static void ack_timeout(struct k_work *work)
{
	struct k_work_delayable *delayable = k_work_delayable_from_work(work);
	struct h5_data *h5 = CONTAINER_OF(delayable, struct h5_data, ack_work);

	LOG_DBG("");

	h5_send(h5->dev, NULL, HCI_3WIRE_ACK_PKT, 0);
}

static void h5_process_complete_packet(const struct device *dev, uint8_t *hdr)
{
	struct h5_data *h5 = dev->data;
	struct net_buf *buf;

	LOG_DBG("");

	/* rx_ack should be in every packet */
	h5->rx_ack = H5_HDR_ACK(hdr);

	if (reliable_packet(H5_HDR_PKT_TYPE(hdr))) {
		/* For reliable packet increment next transmit ack number */
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		/* Submit delayed work to ack the packet */
		k_work_reschedule(&h5->ack_work, H5_RX_ACK_TIMEOUT);
	}

	h5_print_header(hdr, "RX: >");

	process_unack(h5);

	buf = h5->rx_buf;
	h5->rx_buf = NULL;

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_3WIRE_ACK_PKT:
		net_buf_unref(buf);
		break;
	case HCI_3WIRE_LINK_PKT:
		net_buf_put(&h5->rx_queue, buf);
		break;
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_ISODATA_PKT:
		hexdump("=> ", buf->data, buf->len);
		h5->recv(dev, buf);
		break;
	}
}

static inline struct net_buf *get_evt_buf(uint8_t evt)
{
	return bt_buf_get_evt(evt, false, K_NO_WAIT);
}

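/* RX state machine: START waits for a SLIP delimiter, HEADER collects
 * the (unescaped) 4-byte packet header, PAYLOAD collects H5_HDR_LEN()
 * bytes into rx_buf and END expects the closing SLIP delimiter before
 * handing the packet to h5_process_complete_packet().
 */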
static void bt_uart_isr(const struct device *uart, void *user_data)
{
	const struct device *dev = user_data;
	struct h5_data *h5 = dev->data;
	static int remaining;
	uint8_t byte;
	int ret;
	static uint8_t hdr[4];
	size_t buf_tailroom;

	while (uart_irq_update(uart) &&
	       uart_irq_is_pending(uart)) {

		if (!uart_irq_rx_ready(uart)) {
			if (uart_irq_tx_ready(uart)) {
				LOG_DBG("transmit ready");
			} else {
				LOG_DBG("spurious interrupt");
			}
			/* Only the UART RX path is interrupt-enabled */
			break;
		}

		ret = uart_fifo_read(uart, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5->rx_state) {
		case START:
			if (byte == SLIP_DELIMITER) {
				h5->rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* In case the ending SLIP delimiter was mistaken for
			 * a starting one.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(uart, &byte) < 0) {
				h5_reset_rx(h5);
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			remaining = H5_HDR_LEN(hdr);

			switch (H5_HDR_PKT_TYPE(hdr)) {
			case HCI_EVENT_PKT:
				/* The buffer is allocated only once we know
				 * the exact event type.
				 */
				h5->rx_state = PAYLOAD;
				break;
			case HCI_ACLDATA_PKT:
				h5->rx_buf = bt_buf_get_rx(BT_BUF_ACL_IN,
							   K_NO_WAIT);
				if (!h5->rx_buf) {
					LOG_WRN("No available data buffers");
					h5_reset_rx(h5);
					continue;
				}

				h5->rx_state = PAYLOAD;
				break;
			case HCI_ISODATA_PKT:
				h5->rx_buf = bt_buf_get_rx(BT_BUF_ISO_IN, K_NO_WAIT);
				if (!h5->rx_buf) {
					LOG_WRN("No available data buffers");
					h5_reset_rx(h5);
					continue;
				}

				h5->rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				h5->rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
				if (!h5->rx_buf) {
					LOG_WRN("No available signal buffers");
					h5_reset_rx(h5);
					continue;
				}

				h5->rx_state = PAYLOAD;
				break;
			default:
				LOG_ERR("Wrong packet type %u", H5_HDR_PKT_TYPE(hdr));
				h5->rx_state = END;
				break;
			}
			if (!remaining) {
				h5->rx_state = END;
			}
			break;
		case PAYLOAD:
			if (h5_unslip_byte(uart, &byte) < 0) {
				h5_reset_rx(h5);
				continue;
			}

			/* Allocate HCI event buffer now that we know the
			 * exact event type.
			 */
			if (!h5->rx_buf) {
				h5->rx_buf = get_evt_buf(byte);
				if (!h5->rx_buf) {
					LOG_WRN("No available event buffers");
					h5_reset_rx(h5);
					continue;
				}
			}

			buf_tailroom = net_buf_tailroom(h5->rx_buf);
			if (buf_tailroom < sizeof(byte)) {
				LOG_ERR("Not enough space in buffer %zu/%zu", sizeof(byte),
					buf_tailroom);
				h5_reset_rx(h5);
				break;
			}

			net_buf_add_mem(h5->rx_buf, &byte, sizeof(byte));
			remaining--;
			if (!remaining) {
				h5->rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				LOG_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx(h5);
				break;
			}

			LOG_DBG("Received full packet: type %u", H5_HDR_PKT_TYPE(hdr));

			/* Check the sequence number once the full packet is
			 * received. This could already be done while parsing
			 * the header, but the full packet has to be received
			 * anyway to drain the UART.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5->tx_ack) {
				LOG_ERR("Seq expected %u got %u. Drop packet", h5->tx_ack,
					H5_HDR_SEQ(hdr));
				h5_reset_rx(h5);
				break;
			}

			h5_process_complete_packet(dev, hdr);
			h5->rx_state = START;
			break;
		}
	}
}

static uint8_t h5_get_type(struct net_buf *buf)
{
	return net_buf_pull_u8(buf);
}

static int h5_queue(const struct device *dev, struct net_buf *buf)
{
	struct h5_data *h5 = dev->data;
	uint8_t type;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_CMD:
		type = HCI_COMMAND_PKT;
		break;
	case BT_BUF_ACL_OUT:
		type = HCI_ACLDATA_PKT;
		break;
	case BT_BUF_ISO_OUT:
		type = HCI_ISODATA_PKT;
		break;
	default:
		LOG_ERR("Unknown packet type %u", bt_buf_get_type(buf));
		return -1;
	}

	memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

	net_buf_put(&h5->tx_queue, buf);

	return 0;
}
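
/* The type byte pushed by h5_queue() travels through tx_queue in front
 * of the HCI payload; tx_thread pulls it back off with h5_get_type()
 * before framing the packet in h5_send().
 */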

static void tx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	struct h5_data *h5 = dev->data;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	/* FIXME: make periodic sending */
	h5_send(dev, sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		uint8_t type;

		LOG_DBG("link_state %u", h5->link_state);

		switch (h5->link_state) {
		case UNINIT:
			/* FIXME: send sync */
			k_sleep(K_MSEC(100));
			break;
		case INIT:
			/* FIXME: send conf */
			k_sleep(K_MSEC(100));
			break;
		case ACTIVE:
			buf = net_buf_get(&h5->tx_queue, K_FOREVER);
			type = h5_get_type(buf);

			h5_send(dev, buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			net_buf_put(&h5->unack_queue, buf);
			h5->unack_queue_len++;

			k_work_reschedule(&h5->retx_work, H5_TX_ACK_TIMEOUT);

			break;
		}
	}
}

static void h5_set_txwin(struct h5_data *h5, uint8_t *conf)
{
	conf[2] = h5->tx_win & 0x07;
}

static void rx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	struct h5_data *h5 = dev->data;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("");

	while (true) {
		struct net_buf *buf;

		buf = net_buf_get(&h5->rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			if (h5->link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5_send(dev, sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp));
		} else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) {
			if (h5->link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5->link_state = INIT;
			h5_set_txwin(h5, conf_req);
			h5_send(dev, conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			/*
			 * The Host sends Config Response messages without a
			 * Configuration Field.
			 */
			h5_send(dev, conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp));

			/* Then send Config Request with Configuration Field */
			h5_set_txwin(h5, conf_req);
			h5_send(dev, conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_rsp, 2)) {
			h5->link_state = ACTIVE;
			if (buf->len > 2) {
				/* Configuration field present */
				h5->tx_win = (buf->data[2] & 0x07);
			}

			LOG_DBG("Finished H5 configuration, tx_win %u", h5->tx_win);
		} else {
			LOG_ERR("Not handled yet %x %x", buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}

static void h5_init(const struct device *dev)
{
	const struct h5_config *cfg = dev->config;
	struct h5_data *h5 = dev->data;
	k_tid_t tid;

	LOG_DBG("");

	h5->link_state = UNINIT;
	h5->rx_state = START;
	h5->tx_win = 4U;

	/* TX thread */
	k_fifo_init(&h5->tx_queue);
	tid = k_thread_create(cfg->tx_thread, cfg->tx_stack, cfg->tx_stack_size,
			      tx_thread, (void *)dev, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "tx_thread");

	/* RX thread */
	k_fifo_init(&h5->rx_queue);
	tid = k_thread_create(cfg->rx_thread, cfg->rx_stack, cfg->rx_stack_size,
			      rx_thread, (void *)dev, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "rx_thread");

	/* Unack queue */
	k_fifo_init(&h5->unack_queue);

	/* Init delayed work */
	k_work_init_delayable(&h5->ack_work, ack_timeout);
	k_work_init_delayable(&h5->retx_work, retx_timeout);
}

static int h5_open(const struct device *dev, bt_hci_recv_t recv)
{
	const struct h5_config *cfg = dev->config;
	struct h5_data *h5 = dev->data;

	LOG_DBG("");

	/* This is needed so that we can access the device struct from within the
	 * delayed work callbacks.
	 */
	h5->dev = dev;

	h5->recv = recv;

	uart_irq_rx_disable(cfg->uart);
	uart_irq_tx_disable(cfg->uart);

	bt_uart_drain(cfg->uart);

	uart_irq_callback_user_data_set(cfg->uart, bt_uart_isr, (void *)dev);

	h5_init(dev);

	uart_irq_rx_enable(cfg->uart);

	return 0;
}

static const struct bt_hci_driver_api h5_driver_api = {
	.open = h5_open,
	.send = h5_queue,
};

#define BT_UART_DEVICE_INIT(inst) \
	static K_KERNEL_STACK_DEFINE(rx_thread_stack_##inst, CONFIG_BT_DRV_RX_STACK_SIZE); \
	static struct k_thread rx_thread_##inst; \
	static K_KERNEL_STACK_DEFINE(tx_thread_stack_##inst, CONFIG_BT_DRV_TX_STACK_SIZE); \
	static struct k_thread tx_thread_##inst; \
	static const struct h5_config h5_config_##inst = { \
		.uart = DEVICE_DT_GET(DT_INST_PARENT(inst)), \
		.rx_stack = rx_thread_stack_##inst, \
		.rx_stack_size = K_KERNEL_STACK_SIZEOF(rx_thread_stack_##inst), \
		.rx_thread = &rx_thread_##inst, \
		.tx_stack = tx_thread_stack_##inst, \
		.tx_stack_size = K_KERNEL_STACK_SIZEOF(tx_thread_stack_##inst), \
		.tx_thread = &tx_thread_##inst, \
	}; \
	static struct h5_data h5_##inst; \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &h5_##inst, &h5_config_##inst, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &h5_driver_api)


DT_INST_FOREACH_STATUS_OKAY(BT_UART_DEVICE_INIT)