1 /*
2  * Copyright (c) 2024 STMicroelectronics
3  * Copyright (c) 2016 Nordic Semiconductor ASA
4  * Copyright (c) 2015-2016 Intel Corporation
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <errno.h>
10 #include <stddef.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <zephyr/kernel.h>
15 #include <zephyr/arch/cpu.h>
16 #include <zephyr/sys/byteorder.h>
17 #include <zephyr/logging/log.h>
18 #include <zephyr/sys/util.h>
19 #include <zephyr/device.h>
20 #include <zephyr/init.h>
21 #include <zephyr/drivers/uart.h>
22 #include <zephyr/drivers/bluetooth/hci_driver_bluenrg.h>
23 #include <zephyr/bluetooth/hci_types.h>
24 #include <zephyr/usb/usb_device.h>
25 #include <zephyr/net_buf.h>
26 #include <zephyr/bluetooth/bluetooth.h>
27 #include <zephyr/bluetooth/l2cap.h>
28 #include <zephyr/bluetooth/hci.h>
29 #include <zephyr/bluetooth/buf.h>
30 #include <zephyr/bluetooth/hci_raw.h>
31 #include <version.h>
32 
33 #define LOG_MODULE_NAME gui_hci_uart
34 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
35 
36 static const struct device *const hci_uart_dev =
37 	DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_c2h_uart));
38 static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
39 static struct k_thread tx_thread_data;
40 static K_FIFO_DEFINE(tx_queue);
41 
42 /* RX in terms of bluetooth communication */
43 static K_FIFO_DEFINE(uart_tx_queue);
44 
45 #define H4_ST_EXT_CMD	0x81
46 #define H4_ST_VND_CMD	0xFF
47 
48 #define ST_IDLE		0 /* Waiting for packet type. */
49 #define ST_HDR		1 /* Receiving packet header. */
50 #define ST_PAYLOAD	2 /* Receiving packet payload. */
51 #define ST_DISCARD	3 /* Dropping packet. */
52 
53 /* Length of a discard/flush buffer.
54  * This is sized to align with a BLE HCI packet:
55  * 1 byte H:4 header + 32 bytes ACL/event data
56  * Bigger values might overflow the stack since this is declared as a local
57  * variable, smaller ones will force the caller to call into discard more
58  * often.
59  */
60 #define H4_DISCARD_LEN 33
61 
62 #define RESP_VENDOR_CODE_OFFSET	1
63 #define RESP_LEN_OFFSET_LSB	2
64 #define RESP_LEN_OFFSET_MSB	3
65 #define RESP_CMDCODE_OFFSET	4
66 #define RESP_STATUS_OFFSET	5
67 #define RESP_PARAM_OFFSET	6
68 
69 /* Types of vendor codes */
70 #define VENDOR_CODE_ERROR	0
71 #define VENDOR_CODE_RESPONSE	1
72 
73 /* Commands */
74 #define VENDOR_CMD_READ_VERSION		0x01
75 #define VENDOR_CMD_BLUENRG_RESET	0x04
76 #define VENDOR_CMD_HW_BOOTLOADER	0x05
77 
/* Header of an ST extended HCI command packet (H4_ST_EXT_CMD):
 * like a regular HCI command header but with a 16-bit parameter length.
 */
struct bt_hci_ext_cmd_hdr {
	uint16_t opcode;
	uint16_t param_len;
} __packed;

/* Header of an ST vendor command packet (H4_ST_VND_CMD). */
struct bt_vendor_cmd_hdr {
	uint8_t opcode;
	uint16_t param_len;
} __packed;

/* Layout of the vendor response event built by parse_cmd(); written at
 * buffer_out + 1, right after the H:4 type byte.
 */
struct bt_vendor_rsp_hdr {
	uint8_t vendor_code;
	uint16_t param_len;
	uint8_t opcode;
	uint8_t status;
	uint8_t params[2];
} __packed;

/* Queues a packet on the UART TX path; defined below the ISR helpers. */
static int h4_send(struct net_buf *buf);
97 
parse_cmd(uint8_t * hci_buffer,uint16_t hci_pckt_len,uint8_t * buffer_out)98 static uint16_t parse_cmd(uint8_t *hci_buffer, uint16_t hci_pckt_len, uint8_t *buffer_out)
99 {
100 	uint16_t len = 0;
101 	struct bt_vendor_cmd_hdr *hdr = (struct bt_vendor_cmd_hdr *) hci_buffer;
102 	struct bt_vendor_rsp_hdr *rsp = (struct bt_vendor_rsp_hdr *) (buffer_out + 1);
103 
104 	buffer_out[0] = H4_ST_VND_CMD;
105 	rsp->vendor_code = VENDOR_CODE_RESPONSE;
106 	rsp->opcode = hdr->opcode;
107 	rsp->status = 0;
108 
109 	switch (hdr->opcode) {
110 	case VENDOR_CMD_READ_VERSION:
111 		rsp->params[0] = KERNEL_VERSION_MAJOR;
112 		if (KERNEL_PATCHLEVEL >= 9) {
113 			rsp->params[1] = (KERNEL_VERSION_MINOR * 10) + 9;
114 		} else {
115 			rsp->params[1] = (KERNEL_VERSION_MINOR * 10) + KERNEL_PATCHLEVEL;
116 		}
117 		len = 2;
118 		break;
119 #if DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v1) || DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v2)
120 	case VENDOR_CMD_BLUENRG_RESET:
121 		bluenrg_bt_reset(0);
122 		break;
123 	case VENDOR_CMD_HW_BOOTLOADER:
124 		bluenrg_bt_reset(1);
125 		break;
126 #endif /* DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v1) || DT_HAS_COMPAT_STATUS_OKAY(st_hci_spi_v2) */
127 	default:
128 		rsp->vendor_code = VENDOR_CODE_ERROR;
129 		rsp->status = BT_HCI_ERR_UNKNOWN_CMD;
130 	}
131 
132 	len += 2; /* Status and Command code */
133 	rsp->param_len = sys_cpu_to_le16(len);
134 	len += RESP_CMDCODE_OFFSET;
135 
136 	return len;
137 }
138 
/* Copy a locally generated event into an RX buffer and queue it for
 * transmission to the host over the UART.
 *
 * @return 0 on success, -ENOMEM if no buffer or not enough room.
 */
static int send_evt(uint8_t *response, uint8_t len)
{
	struct net_buf *buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);

	if (buf == NULL) {
		LOG_ERR("EVT no buffer");
		return -ENOMEM;
	}

	if (net_buf_tailroom(buf) < len) {
		LOG_ERR("EVT too long: %d", len);
		net_buf_unref(buf);
		return -ENOMEM;
	}

	net_buf_add_mem(buf, response, len);
	return h4_send(buf);
}
158 
/* Read up to len bytes from the UART RX FIFO into buf.
 *
 * @return Number of bytes actually read (may be 0).
 */
static int h4_read(const struct device *uart, uint8_t *buf, size_t len)
{
	int rx = uart_fifo_read(uart, buf, len);

	/* Fix: len is size_t, so %zu is required; the previous %d was a
	 * format/argument type mismatch (-Wformat) on targets where
	 * size_t is not int-sized.
	 */
	LOG_DBG("read %d req %zu", rx, len);
	return rx;
}
166 
valid_type(uint8_t type)167 static bool valid_type(uint8_t type)
168 {
169 	return (type == BT_HCI_H4_CMD) | (type == H4_ST_EXT_CMD) |
170 		(type == BT_HCI_H4_ACL) | (type == BT_HCI_H4_ISO) | (type == H4_ST_VND_CMD);
171 }
172 
173 /* Function expects that type is validated and only CMD, ISO or ACL are used. */
get_len(const uint8_t * hdr_buf,uint8_t type)174 static uint32_t get_len(const uint8_t *hdr_buf, uint8_t type)
175 {
176 	switch (type) {
177 	case BT_HCI_H4_CMD:
178 		return ((const struct bt_hci_cmd_hdr *)hdr_buf)->param_len;
179 	case H4_ST_EXT_CMD:
180 		return ((const struct bt_hci_ext_cmd_hdr *)hdr_buf)->param_len;
181 	case H4_ST_VND_CMD:
182 		return ((const struct bt_vendor_cmd_hdr *)hdr_buf)->param_len;
183 	case BT_HCI_H4_ISO:
184 		return bt_iso_hdr_len(
185 			sys_le16_to_cpu(((const struct bt_hci_iso_hdr *)hdr_buf)->len));
186 	case BT_HCI_H4_ACL:
187 		return sys_le16_to_cpu(((const struct bt_hci_acl_hdr *)hdr_buf)->len);
188 	default:
189 		LOG_ERR("Invalid type: %u", type);
190 		return 0;
191 	}
192 }
193 
194 /* Function expects that type is validated and only CMD, ISO or ACL are used. */
hdr_len(uint8_t type)195 static int hdr_len(uint8_t type)
196 {
197 	switch (type) {
198 	case BT_HCI_H4_CMD:
199 		return sizeof(struct bt_hci_cmd_hdr);
200 	case H4_ST_EXT_CMD:
201 		return sizeof(struct bt_hci_ext_cmd_hdr);
202 	case H4_ST_VND_CMD:
203 		return sizeof(struct bt_vendor_cmd_hdr);
204 	case BT_HCI_H4_ISO:
205 		return sizeof(struct bt_hci_iso_hdr);
206 	case BT_HCI_H4_ACL:
207 		return sizeof(struct bt_hci_acl_hdr);
208 	default:
209 		LOG_ERR("Invalid type: %u", type);
210 		return 0;
211 	}
212 }
213 
/* Allocate a TX buffer for an incoming packet of the given H:4 type.
 *
 * ST extended and vendor commands are drawn from the regular HCI command
 * pool; vendor command buffers are then re-tagged so tx_thread() can tell
 * them apart. Returns NULL on unknown type or allocation failure.
 */
static struct net_buf *alloc_tx_buf(uint8_t type)
{
	struct net_buf *buf;
	uint8_t alloc_type;

	switch (type) {
	case H4_ST_EXT_CMD:
	case H4_ST_VND_CMD:
	case BT_HCI_H4_CMD:
		alloc_type = BT_HCI_H4_CMD;
		break;
	case BT_HCI_H4_ISO:
	case BT_HCI_H4_ACL:
		alloc_type = type;
		break;
	default:
		LOG_ERR("Invalid type: %u", type);
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_H4, K_NO_WAIT, &alloc_type, sizeof(alloc_type));
	if ((buf != NULL) && (type == H4_ST_VND_CMD)) {
		/* Mark as vendor so it is parsed locally, not sent to BT. */
		bt_buf_set_type(buf, type);
	}

	return buf;
}
238 
rx_isr(void)239 static void rx_isr(void)
240 {
241 	static struct net_buf *buf;
242 	static int remaining;
243 	static uint8_t state;
244 	static uint8_t type;
245 	static uint8_t hdr_buf[MAX(sizeof(struct bt_hci_cmd_hdr), sizeof(struct bt_hci_acl_hdr))];
246 	int read;
247 
248 	do {
249 		switch (state) {
250 		case ST_IDLE:
251 			/* Get packet type */
252 			read = h4_read(hci_uart_dev, &type, sizeof(type));
253 			/* since we read in loop until no data is in the fifo,
254 			 * it is possible that read = 0.
255 			 */
256 			if (read) {
257 				if (valid_type(type)) {
258 					/* Get expected header size and switch
259 					 * to receiving header.
260 					 */
261 					remaining = hdr_len(type);
262 					state = ST_HDR;
263 				} else {
264 					LOG_WRN("Unknown header %d", type);
265 				}
266 			}
267 			break;
268 		case ST_HDR:
269 			read = h4_read(hci_uart_dev, &hdr_buf[hdr_len(type) - remaining],
270 				remaining);
271 			remaining -= read;
272 			if (remaining == 0) {
273 				/* Header received. Allocate buffer and get
274 				 * payload length. If allocation fails leave
275 				 * interrupt. On failed allocation state machine
276 				 * is reset.
277 				 */
278 				uint8_t header_length;
279 
280 				buf = alloc_tx_buf(type);
281 				if (!buf) {
282 					LOG_ERR("No available command buffers!");
283 					state = ST_IDLE;
284 					return;
285 				}
286 
287 				remaining = get_len(hdr_buf, type);
288 				header_length = hdr_len(type);
289 				if (type == H4_ST_EXT_CMD) {
290 					/* Convert to regular HCI_CMD */
291 					if (remaining > 255) {
292 						LOG_ERR("len > 255");
293 						net_buf_unref(buf);
294 						state = ST_DISCARD;
295 					} else {
296 						header_length--;
297 					}
298 				}
299 				net_buf_add_mem(buf, hdr_buf, header_length);
300 				if (remaining > net_buf_tailroom(buf)) {
301 					LOG_ERR("Not enough space in buffer");
302 					net_buf_unref(buf);
303 					state = ST_DISCARD;
304 				} else {
305 					state = ST_PAYLOAD;
306 				}
307 
308 			}
309 			break;
310 		case ST_PAYLOAD:
311 			read = h4_read(hci_uart_dev, net_buf_tail(buf), remaining);
312 			buf->len += read;
313 			remaining -= read;
314 			if (remaining == 0) {
315 				/* Packet received */
316 				LOG_DBG("putting RX packet in queue.");
317 				k_fifo_put(&tx_queue, buf);
318 				state = ST_IDLE;
319 			}
320 			break;
321 		case ST_DISCARD:
322 			uint8_t discard[H4_DISCARD_LEN];
323 			size_t to_read = MIN(remaining, sizeof(discard));
324 
325 			read = h4_read(hci_uart_dev, discard, to_read);
326 			remaining -= read;
327 			if (remaining == 0) {
328 				state = ST_IDLE;
329 			}
330 			break;
331 		default:
332 			read = 0;
333 			__ASSERT_NO_MSG(0);
334 			break;
335 
336 		}
337 	} while (read);
338 }
339 
tx_isr(void)340 static void tx_isr(void)
341 {
342 	static struct net_buf *buf;
343 	int len;
344 
345 	if (!buf) {
346 		buf = k_fifo_get(&uart_tx_queue, K_NO_WAIT);
347 		if (!buf) {
348 			uart_irq_tx_disable(hci_uart_dev);
349 			return;
350 		}
351 	}
352 	len = uart_fifo_fill(hci_uart_dev, buf->data, buf->len);
353 	net_buf_pull(buf, len);
354 	if (!buf->len) {
355 		net_buf_unref(buf);
356 		buf = NULL;
357 	}
358 }
359 
bt_uart_isr(const struct device * unused,void * user_data)360 static void bt_uart_isr(const struct device *unused, void *user_data)
361 {
362 	ARG_UNUSED(unused);
363 	ARG_UNUSED(user_data);
364 
365 	if (!(uart_irq_rx_ready(hci_uart_dev) || uart_irq_tx_ready(hci_uart_dev))) {
366 		LOG_DBG("spurious interrupt");
367 	}
368 	if (uart_irq_tx_ready(hci_uart_dev)) {
369 		tx_isr();
370 	}
371 	if (uart_irq_rx_ready(hci_uart_dev)) {
372 		rx_isr();
373 	}
374 }
375 
tx_thread(void * p1,void * p2,void * p3)376 static void tx_thread(void *p1, void *p2, void *p3)
377 {
378 	enum bt_buf_type buf_type;
379 
380 	while (1) {
381 		struct net_buf *buf;
382 		int err = 0;
383 		uint8_t len;
384 		uint8_t response[16];
385 
386 		/* Wait until a buffer is available */
387 		buf = k_fifo_get(&tx_queue, K_FOREVER);
388 		buf_type = bt_buf_get_type(buf);
389 		if (buf_type == H4_ST_VND_CMD) {
390 			len = parse_cmd(buf->data, buf->len, response);
391 			err =  send_evt(response, len);
392 			if (!err) {
393 				net_buf_unref(buf);
394 			}
395 		} else {
396 			/* Pass buffer to the stack */
397 			err = bt_send(buf);
398 		}
399 		if (err) {
400 			LOG_ERR("Unable to send (err %d)", err);
401 			net_buf_unref(buf);
402 		}
403 
404 		/* Give other threads a chance to run if tx_queue keeps getting
405 		 * new data all the time.
406 		 */
407 		k_yield();
408 	}
409 }
410 
/* Queue a packet for transmission to the host and kick the UART TX
 * interrupt, which drains uart_tx_queue in tx_isr(). Always returns 0.
 */
static int h4_send(struct net_buf *buf)
{
	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);
	k_fifo_put(&uart_tx_queue, buf);
	uart_irq_tx_enable(hci_uart_dev);
	return 0;
}
418 
hci_uart_init(void)419 static int hci_uart_init(void)
420 {
421 	LOG_DBG("");
422 	if (!device_is_ready(hci_uart_dev)) {
423 		LOG_ERR("HCI UART %s is not ready", hci_uart_dev->name);
424 		return -EINVAL;
425 	}
426 	uart_irq_rx_disable(hci_uart_dev);
427 	uart_irq_tx_disable(hci_uart_dev);
428 	uart_irq_callback_set(hci_uart_dev, bt_uart_isr);
429 	uart_irq_rx_enable(hci_uart_dev);
430 	return 0;
431 }
432 
main(void)433 int main(void)
434 {
435 	/* incoming events and data from the controller */
436 	static K_FIFO_DEFINE(rx_queue);
437 	int err;
438 
439 	LOG_DBG("Start");
440 	__ASSERT(hci_uart_dev, "UART device is NULL");
441 
442 	/* Enable the raw interface, this will in turn open the HCI driver */
443 	bt_enable_raw(&rx_queue);
444 	/* Spawn the TX thread and start feeding commands and data to the controller */
445 	k_thread_create(&tx_thread_data, tx_thread_stack,
446 			K_THREAD_STACK_SIZEOF(tx_thread_stack), tx_thread,
447 			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
448 	k_thread_name_set(&tx_thread_data, "HCI uart TX");
449 
450 	while (1) {
451 		struct net_buf *buf;
452 
453 		buf = k_fifo_get(&rx_queue, K_FOREVER);
454 		err = h4_send(buf);
455 		if (err) {
456 			LOG_ERR("Failed to send");
457 		}
458 	}
459 	return 0;
460 }
461 
/* Register hci_uart_init() to run at application-init time, before main(). */
SYS_INIT(hci_uart_init, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
463