1 /*
2  * Copyright (c) 2019-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <errno.h>
8 #include <stddef.h>
9 #include <stdio.h>
10 #include <string.h>
11 
12 #include <zephyr/device.h>
13 #include <zephyr/kernel.h>
14 #include <zephyr/sys/byteorder.h>
15 #include <zephyr/sys/util.h>
16 
17 #include <zephyr/ipc/ipc_service.h>
18 
19 #include <zephyr/net_buf.h>
20 #include <zephyr/bluetooth/bluetooth.h>
21 #include <zephyr/bluetooth/l2cap.h>
22 #include <zephyr/bluetooth/hci.h>
23 #include <zephyr/bluetooth/buf.h>
24 #include <zephyr/bluetooth/hci_raw.h>
25 #include <zephyr/bluetooth/hci_vs.h>
26 
27 #include <zephyr/logging/log_ctrl.h>
28 #include <zephyr/logging/log.h>
29 
30 LOG_MODULE_REGISTER(hci_ipc, CONFIG_BT_LOG_LEVEL);
31 
32 BUILD_ASSERT(!IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_HCI_ACL_FLOW_CONTROL),
33 	     "HCI IPC driver can drop ACL data without Controller-to-Host ACL flow control");
34 
35 static struct ipc_ept hci_ept;
36 
37 static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
38 static struct k_thread tx_thread_data;
39 static K_FIFO_DEFINE(tx_queue);
40 static K_SEM_DEFINE(ipc_bound_sem, 0, 1);
41 #if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* A flag used to store information if the IPC endpoint has already been bound. The endpoint
 * can't be used before that happens.
 */
45 static bool ipc_ept_ready;
46 #endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */
47 
48 #define HCI_IPC_CMD 0x01
49 #define HCI_IPC_ACL 0x02
50 #define HCI_IPC_SCO 0x03
51 #define HCI_IPC_EVT 0x04
52 #define HCI_IPC_ISO 0x05
53 
54 #define HCI_FATAL_ERR_MSG true
55 #define HCI_REGULAR_MSG false
56 
/* Parse an HCI command arriving over IPC into a host net_buf.
 *
 * @param data      Raw command bytes (header followed by parameters).
 * @param remaining Number of valid bytes at @p data.
 *
 * @return Newly allocated buffer holding the full command, or NULL on any
 *         validation or allocation failure (errors are logged).
 */
static struct net_buf *hci_ipc_cmd_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_cmd_hdr *hdr = (void *)data;
	struct net_buf *buf;

	/* The header must be fully present before it can be trusted. */
	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for command header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_CMD, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf == NULL) {
		LOG_ERR("No available command buffers!");
		return NULL;
	}

	/* The header has been copied into the buffer; step past it. */
	data += sizeof(*hdr);
	remaining -= sizeof(*hdr);

	/* The payload length advertised by the header must match exactly. */
	if (remaining != hdr->param_len) {
		LOG_ERR("Command payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %u", hdr->param_len);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}
93 
/* Parse an outgoing ACL data packet arriving over IPC into a host net_buf.
 *
 * @param data      Raw ACL bytes (header followed by payload).
 * @param remaining Number of valid bytes at @p data.
 *
 * @return Newly allocated buffer holding the full packet, or NULL on any
 *         validation or allocation failure (errors are logged).
 */
static struct net_buf *hci_ipc_acl_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_acl_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ACL header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ACL_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available ACL buffers!");
		return NULL;
	}

	/* The header length field is little-endian on the wire. */
	if (remaining != sys_le16_to_cpu(hdr->len)) {
		LOG_ERR("ACL payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	/* Use %zu: `remaining` is a size_t (the ISO path already does this;
	 * %u here was a format/argument mismatch).
	 */
	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}
130 
/* Parse an outgoing ISO data packet arriving over IPC into a host net_buf.
 *
 * @param data      Raw ISO bytes (header followed by payload).
 * @param remaining Number of valid bytes at @p data.
 *
 * @return Newly allocated buffer holding the full packet, or NULL on any
 *         validation or allocation failure (errors are logged).
 */
static struct net_buf *hci_ipc_iso_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_iso_hdr *hdr = (void *)data;
	struct net_buf *buf;

	/* Reject fragments too short to even carry the ISO header. */
	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ISO header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ISO_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf == NULL) {
		LOG_ERR("No available ISO buffers!");
		return NULL;
	}

	/* Header already copied into the buffer; advance to the payload. */
	data += sizeof(*hdr);
	remaining -= sizeof(*hdr);

	/* The ISO length field carries flag bits; bt_iso_hdr_len() masks
	 * them off before the comparison.
	 */
	if (remaining != bt_iso_hdr_len(sys_le16_to_cpu(hdr->len))) {
		LOG_ERR("ISO payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}
167 
/* Dispatch one inbound IPC message (host -> controller direction) to the
 * appropriate HCI parser and queue the resulting buffer for the TX thread.
 *
 * @param data First byte is the HCI packet-type indicator, the rest is the
 *             HCI packet itself.
 * @param len  Total length of @p data in bytes.
 */
static void hci_ipc_rx(uint8_t *data, size_t len)
{
	uint8_t pkt_indicator;
	struct net_buf *buf = NULL;
	size_t remaining = len;

	/* Guard against a zero-length message: without this, reading the
	 * indicator would run past the buffer and `remaining` would
	 * underflow to a huge size_t value.
	 */
	if (len < sizeof(pkt_indicator)) {
		LOG_ERR("Not enough data for packet indicator");
		return;
	}

	LOG_HEXDUMP_DBG(data, len, "IPC data:");

	pkt_indicator = *data++;
	remaining -= sizeof(pkt_indicator);

	switch (pkt_indicator) {
	case HCI_IPC_CMD:
		buf = hci_ipc_cmd_recv(data, remaining);
		break;

	case HCI_IPC_ACL:
		buf = hci_ipc_acl_recv(data, remaining);
		break;

	case HCI_IPC_ISO:
		buf = hci_ipc_iso_recv(data, remaining);
		break;

	default:
		LOG_ERR("Unknown HCI type %u", pkt_indicator);
		return;
	}

	/* A NULL buf means the parser already logged the failure. */
	if (buf) {
		k_fifo_put(&tx_queue, buf);

		LOG_HEXDUMP_DBG(buf->data, buf->len, "Final net buffer:");
	}
}
203 
tx_thread(void * p1,void * p2,void * p3)204 static void tx_thread(void *p1, void *p2, void *p3)
205 {
206 	while (1) {
207 		struct net_buf *buf;
208 		int err;
209 
210 		/* Wait until a buffer is available */
211 		buf = k_fifo_get(&tx_queue, K_FOREVER);
212 		/* Pass buffer to the stack */
213 		err = bt_send(buf);
214 		if (err) {
215 			LOG_ERR("Unable to send (err %d)", err);
216 			net_buf_unref(buf);
217 		}
218 
219 		/* Give other threads a chance to run if tx_queue keeps getting
220 		 * new data all the time.
221 		 */
222 		k_yield();
223 	}
224 }
225 
/* Send one controller-to-host HCI buffer over the IPC endpoint.
 *
 * Prepends the HCI packet-type indicator, then retries ipc_service_send()
 * until it succeeds. Consumes (unrefs) the buffer in all cases.
 *
 * @param buf          Buffer of type BT_BUF_ACL_IN, BT_BUF_EVT or
 *                     BT_BUF_ISO_IN. Ownership is taken.
 * @param is_fatal_err HCI_FATAL_ERR_MSG when called from an assert/fatal
 *                     handler (possibly ISR context, must not yield);
 *                     HCI_REGULAR_MSG otherwise.
 */
static void hci_ipc_send(struct net_buf *buf, bool is_fatal_err)
{
	uint8_t pkt_indicator;
	uint8_t retries = 0;
	int ret;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	LOG_HEXDUMP_DBG(buf->data, buf->len, "Controller buffer:");

	/* Map the net_buf type to the on-the-wire HCI packet indicator. */
	switch (bt_buf_get_type(buf)) {
	case BT_BUF_ACL_IN:
		pkt_indicator = HCI_IPC_ACL;
		break;
	case BT_BUF_EVT:
		pkt_indicator = HCI_IPC_EVT;
		break;
	case BT_BUF_ISO_IN:
		pkt_indicator = HCI_IPC_ISO;
		break;
	default:
		LOG_ERR("Unknown type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return;
	}
	/* Relies on buffer headroom reserved by the HCI layer. */
	net_buf_push_u8(buf, pkt_indicator);

	LOG_HEXDUMP_DBG(buf->data, buf->len, "Final HCI buffer:");

	/* Retry forever: dropping controller-to-host traffic is worse than
	 * blocking here, and flow control backpressure is expected.
	 */
	do {
		ret = ipc_service_send(&hci_ept, buf->data, buf->len);
		if (ret < 0) {
			retries++;
			if (retries > 10) {
				/* Default backend (rpmsg_virtio) has a timeout of 150ms. */
				LOG_WRN("IPC send has been blocked for 1.5 seconds.");
				retries = 0;
			}

			/* The function can be called by the application main thread,
			 * bt_ctlr_assert_handle and k_sys_fatal_error_handler. In case of a call by
			 * Bluetooth Controller assert handler or system fatal error handler the
			 * call can be from ISR context, hence there is no thread to yield. Besides
			 * that both handlers implement a policy to provide error information and
			 * stop the system in an infinite loop. The goal is to prevent any other
			 * damage to the system if one of such exceptional situations occur, hence
			 * call to k_yield is against it.
			 */
			if (is_fatal_err) {
				LOG_ERR("IPC service send error: %d", ret);
			} else {
				/* In the POSIX ARCH, code takes zero simulated time to execute,
				 * so busy wait loops become infinite loops, unless we
				 * force the loop to take a bit of time.
				 *
				 * This delay allows the IPC consumer to execute, thus making
				 * it possible to send more data over IPC afterwards.
				 */
				Z_SPIN_DELAY(500);
				k_yield();
			}
		}
	} while (ret < 0);

	LOG_INF("Sent message of %d bytes.", ret);

	net_buf_unref(buf);
}
294 
295 #if defined(CONFIG_BT_CTLR_ASSERT_HANDLER)
/* Bluetooth Controller assert hook: report the assert location to the host
 * (as a vendor-specific HCI event when enabled) and halt the system.
 * Never returns.
 *
 * @param file Source file of the failed assert (Controller-provided).
 * @param line Line number of the failed assert.
 */
void bt_ctlr_assert_handle(char *file, uint32_t line)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	/* Generate an error event only when IPC service endpoint is already bound. */
	if (ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_assert(file, line);
		if (buf != NULL) {
			/* Send the event over ipc; HCI_FATAL_ERR_MSG prevents
			 * yielding since we may be in ISR context here.
			 */
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event: %s at %d", __FILE__, __LINE__);
		}
	} else {
		LOG_ERR("IPC endpoint is not ready yet: %s at %d", __FILE__, __LINE__);
	}

	LOG_ERR("Halting system");

#else /* !CONFIG_BT_HCI_VS_FATAL_ERROR */
	LOG_ERR("Controller assert in: %s at %d", file, line);

#endif /* !CONFIG_BT_HCI_VS_FATAL_ERROR */

	/* Flush the logs before locking the CPU */
	LOG_PANIC();

	/* Spin forever with interrupts off: stop the system to avoid further
	 * damage after a Controller assert.
	 */
	while (true) {
	};
}
331 #endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */
332 
333 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* Kernel fatal-error hook: forward the fault's stack frame to the host as a
 * vendor-specific HCI event (when possible), then halt. Never returns.
 *
 * @param reason Zephyr fatal-error reason code.
 * @param esf    Exception stack frame; may be NULL if none was captured.
 */
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

	/* Generate an error event only when there is a stack frame and IPC service endpoint is
	 * already bound.
	 */
	if (esf != NULL && ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_stack_frame(reason, esf);
		if (buf != NULL) {
			/* HCI_FATAL_ERR_MSG: no yielding, may be ISR context. */
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event.\n");
		}
	}

	LOG_ERR("Halting system");

	/* Flush the logs before locking the CPU */
	LOG_PANIC();

	/* Spin forever with interrupts off; the system must not continue. */
	while (true) {
	};

	CODE_UNREACHABLE;
}
364 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
365 
/* IPC endpoint "bound" callback: unblocks main() (waiting on ipc_bound_sem)
 * and, when fatal-error reporting is enabled, marks the endpoint usable for
 * the assert/fatal handlers.
 */
static void hci_ept_bound(void *priv)
{
	k_sem_give(&ipc_bound_sem);
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	ipc_ept_ready = true;
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */
}
373 
/* IPC endpoint "received" callback: forwards one inbound message to the HCI
 * RX dispatcher.
 *
 * @param data Message payload (indicator byte + HCI packet).
 * @param len  Payload length in bytes.
 * @param priv Unused endpoint private data.
 */
static void hci_ept_recv(const void *data, size_t len, void *priv)
{
	ARG_UNUSED(priv);

	/* %zu: len is a size_t (%u was a format/argument mismatch). */
	LOG_INF("Received message of %zu bytes.", len);
	hci_ipc_rx((uint8_t *) data, len);
}
379 
/* Endpoint configuration registered with the IPC service instance. The name
 * must match the peer (host) side's endpoint name.
 */
static struct ipc_ept_cfg hci_ept_cfg = {
	.name = "nrf_bt_hci",
	.cb = {
		.bound    = hci_ept_bound,
		.received = hci_ept_recv,
	},
};
387 
main(void)388 int main(void)
389 {
390 	int err;
391 	const struct device *hci_ipc_instance =
392 		DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_hci_ipc));
393 
394 	/* incoming events and data from the controller */
395 	static K_FIFO_DEFINE(rx_queue);
396 
397 	LOG_DBG("Start");
398 
399 	/* Enable the raw interface, this will in turn open the HCI driver */
400 	bt_enable_raw(&rx_queue);
401 
402 	/* Spawn the TX thread and start feeding commands and data to the
403 	 * controller
404 	 */
405 	k_thread_create(&tx_thread_data, tx_thread_stack,
406 			K_THREAD_STACK_SIZEOF(tx_thread_stack), tx_thread,
407 			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
408 	k_thread_name_set(&tx_thread_data, "HCI ipc TX");
409 
410 	/* Initialize IPC service instance and register endpoint. */
411 	err = ipc_service_open_instance(hci_ipc_instance);
412 	if (err < 0 && err != -EALREADY) {
413 		LOG_ERR("IPC service instance initialization failed: %d\n", err);
414 	}
415 
416 	err = ipc_service_register_endpoint(hci_ipc_instance, &hci_ept, &hci_ept_cfg);
417 	if (err) {
418 		LOG_ERR("Registering endpoint failed with %d", err);
419 	}
420 
421 	k_sem_take(&ipc_bound_sem, K_FOREVER);
422 
423 	while (1) {
424 		struct net_buf *buf;
425 
426 		buf = k_fifo_get(&rx_queue, K_FOREVER);
427 		hci_ipc_send(buf, HCI_REGULAR_MSG);
428 	}
429 	return 0;
430 }
431