/*
 * Copyright (c) 2019-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include <zephyr/ipc/ipc_service.h>

#include <zephyr/net_buf.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/l2cap.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/hci_raw.h>
#include <zephyr/bluetooth/hci_vs.h>

#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(hci_ipc, CONFIG_BT_LOG_LEVEL);

static struct ipc_ept hci_ept;

static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static struct k_thread tx_thread_data;
static K_FIFO_DEFINE(tx_queue);
static K_SEM_DEFINE(ipc_bound_sem, 0, 1);
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* Set once the IPC endpoint has been bound. The endpoint must not be used
 * before that happens.
 */
static bool ipc_ept_ready;
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */

#define HCI_IPC_CMD 0x01
#define HCI_IPC_ACL 0x02
#define HCI_IPC_SCO 0x03
#define HCI_IPC_EVT 0x04
#define HCI_IPC_ISO 0x05

#define HCI_FATAL_ERR_MSG true
#define HCI_REGULAR_MSG false

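/* Allocate a command buffer and copy into it an HCI command received over
 * IPC, validating the header and the advertised parameter length.
 */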
static struct net_buf *hci_ipc_cmd_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_cmd_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for command header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_CMD, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available command buffers!");
		return NULL;
	}

	if (remaining != hdr->param_len) {
		LOG_ERR("Command payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %u", hdr->param_len);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

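/* Allocate an outgoing ACL buffer and copy into it an ACL data packet
 * received over IPC, checking the header length field against the actual
 * payload size.
 */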
static struct net_buf *hci_ipc_acl_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_acl_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ACL header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ACL_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available ACL buffers!");
		return NULL;
	}

	if (remaining != sys_le16_to_cpu(hdr->len)) {
		LOG_ERR("ACL payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

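/* Allocate an outgoing ISO buffer and copy into it an ISO data packet
 * received over IPC. bt_iso_hdr_len() strips the two RFU bits from the
 * header length field, leaving only the ISO data load length.
 */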
static struct net_buf *hci_ipc_iso_recv(uint8_t *data, size_t remaining)
{
	struct bt_hci_iso_hdr *hdr = (void *)data;
	struct net_buf *buf;

	if (remaining < sizeof(*hdr)) {
		LOG_ERR("Not enough data for ISO header");
		return NULL;
	}

	buf = bt_buf_get_tx(BT_BUF_ISO_OUT, K_NO_WAIT, hdr, sizeof(*hdr));
	if (buf) {
		data += sizeof(*hdr);
		remaining -= sizeof(*hdr);
	} else {
		LOG_ERR("No available ISO buffers!");
		return NULL;
	}

	if (remaining != bt_iso_hdr_len(sys_le16_to_cpu(hdr->len))) {
		LOG_ERR("ISO payload length is not correct");
		net_buf_unref(buf);
		return NULL;
	}

	if (remaining > net_buf_tailroom(buf)) {
		LOG_ERR("Not enough space in buffer");
		net_buf_unref(buf);
		return NULL;
	}

	LOG_DBG("len %zu", remaining);
	net_buf_add_mem(buf, data, remaining);

	return buf;
}

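/* Dispatch a raw IPC message to the matching HCI packet handler based on the
 * H:4-style packet indicator in its first byte, and queue the resulting
 * buffer for transmission to the controller.
 */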
static void hci_ipc_rx(uint8_t *data, size_t len)
{
	uint8_t pkt_indicator;
	struct net_buf *buf = NULL;
	size_t remaining = len;

	LOG_HEXDUMP_DBG(data, len, "IPC data:");

	/* Reject messages too short to contain a packet indicator. */
	if (remaining < sizeof(pkt_indicator)) {
		LOG_ERR("Not enough data for packet indicator");
		return;
	}

	pkt_indicator = *data++;
	remaining -= sizeof(pkt_indicator);

	switch (pkt_indicator) {
	case HCI_IPC_CMD:
		buf = hci_ipc_cmd_recv(data, remaining);
		break;

	case HCI_IPC_ACL:
		buf = hci_ipc_acl_recv(data, remaining);
		break;

	case HCI_IPC_ISO:
		buf = hci_ipc_iso_recv(data, remaining);
		break;

	default:
		LOG_ERR("Unknown HCI type %u", pkt_indicator);
		return;
	}

	if (buf) {
		k_fifo_put(&tx_queue, buf);

		LOG_HEXDUMP_DBG(buf->data, buf->len, "Final net buffer:");
	}
}

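/* Consume buffers queued by hci_ipc_rx() and hand them over to the
 * controller through the HCI raw interface.
 */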
static void tx_thread(void *p1, void *p2, void *p3)
{
	while (1) {
		struct net_buf *buf;
		int err;

		/* Wait until a buffer is available */
		buf = k_fifo_get(&tx_queue, K_FOREVER);
		/* Pass buffer to the stack */
		err = bt_send(buf);
		if (err) {
			LOG_ERR("Unable to send (err %d)", err);
			net_buf_unref(buf);
		}

		/* Give other threads a chance to run if tx_queue keeps getting
		 * new data all the time.
		 */
		k_yield();
	}
}

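/* Prefix a controller-to-host buffer with its packet indicator and push it
 * over the IPC endpoint, retrying until the backend accepts it. The
 * is_fatal_err flag selects the retry policy: fatal error callers may run in
 * ISR context and must not yield.
 */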
static void hci_ipc_send(struct net_buf *buf, bool is_fatal_err)
{
	uint8_t pkt_indicator;
	uint8_t retries = 0;
	int ret;

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	LOG_HEXDUMP_DBG(buf->data, buf->len, "Controller buffer:");

	switch (bt_buf_get_type(buf)) {
	case BT_BUF_ACL_IN:
		pkt_indicator = HCI_IPC_ACL;
		break;
	case BT_BUF_EVT:
		pkt_indicator = HCI_IPC_EVT;
		break;
	case BT_BUF_ISO_IN:
		pkt_indicator = HCI_IPC_ISO;
		break;
	default:
		LOG_ERR("Unknown type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return;
	}
	net_buf_push_u8(buf, pkt_indicator);

	LOG_HEXDUMP_DBG(buf->data, buf->len, "Final HCI buffer:");

	do {
		ret = ipc_service_send(&hci_ept, buf->data, buf->len);
		if (ret < 0) {
			retries++;
			if (retries > 10) {
				/* Default backend (rpmsg_virtio) has a timeout of 150ms. */
				LOG_WRN("IPC send has been blocked for 1.5 seconds.");
				retries = 0;
			}

			/* This function can be called from the application main thread,
			 * from bt_ctlr_assert_handle or from k_sys_fatal_error_handler.
			 * The Bluetooth Controller assert handler and the system fatal
			 * error handler may run in ISR context, where there is no thread
			 * to yield. Moreover, both handlers are designed to report the
			 * error information and then halt the system in an infinite loop,
			 * so as not to cause any further damage when such an exceptional
			 * situation occurs; calling k_yield would work against that
			 * policy.
			 */
			if (is_fatal_err) {
				LOG_ERR("IPC service send error: %d", ret);
			} else {
				/* On the POSIX arch, code takes zero simulated time to
				 * execute, so busy wait loops become infinite loops unless
				 * we force the loop to take a bit of time.
				 *
				 * This delay allows the IPC consumer to execute, thus making
				 * it possible to send more data over IPC afterwards.
				 */
				Z_SPIN_DELAY(500);
				k_yield();
			}
		}
	} while (ret < 0);

	LOG_INF("Sent message of %d bytes.", ret);

	net_buf_unref(buf);
}

#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER)
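/* Report a Bluetooth Controller assert as a vendor specific HCI debug event
 * (when supported and the IPC endpoint is bound), then halt the system.
 */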
void bt_ctlr_assert_handle(char *file, uint32_t line)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	/* Generate an error event only when the IPC service endpoint is already bound. */
	if (ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_assert(file, line);
		if (buf != NULL) {
			/* Send the event over IPC */
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event: %s at %d", __FILE__, __LINE__);
		}
	} else {
		LOG_ERR("IPC endpoint is not ready yet: %s at %d", __FILE__, __LINE__);
	}

	LOG_ERR("Halting system");

#else /* !CONFIG_BT_HCI_VS_FATAL_ERROR */
	LOG_ERR("Controller assert in: %s at %d", file, line);

#endif /* !CONFIG_BT_HCI_VS_FATAL_ERROR */

	/* Flush the logs before halting the CPU */
	LOG_PANIC();

	while (true) {
	}
}
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */

#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
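/* Report a kernel fatal error as a vendor specific HCI debug event carrying
 * the exception stack frame (when the IPC endpoint is bound), then halt the
 * system.
 */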
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	/* Disable interrupts, this is unrecoverable */
	(void)irq_lock();

	/* Generate an error event only when there is a stack frame and the IPC service
	 * endpoint is already bound.
	 */
	if (esf != NULL && ipc_ept_ready) {
		/* Prepare vendor specific HCI debug event */
		struct net_buf *buf;

		buf = hci_vs_err_stack_frame(reason, esf);
		if (buf != NULL) {
			hci_ipc_send(buf, HCI_FATAL_ERR_MSG);
		} else {
			LOG_ERR("Can't create Fatal Error HCI event.");
		}
	}

	LOG_ERR("Halting system");

	/* Flush the logs before halting the CPU */
	LOG_PANIC();

	while (true) {
	}

	CODE_UNREACHABLE;
}
#endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */

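/* IPC endpoint "bound" callback: unblocks main() and marks the endpoint as
 * ready for use by the fatal error handlers.
 */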
static void hci_ept_bound(void *priv)
{
	k_sem_give(&ipc_bound_sem);
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
	ipc_ept_ready = true;
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER || CONFIG_BT_HCI_VS_FATAL_ERROR */
}

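/* IPC endpoint "received" callback: forwards host-to-controller traffic to
 * the HCI receive path.
 */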
static void hci_ept_recv(const void *data, size_t len, void *priv)
{
	LOG_INF("Received message of %zu bytes.", len);
	hci_ipc_rx((uint8_t *)data, len);
}

static struct ipc_ept_cfg hci_ept_cfg = {
	.name = "nrf_bt_hci",
	.cb = {
		.bound    = hci_ept_bound,
		.received = hci_ept_recv,
	},
};

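/* Set up the HCI raw interface and the TX thread, bring up the IPC service
 * endpoint, and then forward controller events and data to the host
 * indefinitely.
 */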
int main(void)
{
	int err;
	const struct device *hci_ipc_instance =
		DEVICE_DT_GET(DT_CHOSEN(zephyr_bt_hci_ipc));

	/* Incoming events and data from the controller */
	static K_FIFO_DEFINE(rx_queue);

	LOG_DBG("Start");

	/* Enable the raw interface, this will in turn open the HCI driver */
	bt_enable_raw(&rx_queue);

	/* Spawn the TX thread and start feeding commands and data to the
	 * controller
	 */
	k_thread_create(&tx_thread_data, tx_thread_stack,
			K_THREAD_STACK_SIZEOF(tx_thread_stack), tx_thread,
			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "HCI ipc TX");

	/* Initialize IPC service instance and register endpoint. */
	err = ipc_service_open_instance(hci_ipc_instance);
	if (err < 0 && err != -EALREADY) {
		LOG_ERR("IPC service instance initialization failed: %d", err);
	}

	err = ipc_service_register_endpoint(hci_ipc_instance, &hci_ept, &hci_ept_cfg);
	if (err) {
		LOG_ERR("Registering endpoint failed with %d", err);
	}

	k_sem_take(&ipc_bound_sem, K_FOREVER);

	while (1) {
		struct net_buf *buf;

		buf = k_fifo_get(&rx_queue, K_FOREVER);
		hci_ipc_send(buf, HCI_REGULAR_MSG);
	}
	return 0;
}