/*
 * Copyright (c) 2016 Nordic Semiconductor ASA
 * Copyright (c) 2016 Vinayak Kariappa Chettimada
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/init.h>
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/atomic.h>

#include <zephyr/sys/util.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/drivers/bluetooth.h>

#ifdef CONFIG_CLOCK_CONTROL_NRF
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#endif

#include "hal/debug.h"

#include "util/util.h"
#include "util/memq.h"
#include "util/dbuf.h"

#include "hal/ccm.h"

#if defined(CONFIG_SOC_FAMILY_NORDIC_NRF)
#include "hal/radio.h"
#endif /* CONFIG_SOC_FAMILY_NORDIC_NRF */

#include "ll_sw/pdu_df.h"
#include "lll/pdu_vendor.h"
#include "ll_sw/pdu.h"

#include "ll_sw/lll.h"
#include "lll/lll_df_types.h"
#include "ll_sw/lll_sync_iso.h"
#include "ll_sw/lll_conn.h"
#include "ll_sw/lll_conn_iso.h"
#include "ll_sw/isoal.h"

#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"

#include "ll_sw/ull_iso_internal.h"
#include "ll_sw/ull_sync_iso_internal.h"
#include "ll_sw/ull_conn_internal.h"
#include "ll_sw/ull_conn_iso_internal.h"

#include "ll.h"

#include "hci_internal.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_ctlr_hci_driver);

#define DT_DRV_COMPAT zephyr_bt_hci_ll_sw_split

struct hci_driver_data {
	bt_hci_recv_t recv;
};

static struct k_sem sem_prio_recv;
static struct k_fifo recv_fifo;

struct k_thread prio_recv_thread_data;
static K_KERNEL_STACK_DEFINE(prio_recv_thread_stack,
			     CONFIG_BT_CTLR_RX_PRIO_STACK_SIZE);
struct k_thread recv_thread_data;
static K_KERNEL_STACK_DEFINE(recv_thread_stack, CONFIG_BT_CTLR_RX_STACK_SIZE);

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static struct k_poll_signal hbuf_signal;
static sys_slist_t hbuf_pend;
static int32_t hbuf_count;
#endif

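/* Flags describing how an HCI event must be routed: events flagged
 * RECV_PRIO are delivered through bt_recv_prio(), events flagged RECV
 * through the normal receive path. An event may carry both flags,
 * e.g. Disconnection Complete.
 */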
#define BT_HCI_EVT_FLAG_RECV_PRIO BIT(0)
#define BT_HCI_EVT_FLAG_RECV      BIT(1)

/** @brief Get HCI event flags.
 *
 * Helper for the HCI driver to get the HCI event flags that describe the
 * rules that must be followed.
 *
 * @param evt HCI event code.
 *
 * @return HCI event flags for the specified event.
 */
static inline uint8_t bt_hci_evt_get_flags(uint8_t evt)
{
	switch (evt) {
	case BT_HCI_EVT_DISCONN_COMPLETE:
		return BT_HCI_EVT_FLAG_RECV | BT_HCI_EVT_FLAG_RECV_PRIO;
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
#if defined(CONFIG_BT_CONN)
	case BT_HCI_EVT_DATA_BUF_OVERFLOW:
		__fallthrough;
#endif /* defined(CONFIG_BT_CONN) */
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
	case BT_HCI_EVT_CMD_COMPLETE:
	case BT_HCI_EVT_CMD_STATUS:
		return BT_HCI_EVT_FLAG_RECV_PRIO;
	default:
		return BT_HCI_EVT_FLAG_RECV;
	}
}

/* Copied here from `hci_raw.c`, which would be used in conjunction with
 * this driver when serializing HCI over the wire. This serves as a
 * converter from the historical (removed from tree) 'recv blocking' API
 * to the normal single-receiver `bt_recv` API.
 */
static int bt_recv_prio(const struct device *dev, struct net_buf *buf)
{
	struct hci_driver_data *data = dev->data;

	if (bt_buf_get_type(buf) == BT_BUF_EVT) {
		struct bt_hci_evt_hdr *hdr = (void *)buf->data;
		uint8_t evt_flags = bt_hci_evt_get_flags(hdr->evt);

		if ((evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO) &&
		    (evt_flags & BT_HCI_EVT_FLAG_RECV)) {
			/* Avoid queuing the event twice */
			return 0;
		}
	}

	return data->recv(dev, buf);
}

#if defined(CONFIG_BT_CTLR_ISO)

#define SDU_HCI_HDR_SIZE (BT_HCI_ISO_HDR_SIZE + BT_HCI_ISO_SDU_TS_HDR_SIZE)

isoal_status_t sink_sdu_alloc_hci(const struct isoal_sink    *sink_ctx,
				  const struct isoal_pdu_rx  *valid_pdu,
				  struct isoal_sdu_buffer    *sdu_buffer)
{
	ARG_UNUSED(sink_ctx);
	ARG_UNUSED(valid_pdu); /* TODO copy valid pdu into netbuf ? */

	struct net_buf *buf = bt_buf_get_rx(BT_BUF_ISO_IN, K_FOREVER);

	if (buf) {
		/* Increase reserved space for headers */
		net_buf_reserve(buf, SDU_HCI_HDR_SIZE + net_buf_headroom(buf));

		sdu_buffer->dbuf = buf;
		sdu_buffer->size = net_buf_tailroom(buf);
	} else {
		LL_ASSERT(0);
	}

	return ISOAL_STATUS_OK;
}

isoal_status_t sink_sdu_emit_hci(const struct isoal_sink             *sink_ctx,
				 const struct isoal_emitted_sdu_frag *sdu_frag,
				 const struct isoal_emitted_sdu      *sdu)
{
	const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0));
	struct hci_driver_data *data = dev->data;
	struct bt_hci_iso_sdu_ts_hdr *sdu_hdr;
	uint16_t packet_status_flag;
	struct bt_hci_iso_hdr *hdr;
	uint16_t handle_packed;
	uint16_t slen_packed;
	struct net_buf *buf;
	uint16_t total_len;
	uint16_t handle;
	uint8_t  ts, pb;
	uint16_t len;

	buf = (struct net_buf *) sdu_frag->sdu.contents.dbuf;

	if (buf) {
#if defined(CONFIG_BT_CTLR_CONN_ISO_HCI_DATAPATH_SKIP_INVALID_DATA)
		if (sdu_frag->sdu.status != ISOAL_SDU_STATUS_VALID) {
			/* unref buffer if invalid fragment */
			net_buf_unref(buf);

			return ISOAL_STATUS_OK;
		}
#endif /* CONFIG_BT_CTLR_CONN_ISO_HCI_DATAPATH_SKIP_INVALID_DATA */

		pb  = sdu_frag->sdu_state;
		len = sdu_frag->sdu_frag_size;
		total_len = sdu->total_sdu_size;
		packet_status_flag = sdu->collated_status;

		/* BT Core V5.3 : Vol 4 HCI I/F : Part E HCI Func. Spec.:
		 * 5.4.5 HCI ISO Data packets
		 * If Packet_Status_Flag equals 0b10 then PB_Flag shall equal 0b10.
		 * When Packet_Status_Flag is set to 0b10 in packets from the Controller to the
		 * Host, there is no data and ISO_SDU_Length shall be set to zero.
		 */
		if (packet_status_flag == ISOAL_SDU_STATUS_LOST_DATA) {
			if (len > 0 && buf->len >= len) {
				/* Discard data */
				net_buf_pull_mem(buf, len);
			}
			len = 0;
			total_len = 0;
		}

		/*
		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * 5.4.5 HCI ISO Data packets
		 *
		 * PB_Flag:
		 *  Value   Parameter Description
		 *  0b00    The ISO_Data_Load field contains a header and the first fragment
		 *          of a fragmented SDU.
		 *  0b01    The ISO_Data_Load field contains a continuation fragment of an SDU.
		 *  0b10    The ISO_Data_Load field contains a header and a complete SDU.
		 *  0b11    The ISO_Data_Load field contains the last fragment of an SDU.
		 *
		 * The TS_Flag bit shall be set if the ISO_Data_Load field contains a
		 * Time_Stamp field. This bit shall only be set if the PB_Flag field equals 0b00 or
		 * 0b10.
		 */
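		/* PB_Flag values 0b00 and 0b10 share a zero least-significant
		 * bit, so the Time_Stamp field is present exactly when
		 * (pb & 0x1) == 0.
		 */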
		ts = (pb & 0x1) == 0x0;

		if (ts) {
			sdu_hdr = net_buf_push(buf, BT_HCI_ISO_SDU_TS_HDR_SIZE);
			slen_packed = bt_iso_pkt_len_pack(total_len, packet_status_flag);

			sdu_hdr->ts = sys_cpu_to_le32((uint32_t) sdu_frag->sdu.timestamp);
			sdu_hdr->sdu.sn   = sys_cpu_to_le16((uint16_t) sdu_frag->sdu.sn);
			sdu_hdr->sdu.slen = sys_cpu_to_le16(slen_packed);

			len += BT_HCI_ISO_SDU_TS_HDR_SIZE;
		}

		hdr = net_buf_push(buf, BT_HCI_ISO_HDR_SIZE);

		handle = sink_ctx->session.handle;
		handle_packed = bt_iso_handle_pack(handle, pb, ts);

		hdr->handle = sys_cpu_to_le16(handle_packed);
		hdr->len = sys_cpu_to_le16(len);

		/* send fragment up the chain */
		data->recv(dev, buf);
	}

	return ISOAL_STATUS_OK;
}

isoal_status_t sink_sdu_write_hci(void *dbuf,
				  const size_t sdu_written,
				  const uint8_t *pdu_payload,
				  const size_t consume_len)
{
	ARG_UNUSED(sdu_written);

	struct net_buf *buf = (struct net_buf *) dbuf;

	LL_ASSERT(buf);
	net_buf_add_mem(buf, pdu_payload, consume_len);

	return ISOAL_STATUS_OK;
}
#endif /* CONFIG_BT_CTLR_ISO */

void hci_recv_fifo_reset(void)
{
	/* NOTE: As there is no equivalent API to wake up a waiting thread and
	 * reinitialize the queue so it is empty, we cancel the wait and then
	 * initialize the queue. As the Tx thread and Rx thread are
	 * co-operative, we should be relatively safe doing the below.
	 * k_sched_lock and k_sched_unlock are needed, as native_posix seems
	 * to swap to the waiting thread on the call to k_fifo_cancel_wait.
	 */
	k_sched_lock();
	k_fifo_cancel_wait(&recv_fifo);
	k_fifo_init(&recv_fifo);
	k_sched_unlock();
}

static struct net_buf *process_prio_evt(struct node_rx_pdu *node_rx,
					uint8_t *evt_flags)
{
#if defined(CONFIG_BT_CONN)
	if (node_rx->hdr.user_meta == HCI_CLASS_EVT_CONNECTION) {
		uint16_t handle;
		struct pdu_data *pdu_data = (void *)node_rx->pdu;

		handle = node_rx->hdr.handle;
		if (node_rx->hdr.type == NODE_RX_TYPE_TERMINATE) {
			struct net_buf *buf;

			buf = bt_buf_get_evt(BT_HCI_EVT_DISCONN_COMPLETE, false,
					     K_FOREVER);
			hci_disconn_complete_encode(pdu_data, handle, buf);
			hci_disconn_complete_process(handle);
			*evt_flags = BT_HCI_EVT_FLAG_RECV_PRIO | BT_HCI_EVT_FLAG_RECV;
			return buf;
		}
	}
#endif /* CONFIG_BT_CONN */

	*evt_flags = BT_HCI_EVT_FLAG_RECV;
	return NULL;
}

/**
 * @brief Handover from Controller thread to Host thread
 * @details Execution context: Controller thread
 *   Pull from memq_ll_rx and push up to Host thread recv_thread() via recv_fifo
 * @param p1  Pointer to the HCI device instance
 * @param p2  Unused. Required to conform with Zephyr thread prototype
 * @param p3  Unused. Required to conform with Zephyr thread prototype
 */
static void prio_recv_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;

	while (1) {
		struct node_rx_pdu *node_rx;
		struct net_buf *buf;
		bool iso_received;
		uint8_t num_cmplt;
		uint16_t handle;

		iso_received = false;

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		node_rx = ll_iso_rx_get();
		if (node_rx) {
			ll_iso_rx_dequeue();

			/* Find out and store the class for this node */
			node_rx->hdr.user_meta = hci_get_class(node_rx);

			/* Send the rx node up to Host thread,
			 * recv_thread()
			 */
			LOG_DBG("ISO RX node enqueue");
			k_fifo_put(&recv_fifo, node_rx);

			iso_received = true;
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

		/* While there are completed rx nodes */
		while ((num_cmplt = ll_rx_get((void *)&node_rx, &handle))) {
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)

			buf = bt_buf_get_evt(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
					     false, K_FOREVER);
			hci_num_cmplt_encode(buf, handle, num_cmplt);
			LOG_DBG("Num Complete: 0x%04x:%u", handle, num_cmplt);
			bt_recv_prio(dev, buf);
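			/* Relinquish the CPU to other ready threads of equal
			 * priority before encoding the next event
			 * (cooperative scheduling).
			 */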
			k_yield();
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
		}

		if (node_rx) {
			uint8_t evt_flags;

			/* Until now we've only peeked, now we really do
			 * the handover
			 */
			ll_rx_dequeue();

			/* Find out and store the class for this node */
			node_rx->hdr.user_meta = hci_get_class(node_rx);

			buf = process_prio_evt(node_rx, &evt_flags);
			if (buf) {
				LOG_DBG("Priority event");
				if (!(evt_flags & BT_HCI_EVT_FLAG_RECV)) {
					node_rx->hdr.next = NULL;
					ll_rx_mem_release((void **)&node_rx);
				}

				bt_recv_prio(dev, buf);
				/* bt_recv_prio would not release normal evt
				 * buf.
				 */
				if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
					net_buf_unref(buf);
				}
			}

			if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
				/* Send the rx node up to Host thread,
				 * recv_thread()
				 */
				LOG_DBG("RX node enqueue");
				k_fifo_put(&recv_fifo, node_rx);
			}
		}

		if (iso_received || node_rx) {
			/* There may still be completed nodes, continue
			 * pushing all those up to Host before waiting
			 * for ULL mayfly
			 */
			continue;
		}

		LOG_DBG("sem take...");
		/* Wait until ULL mayfly has something to give us.
		 * Blocking-take of the semaphore; we take it once ULL mayfly
		 * has let it go in ll_rx_sched().
		 */
		k_sem_take(&sem_prio_recv, K_FOREVER);
		/* Now, ULL mayfly has something to give to us */
		LOG_DBG("sem taken");
	}
}

static inline struct net_buf *encode_node(struct node_rx_pdu *node_rx,
					  int8_t class)
{
	struct net_buf *buf = NULL;

	/* Check if we need to generate an HCI event or ACL data */
	switch (class) {
	case HCI_CLASS_EVT_DISCARDABLE:
	case HCI_CLASS_EVT_REQUIRED:
	case HCI_CLASS_EVT_CONNECTION:
	case HCI_CLASS_EVT_LLCP:
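		/* Discardable events may be dropped under buffer pressure
		 * (allocated with K_NO_WAIT); all other event classes block
		 * until an event buffer is available.
		 */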
		if (class == HCI_CLASS_EVT_DISCARDABLE) {
			buf = bt_buf_get_evt(BT_HCI_EVT_UNKNOWN, true,
					     K_NO_WAIT);
		} else {
			buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
		}
		if (buf) {
			hci_evt_encode(node_rx, buf);
		}
		break;
#if defined(CONFIG_BT_CONN)
	case HCI_CLASS_ACL_DATA:
		/* generate ACL data */
		buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
		hci_acl_encode(node_rx, buf);
		break;
#endif
#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case HCI_CLASS_ISO_DATA: {
		if (false) {

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		} else if (IS_CIS_HANDLE(node_rx->hdr.handle)) {
			struct ll_conn_iso_stream *cis;

			cis = ll_conn_iso_stream_get(node_rx->hdr.handle);
			if (cis && !cis->teardown) {
				struct ll_iso_stream_hdr *hdr;
				struct ll_iso_datapath *dp;

				hdr = &cis->hdr;
				dp = hdr->datapath_out;
				if (dp && dp->path_id == BT_HCI_DATAPATH_ID_HCI) {
					/* If HCI datapath pass to ISO AL here */
					struct isoal_pdu_rx pckt_meta = {
						.meta = &node_rx->rx_iso_meta,
						.pdu  = (void *)&node_rx->pdu[0],
					};

					/* Pass the ISO PDU through ISO-AL */
					isoal_status_t err =
						isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);

					/* TODO handle err */
					LL_ASSERT(err == ISOAL_STATUS_OK);
				}
			}
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		} else if (IS_SYNC_ISO_HANDLE(node_rx->hdr.handle)) {
			const struct lll_sync_iso_stream *stream;
			struct isoal_pdu_rx isoal_rx;
			uint16_t stream_handle;
			isoal_status_t err;

			stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(node_rx->hdr.handle);
			stream = ull_sync_iso_stream_get(stream_handle);

			/* Check validity of the data path sink. FIXME: A channel disconnect race
			 * may cause ISO data pending without valid data path.
			 */
			if (stream && stream->dp &&
			    (stream->dp->path_id == BT_HCI_DATAPATH_ID_HCI)) {
				isoal_rx.meta = &node_rx->rx_iso_meta;
				isoal_rx.pdu = (void *)node_rx->pdu;
				err = isoal_rx_pdu_recombine(stream->dp->sink_hdl, &isoal_rx);

				LL_ASSERT(err == ISOAL_STATUS_OK ||
					  err == ISOAL_STATUS_ERR_SDU_ALLOC);
			}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

		} else {
			LL_ASSERT(0);
		}

		node_rx->hdr.next = NULL;
		ll_iso_rx_mem_release((void **)&node_rx);

		return buf;
	}
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

	default:
		LL_ASSERT(0);
		break;
	}

	node_rx->hdr.next = NULL;
	ll_rx_mem_release((void **)&node_rx);

	return buf;
}

static inline struct net_buf *process_node(struct node_rx_pdu *node_rx)
{
	uint8_t class = node_rx->hdr.user_meta;
	struct net_buf *buf = NULL;

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
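	/* hbuf_count is -1 while the Host has not configured its buffers
	 * (see process_hbuf()); in that case no controller-to-host flow
	 * control is applied.
	 */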
	if (hbuf_count != -1) {
		bool pend = !sys_slist_is_empty(&hbuf_pend);

		/* controller to host flow control enabled */
		switch (class) {
		case HCI_CLASS_ISO_DATA:
		case HCI_CLASS_EVT_DISCARDABLE:
		case HCI_CLASS_EVT_REQUIRED:
			break;
		case HCI_CLASS_EVT_CONNECTION:
		case HCI_CLASS_EVT_LLCP:
			/* for conn-related events, only pend is relevant */
			hbuf_count = 1;
			__fallthrough;
		case HCI_CLASS_ACL_DATA:
			if (pend || !hbuf_count) {
				sys_slist_append(&hbuf_pend, (void *)node_rx);
				LOG_DBG("FC: Queuing item: %d", class);
				return NULL;
			}
			break;
		default:
			LL_ASSERT(0);
			break;
		}
	}
#endif

	/* process regular node from radio */
	buf = encode_node(node_rx, class);

	return buf;
}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
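/* Dequeue and encode a node previously pended in process_node() while
 * waiting for Host buffers. Returns NULL when nothing can be processed
 * in this iteration; when called with a node already in hand (n != NULL),
 * it only schedules a later iteration via hbuf_signal.
 */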
static inline struct net_buf *process_hbuf(struct node_rx_pdu *n)
{
	/* shadow total count in case of preemption */
	struct node_rx_pdu *node_rx = NULL;
	int32_t hbuf_total = hci_hbuf_total;
	struct net_buf *buf = NULL;
	uint8_t class;
	int reset;

	reset = atomic_test_and_clear_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
	if (reset) {
		/* flush queue, no need to free, the LL has already done it */
		sys_slist_init(&hbuf_pend);
	}

	if (hbuf_total <= 0) {
		hbuf_count = -1;
		return NULL;
	}

	/* available host buffers */
	hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);

	/* host acked ACL packets, try to dequeue from hbuf */
	node_rx = (void *)sys_slist_peek_head(&hbuf_pend);
	if (!node_rx) {
		return NULL;
	}

	/* Return early if this iteration already has a node to process */
	class = node_rx->hdr.user_meta;
	if (n) {
		if (class == HCI_CLASS_EVT_CONNECTION ||
		    class == HCI_CLASS_EVT_LLCP ||
		    (class == HCI_CLASS_ACL_DATA && hbuf_count)) {
			/* node to process later, schedule an iteration */
			LOG_DBG("FC: signalling");
			k_poll_signal_raise(&hbuf_signal, 0x0);
		}
		return NULL;
	}

	switch (class) {
	case HCI_CLASS_EVT_CONNECTION:
	case HCI_CLASS_EVT_LLCP:
		LOG_DBG("FC: dequeueing event");
		(void) sys_slist_get(&hbuf_pend);
		break;
	case HCI_CLASS_ACL_DATA:
		if (hbuf_count) {
			LOG_DBG("FC: dequeueing ACL data");
			(void) sys_slist_get(&hbuf_pend);
		} else {
			/* no buffers, HCI will signal */
			node_rx = NULL;
		}
		break;
	case HCI_CLASS_EVT_DISCARDABLE:
	case HCI_CLASS_EVT_REQUIRED:
	default:
		LL_ASSERT(0);
		break;
	}

	if (node_rx) {
		buf = encode_node(node_rx, class);
		/* Update host buffers after encoding */
		hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);
		/* next node */
		node_rx = (void *)sys_slist_peek_head(&hbuf_pend);
		if (node_rx) {
			class = node_rx->hdr.user_meta;

			if (class == HCI_CLASS_EVT_CONNECTION ||
			    class == HCI_CLASS_EVT_LLCP ||
			    (class == HCI_CLASS_ACL_DATA && hbuf_count)) {
				/* more to process, schedule an
				 * iteration
				 */
				LOG_DBG("FC: signalling");
				k_poll_signal_raise(&hbuf_signal, 0x0);
			}
		}
	}

	return buf;
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

/**
 * @brief Block on and process nodes from the Controller thread's recv_fifo
 * @details Execution context: Host thread
 */
static void recv_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	struct hci_driver_data *data = dev->data;

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* @todo: check if the events structure really needs to be static */
	static struct k_poll_event events[2] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
						K_POLL_MODE_NOTIFY_ONLY,
						&hbuf_signal, 0),
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&recv_fifo, 0),
	};
#endif

	while (1) {
		struct node_rx_pdu *node_rx = NULL;
		struct net_buf *buf = NULL;

		LOG_DBG("blocking");
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
		int err;

		err = k_poll(events, 2, K_FOREVER);
		LL_ASSERT(err == 0 || err == -EINTR);
		if (events[0].state == K_POLL_STATE_SIGNALED) {
			events[0].signal->signaled = 0U;
		} else if (events[1].state ==
			   K_POLL_STATE_FIFO_DATA_AVAILABLE) {
			node_rx = k_fifo_get(events[1].fifo, K_NO_WAIT);
		}

		events[0].state = K_POLL_STATE_NOT_READY;
		events[1].state = K_POLL_STATE_NOT_READY;

		/* process host buffers first if any */
		buf = process_hbuf(node_rx);

#else
		node_rx = k_fifo_get(&recv_fifo, K_FOREVER);
#endif
		LOG_DBG("unblocked");

		if (node_rx && !buf) {
			/* process regular node from radio */
			buf = process_node(node_rx);
		}

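		/* Encoding may have produced a chain of net_buf fragments;
		 * walk the chain and hand each fragment to the Host.
		 */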
		while (buf) {
			struct net_buf *frag;

			/* Increment ref count, which will be
			 * unref on call to net_buf_frag_del
			 */
			frag = net_buf_ref(buf);
			buf = net_buf_frag_del(NULL, buf);

			if (frag->len) {
				LOG_DBG("Packet in: type:%u len:%u", bt_buf_get_type(frag),
					frag->len);

				data->recv(dev, frag);
			} else {
				net_buf_unref(frag);
			}

			k_yield();
		}
	}
}

static int cmd_handle(const struct device *dev, struct net_buf *buf)
{
	struct node_rx_pdu *node_rx = NULL;
	struct net_buf *evt;

	evt = hci_cmd_handle(buf, (void **) &node_rx);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(dev, evt);

		if (node_rx) {
			LOG_DBG("RX node enqueue");
			node_rx->hdr.user_meta = hci_get_class(node_rx);
			k_fifo_put(&recv_fifo, node_rx);
		}
	}

	return 0;
}

#if defined(CONFIG_BT_CONN)
static int acl_handle(const struct device *dev, struct net_buf *buf)
{
	struct net_buf *evt;
	int err;

	err = hci_acl_handle(buf, &evt);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(dev, evt);
	}

	return err;
}
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
static int iso_handle(const struct device *dev, struct net_buf *buf)
{
	struct net_buf *evt;
	int err;

	err = hci_iso_handle(buf, &evt);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(dev, evt);
	}

	return err;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

static int hci_driver_send(const struct device *dev, struct net_buf *buf)
{
	uint8_t type;
	int err;

	LOG_DBG("enter");

	if (!buf->len) {
		LOG_ERR("Empty HCI packet");
		return -EINVAL;
	}

	type = bt_buf_get_type(buf);
	switch (type) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_OUT:
		err = acl_handle(dev, buf);
		break;
#endif /* CONFIG_BT_CONN */
	case BT_BUF_CMD:
		err = cmd_handle(dev, buf);
		break;
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_BUF_ISO_OUT:
		err = iso_handle(dev, buf);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
	default:
		LOG_ERR("Unknown HCI type %u", type);
		return -EINVAL;
	}

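	/* On success the Controller has consumed the packet, so drop the
	 * local reference; on error, ownership stays with the caller.
	 */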
	if (!err) {
		net_buf_unref(buf);
	}

	LOG_DBG("exit: %d", err);

	return err;
}

static int hci_driver_open(const struct device *dev, bt_hci_recv_t recv)
{
	struct hci_driver_data *data = dev->data;
	int err;

	DEBUG_INIT();

	k_fifo_init(&recv_fifo);
	k_sem_init(&sem_prio_recv, 0, K_SEM_MAX_LIMIT);

	err = ll_init(&sem_prio_recv);
	if (err) {
		LOG_ERR("LL initialization failed: %d", err);
		return err;
	}

	data->recv = recv;

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	k_poll_signal_init(&hbuf_signal);
	hci_init(&hbuf_signal);
#else
	hci_init(NULL);
#endif

	k_thread_create(&prio_recv_thread_data, prio_recv_thread_stack,
			K_KERNEL_STACK_SIZEOF(prio_recv_thread_stack),
			prio_recv_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_DRIVER_RX_HIGH_PRIO), 0, K_NO_WAIT);
	k_thread_name_set(&prio_recv_thread_data, "BT RX pri");

	k_thread_create(&recv_thread_data, recv_thread_stack,
			K_KERNEL_STACK_SIZEOF(recv_thread_stack),
			recv_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_RX_PRIO), 0, K_NO_WAIT);
	k_thread_name_set(&recv_thread_data, "BT RX");

	LOG_DBG("Success.");

	return 0;
}

static int hci_driver_close(const struct device *dev)
{
	int err;
	struct hci_driver_data *data = dev->data;

	/* Resetting the LL stops all roles */
	err = ll_deinit();
	LL_ASSERT(!err);

	/* Abort prio RX thread */
	k_thread_abort(&prio_recv_thread_data);

	/* Abort RX thread */
	k_thread_abort(&recv_thread_data);

	/* Clear the (host) receive callback */
	data->recv = NULL;

	return 0;
}

static DEVICE_API(bt_hci, hci_driver_api) = {
	.open = hci_driver_open,
	.close = hci_driver_close,
	.send = hci_driver_send,
};

#define BT_HCI_CONTROLLER_INIT(inst) \
	static struct hci_driver_data data_##inst; \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &data_##inst, NULL, POST_KERNEL, \
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &hci_driver_api)

/* Only a single instance is supported */
BT_HCI_CONTROLLER_INIT(0)