/*
 * Copyright (c) 2016 Nordic Semiconductor ASA
 * Copyright (c) 2016 Vinayak Kariappa Chettimada
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/init.h>
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/atomic.h>

#include <zephyr/sys/util.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/drivers/bluetooth/hci_driver.h>

#ifdef CONFIG_CLOCK_CONTROL_NRF
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#endif

#include "hal/debug.h"

#include "util/util.h"
#include "util/memq.h"
#include "util/dbuf.h"

#include "hal/ccm.h"

#if defined(CONFIG_SOC_FAMILY_NRF)
#include "hal/radio.h"
#endif /* CONFIG_SOC_FAMILY_NRF */

#include "ll_sw/pdu_df.h"
#include "lll/pdu_vendor.h"
#include "ll_sw/pdu.h"

#include "ll_sw/lll.h"
#include "lll/lll_df_types.h"
#include "ll_sw/lll_sync_iso.h"
#include "ll_sw/lll_conn.h"
#include "ll_sw/lll_conn_iso.h"
#include "ll_sw/isoal.h"

#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"

#include "ll_sw/ull_iso_internal.h"
#include "ll_sw/ull_sync_iso_internal.h"
#include "ll_sw/ull_conn_internal.h"
#include "ll_sw/ull_conn_iso_internal.h"

#include "ll.h"

#include "hci_internal.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_ctlr_hci_driver);

static struct k_sem sem_prio_recv;
static struct k_fifo recv_fifo;

struct k_thread prio_recv_thread_data;
static K_KERNEL_STACK_DEFINE(prio_recv_thread_stack,
			     CONFIG_BT_CTLR_RX_PRIO_STACK_SIZE);
struct k_thread recv_thread_data;
static K_KERNEL_STACK_DEFINE(recv_thread_stack, CONFIG_BT_RX_STACK_SIZE);

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static struct k_poll_signal hbuf_signal;
static sys_slist_t hbuf_pend;
static int32_t hbuf_count;
#endif

#if defined(CONFIG_BT_CTLR_ISO)

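/* Headroom reserved in each RX SDU net_buf for the HCI ISO header plus the
 * (optional) timestamped data header, both pushed by sink_sdu_emit_hci().
 */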
#define SDU_HCI_HDR_SIZE (BT_HCI_ISO_HDR_SIZE + BT_HCI_ISO_TS_DATA_HDR_SIZE)

isoal_status_t sink_sdu_alloc_hci(const struct isoal_sink *sink_ctx,
				  const struct isoal_pdu_rx *valid_pdu,
				  struct isoal_sdu_buffer *sdu_buffer)
{
	ARG_UNUSED(sink_ctx);
	ARG_UNUSED(valid_pdu); /* TODO copy valid pdu into netbuf ? */

	struct net_buf *buf = bt_buf_get_rx(BT_BUF_ISO_IN, K_FOREVER);

	if (buf) {
		/* Reserve space for headers */
		net_buf_reserve(buf, SDU_HCI_HDR_SIZE);

		sdu_buffer->dbuf = buf;
		sdu_buffer->size = net_buf_tailroom(buf);
	} else {
		LL_ASSERT(0);
	}

	return ISOAL_STATUS_OK;
}

isoal_status_t sink_sdu_emit_hci(const struct isoal_sink *sink_ctx,
				 const struct isoal_emitted_sdu_frag *sdu_frag,
				 const struct isoal_emitted_sdu *sdu)
{
	struct bt_hci_iso_ts_data_hdr *data_hdr;
	uint16_t packet_status_flag;
	struct bt_hci_iso_hdr *hdr;
	uint16_t handle_packed;
	uint16_t slen_packed;
	struct net_buf *buf;
	uint16_t total_len;
	uint16_t handle;
	uint8_t ts, pb;
	uint16_t len;

	buf = (struct net_buf *)sdu_frag->sdu.contents.dbuf;

	if (buf) {
#if defined(CONFIG_BT_CTLR_CONN_ISO_HCI_DATAPATH_SKIP_INVALID_DATA)
		if (sdu_frag->sdu.status != ISOAL_SDU_STATUS_VALID) {
			/* unref buffer if invalid fragment */
			net_buf_unref(buf);

			return ISOAL_STATUS_OK;
		}
#endif /* CONFIG_BT_CTLR_CONN_ISO_HCI_DATAPATH_SKIP_INVALID_DATA */

		pb = sdu_frag->sdu_state;
		len = sdu_frag->sdu_frag_size;
		total_len = sdu->total_sdu_size;
		packet_status_flag = sdu->collated_status;

		/* BT Core V5.3 : Vol 4 HCI I/F : Part E HCI Func. Spec.:
		 * 5.4.5 HCI ISO Data packets
		 * If Packet_Status_Flag equals 0b10 then PB_Flag shall equal 0b10.
		 * When Packet_Status_Flag is set to 0b10 in packets from the Controller to the
		 * Host, there is no data and ISO_SDU_Length shall be set to zero.
		 */
		if (packet_status_flag == ISOAL_SDU_STATUS_LOST_DATA) {
			if (len > 0 && buf->len >= len) {
				/* Discard data */
				net_buf_pull_mem(buf, len);
			}
			len = 0;
			total_len = 0;
		}

		/*
		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * 5.4.5 HCI ISO Data packets
		 *
		 * PB_Flag:
		 * Value  Parameter Description
		 * 0b00   The ISO_Data_Load field contains a header and the first fragment
		 *        of a fragmented SDU.
		 * 0b01   The ISO_Data_Load field contains a continuation fragment of an SDU.
		 * 0b10   The ISO_Data_Load field contains a header and a complete SDU.
		 * 0b11   The ISO_Data_Load field contains the last fragment of an SDU.
		 *
		 * The TS_Flag bit shall be set if the ISO_Data_Load field contains a
		 * Time_Stamp field. This bit shall only be set if the PB_Flag field equals 0b00 or
		 * 0b10.
		 */
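		/* PB_Flag values 0b00 and 0b10 are exactly the ones that
		 * carry a header, and both have bit 0 cleared; hence TS is
		 * derived from the low bit of the PB_Flag.
		 */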
		ts = (pb & 0x1) == 0x0;

		if (ts) {
			data_hdr = net_buf_push(buf, BT_HCI_ISO_TS_DATA_HDR_SIZE);
			slen_packed = bt_iso_pkt_len_pack(total_len, packet_status_flag);

			data_hdr->ts = sys_cpu_to_le32((uint32_t)sdu_frag->sdu.timestamp);
			data_hdr->data.sn = sys_cpu_to_le16((uint16_t)sdu_frag->sdu.sn);
			data_hdr->data.slen = sys_cpu_to_le16(slen_packed);

			len += BT_HCI_ISO_TS_DATA_HDR_SIZE;
		}

		hdr = net_buf_push(buf, BT_HCI_ISO_HDR_SIZE);

		handle = sink_ctx->session.handle;
		handle_packed = bt_iso_handle_pack(handle, pb, ts);

		hdr->handle = sys_cpu_to_le16(handle_packed);
		hdr->len = sys_cpu_to_le16(len);

		/* send fragment up the chain */
		bt_recv(buf);
	}

	return ISOAL_STATUS_OK;
}

isoal_status_t sink_sdu_write_hci(void *dbuf,
				  const uint8_t *pdu_payload,
				  const size_t consume_len)
{
	struct net_buf *buf = (struct net_buf *)dbuf;

	LL_ASSERT(buf);
	net_buf_add_mem(buf, pdu_payload, consume_len);

	return ISOAL_STATUS_OK;
}
#endif /* CONFIG_BT_CTLR_ISO */

void hci_recv_fifo_reset(void)
{
	/* NOTE: As there is no API to wake up a waiting thread and
	 * reinitialize the queue so that it is empty, we cancel the wait and
	 * then initialize the queue. As the Tx thread and Rx thread are
	 * co-operative, we should be relatively safe doing this.
	 * k_sched_lock() and k_sched_unlock() are added because native_posix
	 * appears to switch to the waiting thread on the call to
	 * k_fifo_cancel_wait().
	 */
	k_sched_lock();
	k_fifo_cancel_wait(&recv_fifo);
	k_fifo_init(&recv_fifo);
	k_sched_unlock();
}

static struct net_buf *process_prio_evt(struct node_rx_pdu *node_rx,
					uint8_t *evt_flags)
{
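	/* Only the Disconnection Complete event is generated in the priority
	 * context; everything else is handed off to recv_thread().
	 */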
#if defined(CONFIG_BT_CONN)
	if (node_rx->hdr.user_meta == HCI_CLASS_EVT_CONNECTION) {
		uint16_t handle;
		struct pdu_data *pdu_data = (void *)node_rx->pdu;

		handle = node_rx->hdr.handle;
		if (node_rx->hdr.type == NODE_RX_TYPE_TERMINATE) {
			struct net_buf *buf;

			buf = bt_buf_get_evt(BT_HCI_EVT_DISCONN_COMPLETE, false,
					     K_FOREVER);
			hci_disconn_complete_encode(pdu_data, handle, buf);
			hci_disconn_complete_process(handle);
			*evt_flags = BT_HCI_EVT_FLAG_RECV_PRIO | BT_HCI_EVT_FLAG_RECV;
			return buf;
		}
	}
#endif /* CONFIG_BT_CONN */

	*evt_flags = BT_HCI_EVT_FLAG_RECV;
	return NULL;
}

/**
 * @brief Handover from Controller thread to Host thread
 * @details Execution context: Controller thread
 * Pull from memq_ll_rx and push up to Host thread recv_thread() via recv_fifo
 * @param p1 Unused. Required to conform with Zephyr thread prototype
 * @param p2 Unused. Required to conform with Zephyr thread prototype
 * @param p3 Unused. Required to conform with Zephyr thread prototype
 */
static void prio_recv_thread(void *p1, void *p2, void *p3)
{
	while (1) {
		struct node_rx_pdu *node_rx;
		struct net_buf *buf;
		bool iso_received;
		uint8_t num_cmplt;
		uint16_t handle;

		iso_received = false;

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		node_rx = ll_iso_rx_get();
		if (node_rx) {
			ll_iso_rx_dequeue();

			/* Find out and store the class for this node */
			node_rx->hdr.user_meta = hci_get_class(node_rx);

			/* Send the rx node up to Host thread,
			 * recv_thread()
			 */
			LOG_DBG("ISO RX node enqueue");
			k_fifo_put(&recv_fifo, node_rx);

			iso_received = true;
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

		/* While there are completed rx nodes */
		while ((num_cmplt = ll_rx_get((void *)&node_rx, &handle))) {
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)

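			/* Generate and send Number of Completed Packets from
			 * this high-priority thread, so that Host TX flow
			 * control credits are returned without waiting behind
			 * the bulk RX processing in recv_thread().
			 */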
			buf = bt_buf_get_evt(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
					     false, K_FOREVER);
			hci_num_cmplt_encode(buf, handle, num_cmplt);
			LOG_DBG("Num Complete: 0x%04x:%u", handle, num_cmplt);
			bt_recv_prio(buf);
			k_yield();
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
		}

		if (node_rx) {
			uint8_t evt_flags;

			/* Until now we've only peeked; now we really do
			 * the handover
			 */
			ll_rx_dequeue();

			/* Find out and store the class for this node */
			node_rx->hdr.user_meta = hci_get_class(node_rx);

			buf = process_prio_evt(node_rx, &evt_flags);
			if (buf) {
				LOG_DBG("Priority event");
				if (!(evt_flags & BT_HCI_EVT_FLAG_RECV)) {
					node_rx->hdr.next = NULL;
					ll_rx_mem_release((void **)&node_rx);
				}

				bt_recv_prio(buf);
				/* bt_recv_prio() does not release a normal
				 * event buffer, so unref it here.
				 */
				if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
					net_buf_unref(buf);
				}
			}

			if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
				/* Send the rx node up to Host thread,
				 * recv_thread()
				 */
				LOG_DBG("RX node enqueue");
				k_fifo_put(&recv_fifo, node_rx);
			}
		}

		if (iso_received || node_rx) {
			/* There may still be completed nodes, continue
			 * pushing all those up to Host before waiting
			 * for ULL mayfly
			 */
			continue;
		}

		LOG_DBG("sem take...");
		/* Wait until ULL mayfly has something to give us.
		 * Blocking-take of the semaphore; we take it once ULL mayfly
		 * has let it go in ll_rx_sched().
		 */
		k_sem_take(&sem_prio_recv, K_FOREVER);
		/* Now, ULL mayfly has something to give to us */
		LOG_DBG("sem taken");
	}
}

static inline struct net_buf *encode_node(struct node_rx_pdu *node_rx,
					  int8_t class)
{
	struct net_buf *buf = NULL;

	/* Check if we need to generate an HCI event or ACL data */
	switch (class) {
	case HCI_CLASS_EVT_DISCARDABLE:
	case HCI_CLASS_EVT_REQUIRED:
	case HCI_CLASS_EVT_CONNECTION:
	case HCI_CLASS_EVT_LLCP:
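		/* Discardable events may be dropped: allocate with K_NO_WAIT
		 * and give up if no buffer is available; all other event
		 * classes block until a buffer is free.
		 */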
		if (class == HCI_CLASS_EVT_DISCARDABLE) {
			buf = bt_buf_get_evt(BT_HCI_EVT_UNKNOWN, true,
					     K_NO_WAIT);
		} else {
			buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
		}
		if (buf) {
			hci_evt_encode(node_rx, buf);
		}
		break;
#if defined(CONFIG_BT_CONN)
	case HCI_CLASS_ACL_DATA:
		/* generate ACL data */
		buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
		hci_acl_encode(node_rx, buf);
		break;
#endif
#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case HCI_CLASS_ISO_DATA: {
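		/* The dummy "if (false)" head lets each conditionally
		 * compiled branch below start with "else if", so the chain
		 * builds no matter which ISO roles are enabled.
		 */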
		if (false) {

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		} else if (IS_CIS_HANDLE(node_rx->hdr.handle)) {
			struct ll_conn_iso_stream *cis;

			cis = ll_conn_iso_stream_get(node_rx->hdr.handle);
			if (cis && !cis->teardown) {
				struct ll_iso_stream_hdr *hdr;
				struct ll_iso_datapath *dp;

				hdr = &cis->hdr;
				dp = hdr->datapath_out;
				if (dp && dp->path_id == BT_HCI_DATAPATH_ID_HCI) {
					/* If HCI datapath pass to ISO AL here */
					struct isoal_pdu_rx pckt_meta = {
						.meta = &node_rx->hdr.rx_iso_meta,
						.pdu = (void *)&node_rx->pdu[0],
					};

					/* Pass the ISO PDU through ISO-AL */
					isoal_status_t err =
						isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);

					/* TODO handle err */
					LL_ASSERT(err == ISOAL_STATUS_OK);
				}
			}
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		} else if (IS_SYNC_ISO_HANDLE(node_rx->hdr.handle)) {
			const struct lll_sync_iso_stream *stream;
			struct isoal_pdu_rx isoal_rx;
			uint16_t stream_handle;
			isoal_status_t err;

			stream_handle = LL_BIS_SYNC_IDX_FROM_HANDLE(node_rx->hdr.handle);
			stream = ull_sync_iso_stream_get(stream_handle);

			/* Check validity of the data path sink. FIXME: A channel disconnect race
			 * may cause ISO data pending without valid data path.
			 */
			if (stream && stream->dp) {
				isoal_rx.meta = &node_rx->hdr.rx_iso_meta;
				isoal_rx.pdu = (void *)node_rx->pdu;
				err = isoal_rx_pdu_recombine(stream->dp->sink_hdl, &isoal_rx);

				LL_ASSERT(err == ISOAL_STATUS_OK ||
					  err == ISOAL_STATUS_ERR_SDU_ALLOC);
			}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

		} else {
			LL_ASSERT(0);
		}

		node_rx->hdr.next = NULL;
		ll_iso_rx_mem_release((void **)&node_rx);

		return buf;
	}
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

	default:
		LL_ASSERT(0);
		break;
	}

	node_rx->hdr.next = NULL;
	ll_rx_mem_release((void **)&node_rx);

	return buf;
}

static inline struct net_buf *process_node(struct node_rx_pdu *node_rx)
{
	uint8_t class = node_rx->hdr.user_meta;
	struct net_buf *buf = NULL;

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
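	/* hbuf_count is -1 when the Host has not configured its buffer
	 * counts (see process_hbuf()); flow control is then bypassed here.
	 */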
	if (hbuf_count != -1) {
		bool pend = !sys_slist_is_empty(&hbuf_pend);

		/* controller to host flow control enabled */
		switch (class) {
		case HCI_CLASS_ISO_DATA:
		case HCI_CLASS_EVT_DISCARDABLE:
		case HCI_CLASS_EVT_REQUIRED:
			break;
		case HCI_CLASS_EVT_CONNECTION:
		case HCI_CLASS_EVT_LLCP:
			/* for conn-related events, only pend is relevant */
			hbuf_count = 1;
			__fallthrough;
		case HCI_CLASS_ACL_DATA:
			if (pend || !hbuf_count) {
				sys_slist_append(&hbuf_pend, (void *)node_rx);
				LOG_DBG("FC: Queuing item: %d", class);
				return NULL;
			}
			break;
		default:
			LL_ASSERT(0);
			break;
		}
	}
#endif

	/* process regular node from radio */
	buf = encode_node(node_rx, class);

	return buf;
}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static inline struct net_buf *process_hbuf(struct node_rx_pdu *n)
{
	/* shadow total count in case of preemption */
	struct node_rx_pdu *node_rx = NULL;
	int32_t hbuf_total = hci_hbuf_total;
	struct net_buf *buf = NULL;
	uint8_t class;
	int reset;

	reset = atomic_test_and_clear_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
	if (reset) {
		/* flush queue, no need to free, the LL has already done it */
		sys_slist_init(&hbuf_pend);
	}

	if (hbuf_total <= 0) {
		hbuf_count = -1;
		return NULL;
	}

	/* available host buffers */
	hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);

	/* host acked ACL packets, try to dequeue from hbuf */
	node_rx = (void *)sys_slist_peek_head(&hbuf_pend);
	if (!node_rx) {
		return NULL;
	}

	/* Return early if this iteration already has a node to process */
	class = node_rx->hdr.user_meta;
	if (n) {
		if (class == HCI_CLASS_EVT_CONNECTION ||
		    class == HCI_CLASS_EVT_LLCP ||
		    (class == HCI_CLASS_ACL_DATA && hbuf_count)) {
			/* node to process later, schedule an iteration */
			LOG_DBG("FC: signalling");
			k_poll_signal_raise(&hbuf_signal, 0x0);
		}
		return NULL;
	}

	switch (class) {
	case HCI_CLASS_EVT_CONNECTION:
	case HCI_CLASS_EVT_LLCP:
		LOG_DBG("FC: dequeueing event");
		(void) sys_slist_get(&hbuf_pend);
		break;
	case HCI_CLASS_ACL_DATA:
		if (hbuf_count) {
			LOG_DBG("FC: dequeueing ACL data");
			(void) sys_slist_get(&hbuf_pend);
		} else {
			/* no buffers, HCI will signal */
			node_rx = NULL;
		}
		break;
	case HCI_CLASS_EVT_DISCARDABLE:
	case HCI_CLASS_EVT_REQUIRED:
	default:
		LL_ASSERT(0);
		break;
	}

	if (node_rx) {
		buf = encode_node(node_rx, class);
		/* Update host buffers after encoding */
		hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);
		/* next node */
		node_rx = (void *)sys_slist_peek_head(&hbuf_pend);
		if (node_rx) {
			class = node_rx->hdr.user_meta;

			if (class == HCI_CLASS_EVT_CONNECTION ||
			    class == HCI_CLASS_EVT_LLCP ||
			    (class == HCI_CLASS_ACL_DATA && hbuf_count)) {
				/* more to process, schedule an
				 * iteration
				 */
				LOG_DBG("FC: signalling");
				k_poll_signal_raise(&hbuf_signal, 0x0);
			}
		}
	}

	return buf;
}
#endif

/**
 * @brief Block on and pull from the Controller thread's recv_fifo
 * @details Execution context: Host thread
 */
static void recv_thread(void *p1, void *p2, void *p3)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* @todo: check if the events structure really needs to be static */
	static struct k_poll_event events[2] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
						K_POLL_MODE_NOTIFY_ONLY,
						&hbuf_signal, 0),
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&recv_fifo, 0),
	};
#endif

	while (1) {
		struct node_rx_pdu *node_rx = NULL;
		struct net_buf *buf = NULL;

		LOG_DBG("blocking");
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
		int err;

		err = k_poll(events, 2, K_FOREVER);
		LL_ASSERT(err == 0 || err == -EINTR);
		if (events[0].state == K_POLL_STATE_SIGNALED) {
			events[0].signal->signaled = 0U;
		} else if (events[1].state ==
			   K_POLL_STATE_FIFO_DATA_AVAILABLE) {
			node_rx = k_fifo_get(events[1].fifo, K_NO_WAIT);
		}

		events[0].state = K_POLL_STATE_NOT_READY;
		events[1].state = K_POLL_STATE_NOT_READY;

		/* process host buffers first if any */
		buf = process_hbuf(node_rx);

#else
		node_rx = k_fifo_get(&recv_fifo, K_FOREVER);
#endif
		LOG_DBG("unblocked");

		if (node_rx && !buf) {
			/* process regular node from radio */
			buf = process_node(node_rx);
		}

		while (buf) {
			struct net_buf *frag;

			/* Increment ref count, which will be
			 * unref on call to net_buf_frag_del
			 */
			frag = net_buf_ref(buf);
			buf = net_buf_frag_del(NULL, buf);

			if (frag->len) {
				LOG_DBG("Packet in: type:%u len:%u", bt_buf_get_type(frag),
					frag->len);

				bt_recv(frag);
			} else {
				net_buf_unref(frag);
			}

			k_yield();
		}
	}
}

static int cmd_handle(struct net_buf *buf)
{
	struct node_rx_pdu *node_rx = NULL;
	struct net_buf *evt;

	evt = hci_cmd_handle(buf, (void **) &node_rx);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(evt);

		if (node_rx) {
			LOG_DBG("RX node enqueue");
			node_rx->hdr.user_meta = hci_get_class(node_rx);
			k_fifo_put(&recv_fifo, node_rx);
		}
	}

	return 0;
}

#if defined(CONFIG_BT_CONN)
static int acl_handle(struct net_buf *buf)
{
	struct net_buf *evt;
	int err;

	err = hci_acl_handle(buf, &evt);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(evt);
	}

	return err;
}
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
static int iso_handle(struct net_buf *buf)
{
	struct net_buf *evt;
	int err;

	err = hci_iso_handle(buf, &evt);
	if (evt) {
		LOG_DBG("Replying with event of %u bytes", evt->len);
		bt_recv_prio(evt);
	}

	return err;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

static int hci_driver_send(struct net_buf *buf)
{
	uint8_t type;
	int err;

	LOG_DBG("enter");

	if (!buf->len) {
		LOG_ERR("Empty HCI packet");
		return -EINVAL;
	}

	type = bt_buf_get_type(buf);
	switch (type) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_OUT:
		err = acl_handle(buf);
		break;
#endif /* CONFIG_BT_CONN */
	case BT_BUF_CMD:
		err = cmd_handle(buf);
		break;
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_BUF_ISO_OUT:
		err = iso_handle(buf);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
	default:
		LOG_ERR("Unknown HCI type %u", type);
		return -EINVAL;
	}

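	/* The command, ACL and ISO handlers above do not release the buffer
	 * themselves; drop the reference here on success. On error the
	 * caller retains ownership and frees it.
	 */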
	if (!err) {
		net_buf_unref(buf);
	}

	LOG_DBG("exit: %d", err);

	return err;
}

static int hci_driver_open(void)
{
	int err;

	DEBUG_INIT();

	k_fifo_init(&recv_fifo);
	k_sem_init(&sem_prio_recv, 0, K_SEM_MAX_LIMIT);

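	/* Hand the semaphore over to the LL; it is given from ll_rx_sched()
	 * to wake up prio_recv_thread().
	 */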
	err = ll_init(&sem_prio_recv);
	if (err) {
		LOG_ERR("LL initialization failed: %d", err);
		return err;
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	k_poll_signal_init(&hbuf_signal);
	hci_init(&hbuf_signal);
#else
	hci_init(NULL);
#endif

	k_thread_create(&prio_recv_thread_data, prio_recv_thread_stack,
			K_KERNEL_STACK_SIZEOF(prio_recv_thread_stack),
			prio_recv_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_DRIVER_RX_HIGH_PRIO), 0, K_NO_WAIT);
	k_thread_name_set(&prio_recv_thread_data, "BT RX pri");

	k_thread_create(&recv_thread_data, recv_thread_stack,
			K_KERNEL_STACK_SIZEOF(recv_thread_stack),
			recv_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_RX_PRIO), 0, K_NO_WAIT);
	k_thread_name_set(&recv_thread_data, "BT RX");

	LOG_DBG("Success.");

	return 0;
}

static int hci_driver_close(void)
{
	/* Resetting the LL stops all roles */
	ll_deinit();

	/* Abort prio RX thread */
	k_thread_abort(&prio_recv_thread_data);

	/* Abort RX thread */
	k_thread_abort(&recv_thread_data);

	return 0;
}

static const struct bt_hci_driver drv = {
	.name = "Controller",
	.bus = BT_HCI_DRIVER_BUS_VIRTUAL,
	.quirks = BT_QUIRK_NO_AUTO_DLE,
	.open = hci_driver_open,
	.close = hci_driver_close,
	.send = hci_driver_send,
};

static int hci_driver_init(void)
{
	bt_hci_driver_register(&drv);

	return 0;
}

SYS_INIT(hci_driver_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);