1 /*
2 * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stddef.h>
9 #include <string.h>
10
11 #include <zephyr/version.h>
12 #include <errno.h>
13
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/atomic.h>
17
18 #include <zephyr/drivers/bluetooth/hci_driver.h>
19
20 #include <zephyr/bluetooth/hci_types.h>
21 #include <zephyr/bluetooth/hci_vs.h>
22 #include <zephyr/bluetooth/buf.h>
23
24 #include "../host/hci_ecc.h"
25
26 #include "util/util.h"
27 #include "util/memq.h"
28 #include "util/mem.h"
29 #include "util/dbuf.h"
30
31 #include "hal/ecb.h"
32 #include "hal/ccm.h"
33 #include "hal/ticker.h"
34
35 #include "ticker/ticker.h"
36
37 #include "ll_sw/pdu_df.h"
38 #include "lll/pdu_vendor.h"
39 #include "ll_sw/pdu.h"
40
41 #include "ll_sw/lll.h"
42 #include "lll/lll_adv_types.h"
43 #include "ll_sw/lll_adv.h"
44 #include "lll/lll_adv_pdu.h"
45 #include "ll_sw/lll_scan.h"
46 #include "lll/lll_df_types.h"
47 #include "ll_sw/lll_sync.h"
48 #include "ll_sw/lll_sync_iso.h"
49 #include "ll_sw/lll_conn.h"
50 #include "ll_sw/lll_conn_iso.h"
51 #include "ll_sw/lll_iso_tx.h"
52
53 #include "ll_sw/isoal.h"
54
55 #include "ll_sw/ull_tx_queue.h"
56
57 #include "ll_sw/ull_adv_types.h"
58 #include "ll_sw/ull_scan_types.h"
59 #include "ll_sw/ull_sync_types.h"
60 #include "ll_sw/ull_conn_types.h"
61 #include "ll_sw/ull_iso_types.h"
62 #include "ll_sw/ull_conn_iso_types.h"
63 #include "ll_sw/ull_conn_iso_internal.h"
64 #include "ll_sw/ull_df_types.h"
65 #include "ll_sw/ull_internal.h"
66
67 #include "ll_sw/ull_adv_internal.h"
68 #include "ll_sw/ull_sync_internal.h"
69 #include "ll_sw/ull_conn_internal.h"
70 #include "ll_sw/ull_sync_iso_internal.h"
71 #include "ll_sw/ull_iso_internal.h"
72 #include "ll_sw/ull_df_internal.h"
73
74 #include "ll.h"
75 #include "ll_feat.h"
76 #include "ll_settings.h"
77
78 #include "hci_internal.h"
79 #include "hci_vendor.h"
80
81 #if defined(CONFIG_BT_HCI_MESH_EXT)
82 #include "ll_sw/ll_mesh.h"
83 #endif /* CONFIG_BT_HCI_MESH_EXT */
84
85 #if defined(CONFIG_BT_CTLR_DTM_HCI)
86 #include "ll_sw/ll_test.h"
87 #endif /* CONFIG_BT_CTLR_DTM_HCI */
88
89 #if defined(CONFIG_BT_CTLR_USER_EXT)
90 #include "hci_user_ext.h"
91 #endif /* CONFIG_BT_CTLR_USER_EXT */
92
93 #include "common/bt_str.h"
94 #include "hal/debug.h"
95
96 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
97 #include <zephyr/logging/log.h>
98 LOG_MODULE_REGISTER(bt_ctlr_hci);
99
100 #define STR_NULL_TERMINATOR 0x00
101
102 /* opcode of the HCI command currently being processed. The opcode is stored
103 * by hci_cmd_handle() and then used during the creation of cmd complete and
104 * cmd status events to avoid passing it up the call chain.
105 */
106 static uint16_t _opcode;
107
108 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
109 /* NOTE: Duplicate filter uses two LS bits value of standard advertising modes:
110 * 0 - Non-Connectable Non-Scannable advertising report
111 * 1 - Connectable Non-Scannable advertising report
112 * 2 - Non-Connectable Scannable advertising report
113 * 3 - Connectable Scannable advertising report
114 *
115 * FIXME: Duplicate filtering of Connectable Directed low and high duty
116 * cycle. If advertiser changes between Connectable Non-Scannable,
117 * Connectable Directed low, and high duty cycle without changing
118 * SID and DID, then such reports will be filtered out by the
119 * implementation. Needs enhancement to current implementation.
120 *
121 * Define a custom duplicate filter mode for periodic advertising:
122 * 4 - Periodic Advertising report
123 */
124
125 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
126 #define DUP_EXT_ADV_MODE_MAX 5
127 #define DUP_EXT_ADV_MODE_PERIODIC BIT(2)
128 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
129 #define DUP_EXT_ADV_MODE_MAX 4
130 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
131
132 #define DUP_EXT_ADV_MODE_COUNT 4
133
134 /* Duplicate filter entries, one per Bluetooth address */
135 static struct dup_entry {
136 bt_addr_le_t addr;
137
138 /* Mask to accumulate advertising PDU type as bitmask */
139 uint8_t mask;
140
141 #if defined(CONFIG_BT_CTLR_ADV_EXT)
142 struct dup_ext_adv_mode {
143 uint16_t set_count:5;
144 uint16_t set_curr:5;
145 struct dup_ext_adv_set {
146 uint8_t data_cmplt:1;
147 struct pdu_adv_adi adi;
148 } set[CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX];
149 } adv_mode[DUP_EXT_ADV_MODE_MAX];
150 #endif
151 } dup_filter[CONFIG_BT_CTLR_DUP_FILTER_LEN];
152
153 /* Duplicate filtering is disabled if count value is set to negative integer */
154 #define DUP_FILTER_DISABLED (-1)
155
156 /* Duplicate filtering array entry count, filtering disabled if negative */
157 static int32_t dup_count;
158 /* Duplicate filtering current free entry, overwrites entries after rollover */
159 static uint32_t dup_curr;
160
161 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
162 /* Helper function to reset non-periodic advertising entries in filter table */
163 static void dup_ext_adv_reset(void);
164 /* Flag for advertising reports to be filtered for duplicates. */
165 static bool dup_scan;
166 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
167 /* Set constant true so that (dup_count >= 0) decides if advertising duplicate
168 * filter is enabled when Periodic Advertising ADI support is disabled.
169 */
170 static const bool dup_scan = true;
171 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
172 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
173
174 #if defined(CONFIG_BT_HCI_MESH_EXT)
175 struct scan_filter {
176 uint8_t count;
177 uint8_t lengths[CONFIG_BT_CTLR_MESH_SF_PATTERNS];
178 uint8_t patterns[CONFIG_BT_CTLR_MESH_SF_PATTERNS]
179 [BT_HCI_MESH_PATTERN_LEN_MAX];
180 };
181
182 static struct scan_filter scan_filters[CONFIG_BT_CTLR_MESH_SCAN_FILTERS];
183 static uint8_t sf_curr;
184 #endif
185
186 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
187 int32_t hci_hbuf_total;
188 uint32_t hci_hbuf_sent;
189 uint32_t hci_hbuf_acked;
190 uint16_t hci_hbuf_pend[CONFIG_BT_MAX_CONN];
191 atomic_t hci_state_mask;
192 static struct k_poll_signal *hbuf_signal;
193 #endif
194
195 #if defined(CONFIG_BT_CONN)
196 static uint32_t conn_count;
197 #endif
198
199 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
200 static uint32_t cis_pending_count;
201 #endif
202
203 /* In HCI event PHY indices start at 1 compare to 0 indexed in aux_ptr field in
204 * the Common Extended Payload Format in the PDUs.
205 */
206 #define HCI_AUX_PHY_TO_HCI_PHY(aux_phy) ((aux_phy) + 1)
207
208 #define DEFAULT_EVENT_MASK 0x1fffffffffff
209 #define DEFAULT_EVENT_MASK_PAGE_2 0x0
210 #define DEFAULT_LE_EVENT_MASK 0x1f
211
212 static uint64_t event_mask = DEFAULT_EVENT_MASK;
213 static uint64_t event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
214 static uint64_t le_event_mask = DEFAULT_LE_EVENT_MASK;
215 #if defined(CONFIG_BT_HCI_VS)
216 __maybe_unused static uint64_t vs_events_mask = DEFAULT_VS_EVT_MASK;
217 #endif /* CONFIG_BT_HCI_VS */
218
219 static struct net_buf *cmd_complete_status(uint8_t status);
220
221 #if defined(CONFIG_BT_CTLR_ADV_EXT)
222 #define BUF_GET_TIMEOUT K_SECONDS(10)
223
224 #if defined(CONFIG_BT_HCI_RAW)
225 static uint8_t ll_adv_cmds;
226
ll_adv_cmds_set(uint8_t adv_cmds)227 __weak int ll_adv_cmds_set(uint8_t adv_cmds)
228 {
229 if (!ll_adv_cmds) {
230 ll_adv_cmds = adv_cmds;
231 }
232
233 if (ll_adv_cmds != adv_cmds) {
234 return -EINVAL;
235 }
236
237 return 0;
238 }
239
ll_adv_cmds_is_ext(void)240 __weak int ll_adv_cmds_is_ext(void)
241 {
242 return ll_adv_cmds == LL_ADV_CMDS_EXT;
243 }
244
245 #else /* !CONFIG_BT_HCI_RAW */
/* Without CONFIG_BT_HCI_RAW there is no runtime mode latching; extended
 * advertising commands are reported as always in use.
 */
__weak int ll_adv_cmds_is_ext(void)
{
	return 1;
}
250 #endif /* !CONFIG_BT_HCI_RAW */
251
/* Check whether a legacy advertising command is allowed.
 *
 * With CONFIG_BT_HCI_RAW the first legacy/extended command latches the
 * mode (see ll_adv_cmds_set()); a conflicting mode fails. Without raw
 * HCI, legacy commands are never allowed.
 *
 * On failure, when cc_evt is non-NULL, *cc_evt is set to a Command
 * Complete event carrying BT_HCI_ERR_CMD_DISALLOWED. Returns 0 when the
 * command is allowed, non-zero otherwise.
 */
static int adv_cmds_legacy_check(struct net_buf **cc_evt)
{
	int err;

#if defined(CONFIG_BT_HCI_RAW)
	err = ll_adv_cmds_set(LL_ADV_CMDS_LEGACY);
	if (err && cc_evt) {
		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
	}
#else
	if (cc_evt) {
		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
	}

	err = -EINVAL;
#endif /* CONFIG_BT_HCI_RAW */

	return err;
}
271
/* Check whether an extended advertising command is allowed.
 *
 * With CONFIG_BT_HCI_RAW the first legacy/extended command latches the
 * mode; a conflict sets *cc_evt (when non-NULL) to a Command Complete
 * event with BT_HCI_ERR_CMD_DISALLOWED and returns non-zero. Without
 * raw HCI, extended commands are always allowed.
 */
static int adv_cmds_ext_check(struct net_buf **cc_evt)
{
	int err;

#if defined(CONFIG_BT_HCI_RAW)
	err = ll_adv_cmds_set(LL_ADV_CMDS_EXT);
	if (err && cc_evt) {
		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
	}
#else
	err = 0;
#endif /* CONFIG_BT_HCI_RAW */

	return err;
}
287 #else
/* Without CONFIG_BT_CTLR_ADV_EXT there is no legacy/extended mode split;
 * legacy advertising commands are always allowed.
 */
static inline int adv_cmds_legacy_check(struct net_buf **cc_evt)
{
	return 0;
}
292 #endif /* CONFIG_BT_CTLR_ADV_EXT */
293
294 #if defined(CONFIG_BT_CONN)
295 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
296 struct net_buf *buf);
297 #endif /* CONFIG_BT_CONN */
298
hci_evt_create(struct net_buf * buf,uint8_t evt,uint8_t len)299 static void hci_evt_create(struct net_buf *buf, uint8_t evt, uint8_t len)
300 {
301 struct bt_hci_evt_hdr *hdr;
302
303 hdr = net_buf_add(buf, sizeof(*hdr));
304 hdr->evt = evt;
305 hdr->len = len;
306 }
307
hci_cmd_complete(struct net_buf ** buf,uint8_t plen)308 void *hci_cmd_complete(struct net_buf **buf, uint8_t plen)
309 {
310 *buf = bt_hci_cmd_complete_create(_opcode, plen);
311
312 return net_buf_add(*buf, plen);
313 }
314
/* Build a Command Status event for the opcode currently being processed. */
static struct net_buf *cmd_status(uint8_t status)
{
	return bt_hci_cmd_status_create(_opcode, status);
}
319
cmd_complete_status(uint8_t status)320 static struct net_buf *cmd_complete_status(uint8_t status)
321 {
322 struct net_buf *buf;
323 struct bt_hci_evt_cc_status *ccst;
324
325 buf = bt_hci_cmd_complete_create(_opcode, sizeof(*ccst));
326 ccst = net_buf_add(buf, sizeof(*ccst));
327 ccst->status = status;
328
329 return buf;
330 }
331
meta_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)332 static void *meta_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
333 {
334 struct bt_hci_evt_le_meta_event *me;
335
336 hci_evt_create(buf, BT_HCI_EVT_LE_META_EVENT, sizeof(*me) + melen);
337 me = net_buf_add(buf, sizeof(*me));
338 me->subevent = subevt;
339
340 return net_buf_add(buf, melen);
341 }
342
343 #if defined(CONFIG_BT_HCI_VS)
vs_event(struct net_buf * buf,uint8_t subevt,uint8_t evt_len)344 __maybe_unused static void *vs_event(struct net_buf *buf, uint8_t subevt, uint8_t evt_len)
345 {
346 struct bt_hci_evt_vs *evt;
347
348 hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*evt) + evt_len);
349 evt = net_buf_add(buf, sizeof(*evt));
350 evt->subevent = subevt;
351
352 return net_buf_add(buf, evt_len);
353 }
354 #endif /* CONFIG_BT_HCI_VS */
355
356 #if defined(CONFIG_BT_HCI_MESH_EXT)
mesh_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)357 static void *mesh_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
358 {
359 struct bt_hci_evt_mesh *me;
360
361 hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*me) + melen);
362 me = net_buf_add(buf, sizeof(*me));
363 me->prefix = BT_HCI_MESH_EVT_PREFIX;
364 me->subevent = subevt;
365
366 return net_buf_add(buf, melen);
367 }
368 #endif /* CONFIG_BT_HCI_MESH_EXT */
369
370 #if defined(CONFIG_BT_CONN)
disconnect(struct net_buf * buf,struct net_buf ** evt)371 static void disconnect(struct net_buf *buf, struct net_buf **evt)
372 {
373 struct bt_hci_cp_disconnect *cmd = (void *)buf->data;
374 uint16_t handle;
375 uint8_t status;
376
377 handle = sys_le16_to_cpu(cmd->handle);
378 status = ll_terminate_ind_send(handle, cmd->reason);
379
380 *evt = cmd_status(status);
381 }
382
read_remote_ver_info(struct net_buf * buf,struct net_buf ** evt)383 static void read_remote_ver_info(struct net_buf *buf, struct net_buf **evt)
384 {
385 struct bt_hci_cp_read_remote_version_info *cmd = (void *)buf->data;
386 uint16_t handle;
387 uint8_t status;
388
389 handle = sys_le16_to_cpu(cmd->handle);
390 status = ll_version_ind_send(handle);
391
392 *evt = cmd_status(status);
393 }
394 #endif /* CONFIG_BT_CONN */
395
/* Dispatch an HCI Link Control command (by OCF) to its handler.
 *
 * Returns 0 when the OCF was handled (the handler fills *evt), or
 * -EINVAL for an unsupported opcode.
 */
static int link_control_cmd_handle(uint16_t ocf, struct net_buf *cmd,
				   struct net_buf **evt)
{
	switch (ocf) {
#if defined(CONFIG_BT_CONN)
	case BT_OCF(BT_HCI_OP_DISCONNECT):
		disconnect(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_READ_REMOTE_VERSION_INFO):
		read_remote_ver_info(cmd, evt);
		break;
#endif /* CONFIG_BT_CONN */
	default:
		return -EINVAL;
	}

	return 0;
}
414
set_event_mask(struct net_buf * buf,struct net_buf ** evt)415 static void set_event_mask(struct net_buf *buf, struct net_buf **evt)
416 {
417 struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
418
419 event_mask = sys_get_le64(cmd->events);
420
421 *evt = cmd_complete_status(0x00);
422 }
423
set_event_mask_page_2(struct net_buf * buf,struct net_buf ** evt)424 static void set_event_mask_page_2(struct net_buf *buf, struct net_buf **evt)
425 {
426 struct bt_hci_cp_set_event_mask_page_2 *cmd = (void *)buf->data;
427
428 event_mask_page_2 = sys_get_le64(cmd->events_page_2);
429
430 *evt = cmd_complete_status(0x00);
431 }
432
/* Handle HCI Reset: restore HCI-layer state to power-on defaults.
 *
 * NOTE(review): buf may be NULL — presumably when invoked from an
 * internal (non-command) path; in that case the LL reset, the Command
 * Complete event and the flow-control signalling are skipped. Confirm
 * against callers.
 */
static void reset(struct net_buf *buf, struct net_buf **evt)
{
#if defined(CONFIG_BT_HCI_MESH_EXT)
	int i;

	/* Clear all mesh scan filters and invalidate the current index. */
	for (i = 0; i < ARRAY_SIZE(scan_filters); i++) {
		scan_filters[i].count = 0U;
	}
	sf_curr = 0xFF;
#endif

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Negative count disables advertising duplicate filtering. */
	dup_count = DUP_FILTER_DISABLED;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	dup_scan = false;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* reset event masks */
	event_mask = DEFAULT_EVENT_MASK;
	event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
	le_event_mask = DEFAULT_LE_EVENT_MASK;

	if (buf) {
		ll_reset();
		*evt = cmd_complete_status(0x00);
	}

#if defined(CONFIG_BT_CONN)
	conn_count = 0U;
#endif

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	cis_pending_count = 0U;
#endif

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* Disable controller-to-host flow control and clear accounting. */
	hci_hbuf_total = 0;
	hci_hbuf_sent = 0U;
	hci_hbuf_acked = 0U;
	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
	if (buf) {
		/* Record the reset and wake any waiter on the signal. */
		atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
		k_poll_signal_raise(hbuf_signal, 0x0);
	}
#endif

	hci_recv_fifo_reset();
}
482
483 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Handle HCI Set Controller To Host Flow Control.
 *
 * The sign of hci_hbuf_total encodes the flow-control state: negative
 * means disabled (magnitude still records the host buffer count set via
 * Host Buffer Size), positive means enabled. Toggling flips the sign and
 * restarts the accounting.
 */
static void set_ctl_to_host_flow(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_set_ctl_to_host_flow *cmd = (void *)buf->data;
	uint8_t flow_enable = cmd->flow_enable;
	struct bt_hci_evt_cc_status *ccst;

	ccst = hci_cmd_complete(evt, sizeof(*ccst));

	/* require host buffer size before enabling flow control, and
	 * disallow if any connections are up
	 */
	if (!hci_hbuf_total || conn_count) {
		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
		return;
	} else {
		/* Tentative success; overwritten below on a bad parameter. */
		ccst->status = 0x00;
	}

	switch (flow_enable) {
	case BT_HCI_CTL_TO_HOST_FLOW_DISABLE:
		if (hci_hbuf_total < 0) {
			/* already disabled */
			return;
		}
		break;
	case BT_HCI_CTL_TO_HOST_FLOW_ENABLE:
		if (hci_hbuf_total > 0) {
			/* already enabled */
			return;
		}
		break;
	default:
		ccst->status = BT_HCI_ERR_INVALID_PARAM;
		return;
	}

	/* State change: reset accounting and flip the enabled/disabled sign. */
	hci_hbuf_sent = 0U;
	hci_hbuf_acked = 0U;
	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
	hci_hbuf_total = -hci_hbuf_total;
}
525
host_buffer_size(struct net_buf * buf,struct net_buf ** evt)526 static void host_buffer_size(struct net_buf *buf, struct net_buf **evt)
527 {
528 struct bt_hci_cp_host_buffer_size *cmd = (void *)buf->data;
529 uint16_t acl_pkts = sys_le16_to_cpu(cmd->acl_pkts);
530 uint16_t acl_mtu = sys_le16_to_cpu(cmd->acl_mtu);
531 struct bt_hci_evt_cc_status *ccst;
532
533 ccst = hci_cmd_complete(evt, sizeof(*ccst));
534
535 if (hci_hbuf_total) {
536 ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
537 return;
538 }
539 /* fragmentation from controller to host not supported, require
540 * ACL MTU to be at least the LL MTU
541 */
542 if (acl_mtu < LL_LENGTH_OCTETS_RX_MAX) {
543 ccst->status = BT_HCI_ERR_INVALID_PARAM;
544 return;
545 }
546
547 LOG_DBG("FC: host buf size: %d", acl_pkts);
548 hci_hbuf_total = -acl_pkts;
549 }
550
/* Handle HCI Host Number Of Completed Packets.
 *
 * Per the HCI specification this command normally generates no event:
 * *evt is left NULL on success and a Command Complete event is produced
 * only for error conditions.
 */
static void host_num_completed_packets(struct net_buf *buf,
				       struct net_buf **evt)
{
	struct bt_hci_cp_host_num_completed_packets *cmd = (void *)buf->data;
	struct bt_hci_evt_cc_status *ccst;
	uint32_t count = 0U;

	/* special case, no event returned except for error conditions */
	if (hci_hbuf_total <= 0) {
		/* Flow control is not enabled. */
		ccst = hci_cmd_complete(evt, sizeof(*ccst));
		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
		return;
	} else if (!conn_count) {
		/* No connections exist, nothing can be pending. */
		ccst = hci_cmd_complete(evt, sizeof(*ccst));
		ccst->status = BT_HCI_ERR_INVALID_PARAM;
		return;
	}

	/* leave *evt == NULL so no event is generated */
	for (uint8_t i = 0; i < cmd->num_handles; i++) {
		uint16_t h = sys_le16_to_cpu(cmd->h[i].handle);
		uint16_t c = sys_le16_to_cpu(cmd->h[i].count);

		/* Reject out-of-range handles and acknowledgement of more
		 * packets than are pending on that handle.
		 */
		if ((h >= ARRAY_SIZE(hci_hbuf_pend)) ||
		    (c > hci_hbuf_pend[h])) {
			ccst = hci_cmd_complete(evt, sizeof(*ccst));
			ccst->status = BT_HCI_ERR_INVALID_PARAM;
			return;
		}

		hci_hbuf_pend[h] -= c;
		count += c;
	}

	LOG_DBG("FC: acked: %d", count);
	hci_hbuf_acked += count;
	/* Signal that host buffers were freed. */
	k_poll_signal_raise(hbuf_signal, 0x0);
}
589 #endif
590
591 #if defined(CONFIG_BT_CTLR_LE_PING)
read_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)592 static void read_auth_payload_timeout(struct net_buf *buf, struct net_buf **evt)
593 {
594 struct bt_hci_cp_read_auth_payload_timeout *cmd = (void *)buf->data;
595 struct bt_hci_rp_read_auth_payload_timeout *rp;
596 uint16_t auth_payload_timeout;
597 uint16_t handle;
598 uint8_t status;
599
600 handle = sys_le16_to_cpu(cmd->handle);
601
602 status = ll_apto_get(handle, &auth_payload_timeout);
603
604 rp = hci_cmd_complete(evt, sizeof(*rp));
605 rp->status = status;
606 rp->handle = sys_cpu_to_le16(handle);
607 rp->auth_payload_timeout = sys_cpu_to_le16(auth_payload_timeout);
608 }
609
write_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)610 static void write_auth_payload_timeout(struct net_buf *buf,
611 struct net_buf **evt)
612 {
613 struct bt_hci_cp_write_auth_payload_timeout *cmd = (void *)buf->data;
614 struct bt_hci_rp_write_auth_payload_timeout *rp;
615 uint16_t auth_payload_timeout;
616 uint16_t handle;
617 uint8_t status;
618
619 handle = sys_le16_to_cpu(cmd->handle);
620 auth_payload_timeout = sys_le16_to_cpu(cmd->auth_payload_timeout);
621
622 status = ll_apto_set(handle, auth_payload_timeout);
623
624 rp = hci_cmd_complete(evt, sizeof(*rp));
625 rp->status = status;
626 rp->handle = sys_cpu_to_le16(handle);
627 }
628 #endif /* CONFIG_BT_CTLR_LE_PING */
629
630 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
configure_data_path(struct net_buf * buf,struct net_buf ** evt)631 static void configure_data_path(struct net_buf *buf,
632 struct net_buf **evt)
633 {
634 struct bt_hci_cp_configure_data_path *cmd = (void *)buf->data;
635 struct bt_hci_rp_configure_data_path *rp;
636
637 uint8_t *vs_config;
638 uint8_t status;
639
640 vs_config = &cmd->vs_config[0];
641
642 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)) {
643 status = ll_configure_data_path(cmd->data_path_dir,
644 cmd->data_path_id,
645 cmd->vs_config_len,
646 vs_config);
647 } else {
648 status = BT_HCI_ERR_INVALID_PARAM;
649 }
650
651 rp = hci_cmd_complete(evt, sizeof(*rp));
652 rp->status = status;
653 }
654 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
655
656 #if defined(CONFIG_BT_CTLR_CONN_ISO)
read_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)657 static void read_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
658 {
659 struct bt_hci_rp_read_conn_accept_timeout *rp;
660 uint16_t timeout;
661
662 ARG_UNUSED(buf);
663
664 rp = hci_cmd_complete(evt, sizeof(*rp));
665
666 rp->status = ll_conn_iso_accept_timeout_get(&timeout);
667 rp->conn_accept_timeout = sys_cpu_to_le16(timeout);
668 }
669
write_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)670 static void write_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
671 {
672 struct bt_hci_cp_write_conn_accept_timeout *cmd = (void *)buf->data;
673 struct bt_hci_rp_write_conn_accept_timeout *rp;
674 uint16_t timeout;
675
676 timeout = sys_le16_to_cpu(cmd->conn_accept_timeout);
677
678 rp = hci_cmd_complete(evt, sizeof(*rp));
679
680 rp->status = ll_conn_iso_accept_timeout_set(timeout);
681 }
682 #endif /* CONFIG_BT_CTLR_CONN_ISO */
683
684 #if defined(CONFIG_BT_CONN)
read_tx_power_level(struct net_buf * buf,struct net_buf ** evt)685 static void read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
686 {
687 struct bt_hci_cp_read_tx_power_level *cmd = (void *)buf->data;
688 struct bt_hci_rp_read_tx_power_level *rp;
689 uint16_t handle;
690 uint8_t status;
691 uint8_t type;
692
693 handle = sys_le16_to_cpu(cmd->handle);
694 type = cmd->type;
695
696 rp = hci_cmd_complete(evt, sizeof(*rp));
697
698 status = ll_tx_pwr_lvl_get(BT_HCI_VS_LL_HANDLE_TYPE_CONN,
699 handle, type, &rp->tx_power_level);
700
701 rp->status = status;
702 rp->handle = sys_cpu_to_le16(handle);
703 }
704 #endif /* CONFIG_BT_CONN */
705
/* Dispatch an HCI Controller & Baseband command (by OCF) to its handler.
 *
 * Returns 0 when the OCF was handled (the handler fills *evt), or
 * -EINVAL for an unsupported opcode.
 */
static int ctrl_bb_cmd_handle(uint16_t ocf, struct net_buf *cmd,
			      struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK):
		set_event_mask(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_RESET):
		reset(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK_PAGE_2):
		set_event_mask_page_2(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_READ_CONN_ACCEPT_TIMEOUT):
		read_conn_accept_timeout(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_WRITE_CONN_ACCEPT_TIMEOUT):
		write_conn_accept_timeout(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CONN)
	case BT_OCF(BT_HCI_OP_READ_TX_POWER_LEVEL):
		read_tx_power_level(cmd, evt);
		break;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	case BT_OCF(BT_HCI_OP_SET_CTL_TO_HOST_FLOW):
		set_ctl_to_host_flow(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_HOST_BUFFER_SIZE):
		host_buffer_size(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS):
		host_num_completed_packets(cmd, evt);
		break;
#endif

#if defined(CONFIG_BT_CTLR_LE_PING)
	case BT_OCF(BT_HCI_OP_READ_AUTH_PAYLOAD_TIMEOUT):
		read_auth_payload_timeout(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_WRITE_AUTH_PAYLOAD_TIMEOUT):
		write_auth_payload_timeout(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
	case BT_OCF(BT_HCI_OP_CONFIGURE_DATA_PATH):
		configure_data_path(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */

	default:
		return -EINVAL;
	}

	return 0;
}
774
/* Handle HCI Read Local Version Information: report the controller's
 * HCI/LMP versions, company identifier and subversion in a Command
 * Complete event.
 */
static void read_local_version_info(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_read_local_version_info *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));

	rp->status = 0x00;
	rp->hci_version = LL_VERSION_NUMBER;
	rp->hci_revision = sys_cpu_to_le16(0);
	rp->lmp_version = LL_VERSION_NUMBER;
	rp->manufacturer = sys_cpu_to_le16(ll_settings_company_id());
	rp->lmp_subversion = sys_cpu_to_le16(ll_settings_subversion_number());
}
788
read_supported_commands(struct net_buf * buf,struct net_buf ** evt)789 static void read_supported_commands(struct net_buf *buf, struct net_buf **evt)
790 {
791 struct bt_hci_rp_read_supported_commands *rp;
792
793 rp = hci_cmd_complete(evt, sizeof(*rp));
794
795 rp->status = 0x00;
796 (void)memset(&rp->commands[0], 0, sizeof(rp->commands));
797
798 #if defined(CONFIG_BT_REMOTE_VERSION)
799 /* Read Remote Version Info. */
800 rp->commands[2] |= BIT(7);
801 #endif
802 /* Set Event Mask, and Reset. */
803 rp->commands[5] |= BIT(6) | BIT(7);
804
805 #if defined(CONFIG_BT_CTLR_CONN_ISO)
806 /* Read/Write Connection Accept Timeout */
807 rp->commands[7] |= BIT(2) | BIT(3);
808 #endif /* CONFIG_BT_CTLR_CONN_ISO */
809
810 /* Read TX Power Level. */
811 rp->commands[10] |= BIT(2);
812
813 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
814 /* Set FC, Host Buffer Size and Host Num Completed */
815 rp->commands[10] |= BIT(5) | BIT(6) | BIT(7);
816 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
817
818 /* Read Local Version Info, Read Local Supported Features. */
819 rp->commands[14] |= BIT(3) | BIT(5);
820 /* Read BD ADDR. */
821 rp->commands[15] |= BIT(1);
822
823 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
824 /* Read RSSI. */
825 rp->commands[15] |= BIT(5);
826 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
827
828 /* Set Event Mask Page 2 */
829 rp->commands[22] |= BIT(2);
830 /* LE Set Event Mask, LE Read Buffer Size, LE Read Local Supp Feats,
831 * Set Random Addr
832 */
833 rp->commands[25] |= BIT(0) | BIT(1) | BIT(2) | BIT(4);
834
835 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
836 /* LE Read FAL Size, LE Clear FAL */
837 rp->commands[26] |= BIT(6) | BIT(7);
838 /* LE Add Dev to FAL, LE Remove Dev from FAL */
839 rp->commands[27] |= BIT(0) | BIT(1);
840 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
841
842 /* LE Encrypt, LE Rand */
843 rp->commands[27] |= BIT(6) | BIT(7);
844 /* LE Read Supported States */
845 rp->commands[28] |= BIT(3);
846
847 #if defined(CONFIG_BT_BROADCASTER)
848 /* LE Set Adv Params, LE Read Adv Channel TX Power, LE Set Adv Data */
849 rp->commands[25] |= BIT(5) | BIT(6) | BIT(7);
850 /* LE Set Scan Response Data, LE Set Adv Enable */
851 rp->commands[26] |= BIT(0) | BIT(1);
852
853 #if defined(CONFIG_BT_CTLR_ADV_EXT)
854 /* LE Set Adv Set Random Addr, LE Set Ext Adv Params, LE Set Ext Adv
855 * Data, LE Set Ext Adv Scan Rsp Data, LE Set Ext Adv Enable, LE Read
856 * Max Adv Data Len, LE Read Num Supp Adv Sets
857 */
858 rp->commands[36] |= BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
859 BIT(6) | BIT(7);
860 /* LE Remove Adv Set, LE Clear Adv Sets */
861 rp->commands[37] |= BIT(0) | BIT(1);
862 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
863 /* LE Set PA Params, LE Set PA Data, LE Set PA Enable */
864 rp->commands[37] |= BIT(2) | BIT(3) | BIT(4);
865 #if defined(CONFIG_BT_CTLR_ADV_ISO)
866 /* LE Create BIG, LE Create BIG Test, LE Terminate BIG */
867 rp->commands[42] |= BIT(5) | BIT(6) | BIT(7);
868 #endif /* CONFIG_BT_CTLR_ADV_ISO */
869 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
870 #endif /* CONFIG_BT_CTLR_ADV_EXT */
871 #endif /* CONFIG_BT_BROADCASTER */
872
873 #if defined(CONFIG_BT_OBSERVER)
874 /* LE Set Scan Params, LE Set Scan Enable */
875 rp->commands[26] |= BIT(2) | BIT(3);
876
877 #if defined(CONFIG_BT_CTLR_ADV_EXT)
878 /* LE Set Extended Scan Params, LE Set Extended Scan Enable */
879 rp->commands[37] |= BIT(5) | BIT(6);
880 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
881 /* LE PA Create Sync, LE PA Create Sync Cancel, LE PA Terminate Sync */
882 rp->commands[38] |= BIT(0) | BIT(1) | BIT(2);
883 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
884 /* LE PA Add Device to Periodic Advertiser List,
885 * LE PA Remove Device from Periodic Advertiser List,
886 * LE Clear Periodic Advertiser List,
887 * LE Read Periodic Advertiser List Size
888 */
889 rp->commands[38] |= BIT(3) | BIT(4) | BIT(5) | BIT(6);
890 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
891 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
892 /* LE Set PA Receive Enable */
893 rp->commands[40] |= BIT(5);
894 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
895 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
896 /* LE BIG Create Sync, LE BIG Terminate Sync */
897 rp->commands[43] |= BIT(0) | BIT(1);
898 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
899 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
900 #endif /* CONFIG_BT_CTLR_ADV_EXT */
901
902 #endif /* CONFIG_BT_OBSERVER */
903
904 #if defined(CONFIG_BT_CONN)
905 #if defined(CONFIG_BT_CENTRAL)
906 /* LE Create Connection, LE Create Connection Cancel */
907 rp->commands[26] |= BIT(4) | BIT(5);
908 /* Set Host Channel Classification */
909 rp->commands[27] |= BIT(3);
910
911 #if defined(CONFIG_BT_CTLR_ADV_EXT)
912 /* LE Extended Create Connection */
913 rp->commands[37] |= BIT(7);
914 #endif /* CONFIG_BT_CTLR_ADV_EXT */
915
916 #if defined(CONFIG_BT_CTLR_LE_ENC)
917 /* LE Start Encryption */
918 rp->commands[28] |= BIT(0);
919 #endif /* CONFIG_BT_CTLR_LE_ENC */
920
921 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
922 /* LE Set CIG Parameters */
923 rp->commands[41] |= BIT(7);
924 /* LE Set CIG Parameters Test, LE Create CIS, LE Remove CIS */
925 rp->commands[42] |= BIT(0) | BIT(1) | BIT(2);
926 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
927 #endif /* CONFIG_BT_CENTRAL */
928
929 #if defined(CONFIG_BT_PERIPHERAL)
930 #if defined(CONFIG_BT_CTLR_LE_ENC)
931 /* LE LTK Request Reply, LE LTK Request Negative Reply */
932 rp->commands[28] |= BIT(1) | BIT(2);
933 #endif /* CONFIG_BT_CTLR_LE_ENC */
934 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
935 /* LE Accept CIS Request, LE Reject CIS Request */
936 rp->commands[42] |= BIT(3) | BIT(4);
937 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
938 #endif /* CONFIG_BT_PERIPHERAL */
939
940 /* Disconnect. */
941 rp->commands[0] |= BIT(5);
942 /* LE Connection Update, LE Read Channel Map, LE Read Remote Features */
943 rp->commands[27] |= BIT(2) | BIT(4) | BIT(5);
944
945 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
946 /* LE Remote Conn Param Req and Neg Reply */
947 rp->commands[33] |= BIT(4) | BIT(5);
948 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
949
950 #if defined(CONFIG_BT_CTLR_LE_PING)
951 /* Read and Write authenticated payload timeout */
952 rp->commands[32] |= BIT(4) | BIT(5);
953 #endif /* CONFIG_BT_CTLR_LE_PING */
954
955 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
956 /* LE Set Data Length, and LE Read Suggested Data Length. */
957 rp->commands[33] |= BIT(6) | BIT(7);
958 /* LE Write Suggested Data Length. */
959 rp->commands[34] |= BIT(0);
960 /* LE Read Maximum Data Length. */
961 rp->commands[35] |= BIT(3);
962 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
963
964 #if defined(CONFIG_BT_CTLR_PHY)
965 /* LE Read PHY Command. */
966 rp->commands[35] |= BIT(4);
967 /* LE Set Default PHY Command. */
968 rp->commands[35] |= BIT(5);
969 /* LE Set PHY Command. */
970 rp->commands[35] |= BIT(6);
971 #endif /* CONFIG_BT_CTLR_PHY */
972 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
973 /* LE Request Peer SCA */
974 rp->commands[43] |= BIT(2);
975 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
976 #endif /* CONFIG_BT_CONN */
977
978 #if defined(CONFIG_BT_CTLR_DTM_HCI)
979 /* LE RX Test, LE TX Test, LE Test End */
980 rp->commands[28] |= BIT(4) | BIT(5) | BIT(6);
981 /* LE Enhanced RX Test. */
982 rp->commands[35] |= BIT(7);
983 /* LE Enhanced TX Test. */
984 rp->commands[36] |= BIT(0);
985 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
986 rp->commands[39] |= BIT(3);
987 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
988
989 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
990 rp->commands[39] |= BIT(4);
991 #endif
992
993 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
994 rp->commands[45] |= BIT(0);
995 #endif
996 #endif /* CONFIG_BT_CTLR_DTM_HCI */
997
998 #if defined(CONFIG_BT_CTLR_PRIVACY)
999 /* LE resolving list commands, LE Read Peer RPA */
1000 rp->commands[34] |= BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7);
1001 /* LE Read Local RPA, LE Set AR Enable, Set RPA Timeout */
1002 rp->commands[35] |= BIT(0) | BIT(1) | BIT(2);
1003 /* LE Set Privacy Mode */
1004 rp->commands[39] |= BIT(2);
1005 #endif /* CONFIG_BT_CTLR_PRIVACY */
1006
1007 #if defined(CONFIG_BT_CTLR_DF)
1008 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
1009 /* LE Set Connectionless CTE Transmit Parameters,
1010 * LE Set Connectionless CTE Transmit Enable
1011 */
1012 rp->commands[39] |= BIT(5) | BIT(6);
1013 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
1014 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1015 /* LE Set Connectionless IQ Sampling Enable */
1016 rp->commands[39] |= BIT(7);
1017 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1018 /* LE Read Antenna Information */
1019 rp->commands[40] |= BIT(4);
1020 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1021 /* LE Set Connection CTE Transmit Parameters */
1022 rp->commands[40] |= BIT(1);
1023 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1024 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1025 /* LE Set Connection CTE Receive Parameters */
1026 rp->commands[40] |= BIT(0);
1027 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1028 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1029 /* LE Connection CTE Request Enable */
1030 rp->commands[40] |= BIT(2);
1031 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1032 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1033 /* LE Connection CTE Response Enable */
1034 rp->commands[40] |= BIT(3);
1035 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1036
1037 #endif /* CONFIG_BT_CTLR_DF */
1038
1039 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_TINYCRYPT_ECC)
1040 bt_hci_ecc_supported_commands(rp->commands);
1041 #endif /* CONFIG_BT_HCI_RAW && CONFIG_BT_TINYCRYPT_ECC */
1042
1043 /* LE Read TX Power. */
1044 rp->commands[38] |= BIT(7);
1045
1046 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1047 /* LE Read Buffer Size v2, LE Read ISO TX Sync */
1048 rp->commands[41] |= BIT(5) | BIT(6);
1049 /* LE ISO Transmit Test */
1050 rp->commands[43] |= BIT(5);
1051 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1052
1053 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1054 /* LE ISO Receive Test, LE ISO Read Test Counters */
1055 rp->commands[43] |= BIT(6) | BIT(7);
1056
1057 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1058 /* LE Read ISO Link Quality */
1059 rp->commands[44] |= BIT(2);
1060 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1061 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1062
1063 #if defined(CONFIG_BT_CTLR_ISO)
1064 /* LE Setup ISO Data Path, LE Remove ISO Data Path */
1065 rp->commands[43] |= BIT(3) | BIT(4);
1066 /* LE ISO Test End */
1067 rp->commands[44] |= BIT(0);
1068 #endif /* CONFIG_BT_CTLR_ISO */
1069
1070 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
1071 /* LE Set Host Feature */
1072 rp->commands[44] |= BIT(1);
1073 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
1074
1075 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1076 /* Read Supported Codecs [v2], Codec Capabilities, Controller Delay, Configure Data Path */
1077 rp->commands[45] |= BIT(2) | BIT(3) | BIT(4) | BIT(5);
1078 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1079 }
1080
read_local_features(struct net_buf * buf,struct net_buf ** evt)1081 static void read_local_features(struct net_buf *buf, struct net_buf **evt)
1082 {
1083 struct bt_hci_rp_read_local_features *rp;
1084
1085 rp = hci_cmd_complete(evt, sizeof(*rp));
1086
1087 rp->status = 0x00;
1088 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1089 /* BR/EDR not supported and LE supported */
1090 rp->features[4] = (1 << 5) | (1 << 6);
1091 }
1092
read_bd_addr(struct net_buf * buf,struct net_buf ** evt)1093 static void read_bd_addr(struct net_buf *buf, struct net_buf **evt)
1094 {
1095 struct bt_hci_rp_read_bd_addr *rp;
1096
1097 rp = hci_cmd_complete(evt, sizeof(*rp));
1098
1099 rp->status = 0x00;
1100
1101 (void)ll_addr_read(0, &rp->bdaddr.val[0]);
1102 }
1103
1104 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
hci_vendor_read_std_codecs(const struct bt_hci_std_codec_info_v2 ** codecs)1105 uint8_t __weak hci_vendor_read_std_codecs(
1106 const struct bt_hci_std_codec_info_v2 **codecs)
1107 {
1108 ARG_UNUSED(codecs);
1109
1110 /* return number of supported codecs */
1111 return 0;
1112 }
1113
hci_vendor_read_vs_codecs(const struct bt_hci_vs_codec_info_v2 ** codecs)1114 uint8_t __weak hci_vendor_read_vs_codecs(
1115 const struct bt_hci_vs_codec_info_v2 **codecs)
1116 {
1117 ARG_UNUSED(codecs);
1118
1119 /* return number of supported codecs */
1120 return 0;
1121 }
1122
1123 /* NOTE: Not implementing the [v1] version.
1124 * Refer to BT Spec v5.3 Vol 4, Part E 7.4.8 Read Local Supported Codecs command
1125 * The [v1] version of this command shall only return codecs supported on the
1126 * BR/EDR physical transport, while the [v2] version shall return codecs
1127 * supported on all physical transports.
1128 */
read_codecs_v2(struct net_buf * buf,struct net_buf ** evt)1129 static void read_codecs_v2(struct net_buf *buf, struct net_buf **evt)
1130 {
1131 struct bt_hci_rp_read_codecs_v2 *rp;
1132 const struct bt_hci_std_codec_info_v2 *std_codec_info;
1133 const struct bt_hci_vs_codec_info_v2 *vs_codec_info;
1134 struct bt_hci_std_codecs_v2 *std_codecs;
1135 struct bt_hci_vs_codecs_v2 *vs_codecs;
1136 size_t std_codecs_bytes;
1137 size_t vs_codecs_bytes;
1138 uint8_t num_std_codecs;
1139 uint8_t num_vs_codecs;
1140 uint8_t i;
1141
1142 /* read standard codec information */
1143 num_std_codecs = hci_vendor_read_std_codecs(&std_codec_info);
1144 std_codecs_bytes = sizeof(struct bt_hci_std_codecs_v2) +
1145 num_std_codecs * sizeof(struct bt_hci_std_codec_info_v2);
1146 /* read vendor-specific codec information */
1147 num_vs_codecs = hci_vendor_read_vs_codecs(&vs_codec_info);
1148 vs_codecs_bytes = sizeof(struct bt_hci_vs_codecs_v2) +
1149 num_vs_codecs * sizeof(struct bt_hci_vs_codec_info_v2);
1150
1151 /* allocate response packet */
1152 rp = hci_cmd_complete(evt, sizeof(*rp) +
1153 std_codecs_bytes +
1154 vs_codecs_bytes);
1155 rp->status = 0x00;
1156
1157 /* copy standard codec information */
1158 std_codecs = (struct bt_hci_std_codecs_v2 *)&rp->codecs[0];
1159 std_codecs->num_codecs = num_std_codecs;
1160 for (i = 0; i < num_std_codecs; i++) {
1161 struct bt_hci_std_codec_info_v2 *codec;
1162
1163 codec = &std_codecs->codec_info[i];
1164 codec->codec_id = std_codec_info[i].codec_id;
1165 codec->transports = std_codec_info[i].transports;
1166 }
1167
1168 /* copy vendor specific codec information */
1169 vs_codecs = (struct bt_hci_vs_codecs_v2 *)&rp->codecs[std_codecs_bytes];
1170 vs_codecs->num_codecs = num_vs_codecs;
1171 for (i = 0; i < num_vs_codecs; i++) {
1172 struct bt_hci_vs_codec_info_v2 *codec;
1173
1174 codec = &vs_codecs->codec_info[i];
1175 codec->company_id =
1176 sys_cpu_to_le16(vs_codec_info[i].company_id);
1177 codec->codec_id = sys_cpu_to_le16(vs_codec_info[i].codec_id);
1178 codec->transports = vs_codec_info[i].transports;
1179 }
1180 }
1181
hci_vendor_read_codec_capabilities(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t * num_capabilities,size_t * capabilities_bytes,const uint8_t ** capabilities)1182 uint8_t __weak hci_vendor_read_codec_capabilities(uint8_t coding_format,
1183 uint16_t company_id,
1184 uint16_t vs_codec_id,
1185 uint8_t transport,
1186 uint8_t direction,
1187 uint8_t *num_capabilities,
1188 size_t *capabilities_bytes,
1189 const uint8_t **capabilities)
1190 {
1191 ARG_UNUSED(coding_format);
1192 ARG_UNUSED(company_id);
1193 ARG_UNUSED(vs_codec_id);
1194 ARG_UNUSED(transport);
1195 ARG_UNUSED(direction);
1196 ARG_UNUSED(capabilities);
1197
1198 *num_capabilities = 0;
1199 *capabilities_bytes = 0;
1200
1201 /* return status */
1202 return 0x00;
1203 }
1204
read_codec_capabilities(struct net_buf * buf,struct net_buf ** evt)1205 static void read_codec_capabilities(struct net_buf *buf, struct net_buf **evt)
1206 {
1207 struct bt_hci_cp_read_codec_capabilities *cmd = (void *)buf->data;
1208 struct bt_hci_rp_read_codec_capabilities *rp;
1209 const uint8_t *capabilities;
1210 size_t capabilities_bytes;
1211 uint8_t num_capabilities;
1212 uint16_t vs_codec_id;
1213 uint16_t company_id;
1214 uint8_t status;
1215
1216 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1217 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1218
1219 /* read codec capabilities */
1220 status = hci_vendor_read_codec_capabilities(cmd->codec_id.coding_format,
1221 company_id,
1222 vs_codec_id,
1223 cmd->transport,
1224 cmd->direction,
1225 &num_capabilities,
1226 &capabilities_bytes,
1227 &capabilities);
1228
1229 /* allocate response packet */
1230 rp = hci_cmd_complete(evt, sizeof(*rp) + capabilities_bytes);
1231 rp->status = status;
1232
1233 /* copy codec capabilities information */
1234 rp->num_capabilities = num_capabilities;
1235 memcpy(&rp->capabilities, capabilities, capabilities_bytes);
1236 }
1237
hci_vendor_read_ctlr_delay(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t codec_config_len,const uint8_t * codec_config,uint32_t * min_delay,uint32_t * max_delay)1238 uint8_t __weak hci_vendor_read_ctlr_delay(uint8_t coding_format,
1239 uint16_t company_id,
1240 uint16_t vs_codec_id,
1241 uint8_t transport,
1242 uint8_t direction,
1243 uint8_t codec_config_len,
1244 const uint8_t *codec_config,
1245 uint32_t *min_delay,
1246 uint32_t *max_delay)
1247 {
1248 ARG_UNUSED(coding_format);
1249 ARG_UNUSED(company_id);
1250 ARG_UNUSED(vs_codec_id);
1251 ARG_UNUSED(transport);
1252 ARG_UNUSED(direction);
1253 ARG_UNUSED(codec_config_len);
1254 ARG_UNUSED(codec_config);
1255
1256 *min_delay = 0;
1257 *max_delay = 0x3D0900; /* 4 seconds, maximum value allowed by spec */
1258
1259 /* return status */
1260 return 0x00;
1261 }
1262
read_ctlr_delay(struct net_buf * buf,struct net_buf ** evt)1263 static void read_ctlr_delay(struct net_buf *buf, struct net_buf **evt)
1264 {
1265 struct bt_hci_cp_read_ctlr_delay *cmd = (void *)buf->data;
1266 struct bt_hci_rp_read_ctlr_delay *rp;
1267 uint16_t vs_codec_id;
1268 uint16_t company_id;
1269 uint32_t min_delay;
1270 uint32_t max_delay;
1271 uint8_t status;
1272
1273 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1274 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1275
1276 status = hci_vendor_read_ctlr_delay(cmd->codec_id.coding_format,
1277 company_id,
1278 vs_codec_id,
1279 cmd->transport,
1280 cmd->direction,
1281 cmd->codec_config_len,
1282 cmd->codec_config,
1283 &min_delay,
1284 &max_delay);
1285
1286 rp = hci_cmd_complete(evt, sizeof(*rp));
1287 rp->status = status;
1288 sys_put_le24(min_delay, rp->min_ctlr_delay);
1289 sys_put_le24(max_delay, rp->max_ctlr_delay);
1290 }
1291 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1292
info_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)1293 static int info_cmd_handle(uint16_t ocf, struct net_buf *cmd,
1294 struct net_buf **evt)
1295 {
1296 switch (ocf) {
1297 case BT_OCF(BT_HCI_OP_READ_LOCAL_VERSION_INFO):
1298 read_local_version_info(cmd, evt);
1299 break;
1300
1301 case BT_OCF(BT_HCI_OP_READ_SUPPORTED_COMMANDS):
1302 read_supported_commands(cmd, evt);
1303 break;
1304
1305 case BT_OCF(BT_HCI_OP_READ_LOCAL_FEATURES):
1306 read_local_features(cmd, evt);
1307 break;
1308
1309 case BT_OCF(BT_HCI_OP_READ_BD_ADDR):
1310 read_bd_addr(cmd, evt);
1311 break;
1312
1313 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1314 case BT_OCF(BT_HCI_OP_READ_CODECS_V2):
1315 read_codecs_v2(cmd, evt);
1316 break;
1317
1318 case BT_OCF(BT_HCI_OP_READ_CODEC_CAPABILITIES):
1319 read_codec_capabilities(cmd, evt);
1320 break;
1321
1322 case BT_OCF(BT_HCI_OP_READ_CTLR_DELAY):
1323 read_ctlr_delay(cmd, evt);
1324 break;
1325 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1326
1327 default:
1328 return -EINVAL;
1329 }
1330
1331 return 0;
1332 }
1333
1334 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
read_rssi(struct net_buf * buf,struct net_buf ** evt)1335 static void read_rssi(struct net_buf *buf, struct net_buf **evt)
1336 {
1337 struct bt_hci_cp_read_rssi *cmd = (void *)buf->data;
1338 struct bt_hci_rp_read_rssi *rp;
1339 uint16_t handle;
1340
1341 handle = sys_le16_to_cpu(cmd->handle);
1342
1343 rp = hci_cmd_complete(evt, sizeof(*rp));
1344
1345 rp->status = ll_rssi_get(handle, &rp->rssi);
1346
1347 rp->handle = sys_cpu_to_le16(handle);
1348 /* The Link Layer currently returns RSSI as an absolute value */
1349 rp->rssi = (!rp->status) ? -rp->rssi : 127;
1350 }
1351 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1352
/* Dispatch a Status Parameters (OGF 0x05) command by OCF.
 *
 * Returns 0 when handled, -EINVAL for an unknown opcode.
 */
static int status_cmd_handle(uint16_t ocf, struct net_buf *cmd,
			     struct net_buf **evt)
{
	switch (ocf) {
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
	case BT_OCF(BT_HCI_OP_READ_RSSI):
		read_rssi(cmd, evt);
		return 0;
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

	default:
		return -EINVAL;
	}
}
1369
le_set_event_mask(struct net_buf * buf,struct net_buf ** evt)1370 static void le_set_event_mask(struct net_buf *buf, struct net_buf **evt)
1371 {
1372 struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
1373
1374 le_event_mask = sys_get_le64(cmd->events);
1375
1376 *evt = cmd_complete_status(0x00);
1377 }
1378
le_read_buffer_size(struct net_buf * buf,struct net_buf ** evt)1379 static void le_read_buffer_size(struct net_buf *buf, struct net_buf **evt)
1380 {
1381 struct bt_hci_rp_le_read_buffer_size *rp;
1382
1383 rp = hci_cmd_complete(evt, sizeof(*rp));
1384
1385 rp->status = 0x00;
1386
1387 rp->le_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1388 rp->le_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1389 }
1390
1391 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_read_buffer_size_v2(struct net_buf * buf,struct net_buf ** evt)1392 static void le_read_buffer_size_v2(struct net_buf *buf, struct net_buf **evt)
1393 {
1394 struct bt_hci_rp_le_read_buffer_size_v2 *rp;
1395
1396 rp = hci_cmd_complete(evt, sizeof(*rp));
1397
1398 rp->status = 0x00;
1399
1400 rp->acl_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1401 rp->acl_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1402 rp->iso_max_len = sys_cpu_to_le16(BT_CTLR_ISO_TX_BUFFER_SIZE);
1403 rp->iso_max_num = CONFIG_BT_CTLR_ISO_TX_BUFFERS;
1404 }
1405 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1406
le_read_local_features(struct net_buf * buf,struct net_buf ** evt)1407 static void le_read_local_features(struct net_buf *buf, struct net_buf **evt)
1408 {
1409 struct bt_hci_rp_le_read_local_features *rp;
1410
1411 rp = hci_cmd_complete(evt, sizeof(*rp));
1412
1413 rp->status = 0x00;
1414
1415 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1416 sys_put_le64(ll_feat_get(), rp->features);
1417 }
1418
le_set_random_address(struct net_buf * buf,struct net_buf ** evt)1419 static void le_set_random_address(struct net_buf *buf, struct net_buf **evt)
1420 {
1421 struct bt_hci_cp_le_set_random_address *cmd = (void *)buf->data;
1422 uint8_t status;
1423
1424 status = ll_addr_set(1, &cmd->bdaddr.val[0]);
1425
1426 *evt = cmd_complete_status(status);
1427 }
1428
1429 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
le_read_fal_size(struct net_buf * buf,struct net_buf ** evt)1430 static void le_read_fal_size(struct net_buf *buf, struct net_buf **evt)
1431 {
1432 struct bt_hci_rp_le_read_fal_size *rp;
1433
1434 rp = hci_cmd_complete(evt, sizeof(*rp));
1435 rp->status = 0x00;
1436
1437 rp->fal_size = ll_fal_size_get();
1438 }
1439
/* Handle HCI LE Clear Filter Accept List. */
static void le_clear_fal(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_fal_clear());
}
1448
le_add_dev_to_fal(struct net_buf * buf,struct net_buf ** evt)1449 static void le_add_dev_to_fal(struct net_buf *buf, struct net_buf **evt)
1450 {
1451 struct bt_hci_cp_le_add_dev_to_fal *cmd = (void *)buf->data;
1452 uint8_t status;
1453
1454 status = ll_fal_add(&cmd->addr);
1455
1456 *evt = cmd_complete_status(status);
1457 }
1458
le_rem_dev_from_fal(struct net_buf * buf,struct net_buf ** evt)1459 static void le_rem_dev_from_fal(struct net_buf *buf, struct net_buf **evt)
1460 {
1461 struct bt_hci_cp_le_rem_dev_from_fal *cmd = (void *)buf->data;
1462 uint8_t status;
1463
1464 status = ll_fal_remove(&cmd->addr);
1465
1466 *evt = cmd_complete_status(status);
1467 }
1468 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1469
1470 #if defined(CONFIG_BT_CTLR_CRYPTO)
le_encrypt(struct net_buf * buf,struct net_buf ** evt)1471 static void le_encrypt(struct net_buf *buf, struct net_buf **evt)
1472 {
1473 struct bt_hci_cp_le_encrypt *cmd = (void *)buf->data;
1474 struct bt_hci_rp_le_encrypt *rp;
1475 uint8_t enc_data[16];
1476
1477 ecb_encrypt(cmd->key, cmd->plaintext, enc_data, NULL);
1478
1479 rp = hci_cmd_complete(evt, sizeof(*rp));
1480
1481 rp->status = 0x00;
1482 memcpy(rp->enc_data, enc_data, 16);
1483 }
1484 #endif /* CONFIG_BT_CTLR_CRYPTO */
1485
le_rand(struct net_buf * buf,struct net_buf ** evt)1486 static void le_rand(struct net_buf *buf, struct net_buf **evt)
1487 {
1488 struct bt_hci_rp_le_rand *rp;
1489 uint8_t count = sizeof(rp->rand);
1490
1491 rp = hci_cmd_complete(evt, sizeof(*rp));
1492 rp->status = 0x00;
1493
1494 lll_csrand_get(rp->rand, count);
1495 }
1496
/* Handle HCI LE Read Supported States.
 *
 * Builds the 64-bit "LE states and state combinations" bit field from the
 * roles enabled at build time and returns it little-endian in the Command
 * Complete event. Each ST_* macro below groups the state-combination bits
 * that require the corresponding role; a bit appears in several groups
 * when the combination needs more than one role.
 *
 * NOTE: the ST_* macros remain defined at file scope after this function.
 */
static void le_read_supp_states(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_le_read_supp_states *rp;
	uint64_t states = 0U;

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = 0x00;

	/* Bits enabled by the advertiser (broadcaster) role. */
#define ST_ADV (BIT64(0)  | BIT64(1)  | BIT64(8)  | BIT64(9)  | BIT64(12) | \
		BIT64(13) | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(20) | BIT64(21))

	/* Bits enabled by the scanner (observer) role. */
#define ST_SCA (BIT64(4)  | BIT64(5)  | BIT64(8)  | BIT64(9)  | BIT64(10) | \
		BIT64(11) | BIT64(12) | BIT64(13) | BIT64(14) | BIT64(15) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(26) | \
		BIT64(27) | BIT64(30) | BIT64(31))

	/* Bits enabled by the peripheral role. */
#define ST_PER (BIT64(2)  | BIT64(3)  | BIT64(7)  | BIT64(10) | BIT64(11) | \
		BIT64(14) | BIT64(15) | BIT64(20) | BIT64(21) | BIT64(26) | \
		BIT64(27) | BIT64(29) | BIT64(30) | BIT64(31) | BIT64(32) | \
		BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | BIT64(37) | \
		BIT64(38) | BIT64(39) | BIT64(40) | BIT64(41))

	/* Bits enabled by the central role. */
#define ST_CEN (BIT64(6)  | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(28) | \
		BIT64(32) | BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | \
		BIT64(37) | BIT64(41))

	/* Accumulate the groups for each role compiled in; the &= ~ arms
	 * are no-ops on the zero-initialized value but keep the intent
	 * explicit for each configuration.
	 */
#if defined(CONFIG_BT_BROADCASTER)
	states |= ST_ADV;
#else
	states &= ~ST_ADV;
#endif
#if defined(CONFIG_BT_OBSERVER)
	states |= ST_SCA;
#else
	states &= ~ST_SCA;
#endif
#if defined(CONFIG_BT_PERIPHERAL)
	states |= ST_PER;
#else
	states &= ~ST_PER;
#endif
#if defined(CONFIG_BT_CENTRAL)
	states |= ST_CEN;
#else
	states &= ~ST_CEN;
#endif
	/* All states and combinations supported except:
	 * Initiating State + Passive Scanning
	 * Initiating State + Active Scanning
	 */
	states &= ~(BIT64(22) | BIT64(23));
	LOG_DBG("states: 0x%08x%08x", (uint32_t)(states >> 32), (uint32_t)(states & 0xffffffff));
	sys_put_le64(states, rp->le_states);
}
1553
1554 #if defined(CONFIG_BT_BROADCASTER)
le_set_adv_param(struct net_buf * buf,struct net_buf ** evt)1555 static void le_set_adv_param(struct net_buf *buf, struct net_buf **evt)
1556 {
1557 struct bt_hci_cp_le_set_adv_param *cmd = (void *)buf->data;
1558 uint16_t min_interval;
1559 uint8_t status;
1560
1561 if (adv_cmds_legacy_check(evt)) {
1562 return;
1563 }
1564
1565 min_interval = sys_le16_to_cpu(cmd->min_interval);
1566
1567 if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
1568 (cmd->type != BT_HCI_ADV_DIRECT_IND)) {
1569 uint16_t max_interval = sys_le16_to_cpu(cmd->max_interval);
1570
1571 if ((min_interval > max_interval) ||
1572 (min_interval < 0x0020) ||
1573 (max_interval > 0x4000)) {
1574 *evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
1575 return;
1576 }
1577 }
1578
1579 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1580 status = ll_adv_params_set(0, 0, min_interval, cmd->type,
1581 cmd->own_addr_type, cmd->direct_addr.type,
1582 &cmd->direct_addr.a.val[0], cmd->channel_map,
1583 cmd->filter_policy, 0, 0, 0, 0, 0, 0);
1584 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1585 status = ll_adv_params_set(min_interval, cmd->type,
1586 cmd->own_addr_type, cmd->direct_addr.type,
1587 &cmd->direct_addr.a.val[0], cmd->channel_map,
1588 cmd->filter_policy);
1589 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1590
1591 *evt = cmd_complete_status(status);
1592 }
1593
le_read_adv_chan_tx_power(struct net_buf * buf,struct net_buf ** evt)1594 static void le_read_adv_chan_tx_power(struct net_buf *buf, struct net_buf **evt)
1595 {
1596 struct bt_hci_rp_le_read_chan_tx_power *rp;
1597
1598 if (adv_cmds_legacy_check(evt)) {
1599 return;
1600 }
1601
1602 rp = hci_cmd_complete(evt, sizeof(*rp));
1603
1604 rp->status = 0x00;
1605
1606 rp->tx_power_level = 0;
1607 }
1608
le_set_adv_data(struct net_buf * buf,struct net_buf ** evt)1609 static void le_set_adv_data(struct net_buf *buf, struct net_buf **evt)
1610 {
1611 struct bt_hci_cp_le_set_adv_data *cmd = (void *)buf->data;
1612 uint8_t status;
1613
1614 if (adv_cmds_legacy_check(evt)) {
1615 return;
1616 }
1617
1618 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1619 status = ll_adv_data_set(0, cmd->len, &cmd->data[0]);
1620 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1621 status = ll_adv_data_set(cmd->len, &cmd->data[0]);
1622 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1623
1624 *evt = cmd_complete_status(status);
1625 }
1626
le_set_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)1627 static void le_set_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
1628 {
1629 struct bt_hci_cp_le_set_scan_rsp_data *cmd = (void *)buf->data;
1630 uint8_t status;
1631
1632 if (adv_cmds_legacy_check(evt)) {
1633 return;
1634 }
1635
1636 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1637 status = ll_adv_scan_rsp_set(0, cmd->len, &cmd->data[0]);
1638 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1639 status = ll_adv_scan_rsp_set(cmd->len, &cmd->data[0]);
1640 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1641
1642 *evt = cmd_complete_status(status);
1643 }
1644
/* Handle HCI LE Set Advertising Enable (legacy).
 *
 * The LL entry point's signature varies with the build: extended
 * advertising and mesh extensions add parameters, all passed as zero for
 * the legacy command (handle 0 selects the legacy advertising set).
 */
static void le_set_adv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_adv_enable *cmd = (void *)buf->data;
	uint8_t status;

	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
#if defined(CONFIG_BT_HCI_MESH_EXT)
	status = ll_adv_enable(0, cmd->enable, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_HCI_MESH_EXT */
	status = ll_adv_enable(0, cmd->enable, 0, 0);
#endif /* !CONFIG_BT_HCI_MESH_EXT */
#else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
	status = ll_adv_enable(cmd->enable);
#endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */

	*evt = cmd_complete_status(status);
}
1666
1667 #if defined(CONFIG_BT_CTLR_ADV_ISO)
le_create_big(struct net_buf * buf,struct net_buf ** evt)1668 static void le_create_big(struct net_buf *buf, struct net_buf **evt)
1669 {
1670 struct bt_hci_cp_le_create_big *cmd = (void *)buf->data;
1671 uint32_t sdu_interval;
1672 uint16_t max_latency;
1673 uint8_t big_handle;
1674 uint8_t adv_handle;
1675 uint16_t max_sdu;
1676 uint8_t status;
1677
1678 status = ll_adv_iso_by_hci_handle_new(cmd->big_handle, &big_handle);
1679 if (status) {
1680 *evt = cmd_status(status);
1681 return;
1682 }
1683
1684 status = ll_adv_set_by_hci_handle_get(cmd->adv_handle, &adv_handle);
1685 if (status) {
1686 *evt = cmd_status(status);
1687 return;
1688 }
1689
1690 sdu_interval = sys_get_le24(cmd->sdu_interval);
1691 max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1692 max_latency = sys_le16_to_cpu(cmd->max_latency);
1693
1694 status = ll_big_create(big_handle, adv_handle, cmd->num_bis,
1695 sdu_interval, max_sdu, max_latency, cmd->rtn,
1696 cmd->phy, cmd->packing, cmd->framing,
1697 cmd->encryption, cmd->bcode);
1698
1699 *evt = cmd_status(status);
1700 }
1701
le_create_big_test(struct net_buf * buf,struct net_buf ** evt)1702 static void le_create_big_test(struct net_buf *buf, struct net_buf **evt)
1703 {
1704 struct bt_hci_cp_le_create_big_test *cmd = (void *)buf->data;
1705 uint32_t sdu_interval;
1706 uint16_t iso_interval;
1707 uint16_t max_sdu;
1708 uint16_t max_pdu;
1709 uint8_t status;
1710
1711 sdu_interval = sys_get_le24(cmd->sdu_interval);
1712 iso_interval = sys_le16_to_cpu(cmd->iso_interval);
1713 max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1714 max_pdu = sys_le16_to_cpu(cmd->max_pdu);
1715
1716 status = ll_big_test_create(cmd->big_handle, cmd->adv_handle,
1717 cmd->num_bis, sdu_interval, iso_interval,
1718 cmd->nse, max_sdu, max_pdu, cmd->phy,
1719 cmd->packing, cmd->framing, cmd->bn,
1720 cmd->irc, cmd->pto, cmd->encryption,
1721 cmd->bcode);
1722
1723 *evt = cmd_status(status);
1724 }
1725
le_terminate_big(struct net_buf * buf,struct net_buf ** evt)1726 static void le_terminate_big(struct net_buf *buf, struct net_buf **evt)
1727 {
1728 struct bt_hci_cp_le_terminate_big *cmd = (void *)buf->data;
1729 uint8_t status;
1730
1731 status = ll_big_terminate(cmd->big_handle, cmd->reason);
1732
1733 *evt = cmd_status(status);
1734 }
1735 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1736 #endif /* CONFIG_BT_BROADCASTER */
1737
1738 #if defined(CONFIG_BT_OBSERVER)
le_set_scan_param(struct net_buf * buf,struct net_buf ** evt)1739 static void le_set_scan_param(struct net_buf *buf, struct net_buf **evt)
1740 {
1741 struct bt_hci_cp_le_set_scan_param *cmd = (void *)buf->data;
1742 uint16_t interval;
1743 uint16_t window;
1744 uint8_t status;
1745
1746 if (adv_cmds_legacy_check(evt)) {
1747 return;
1748 }
1749
1750 interval = sys_le16_to_cpu(cmd->interval);
1751 window = sys_le16_to_cpu(cmd->window);
1752
1753 status = ll_scan_params_set(cmd->scan_type, interval, window,
1754 cmd->addr_type, cmd->filter_policy);
1755
1756 *evt = cmd_complete_status(status);
1757 }
1758
/* Handle HCI LE Set Scan Enable (legacy).
 *
 * Duplicate filtering is implemented here in the HCI layer, so its
 * bookkeeping must be (re)initialized before the scanner is toggled.
 */
static void le_set_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->enable && cmd->filter_dup) {
		/* The `if (0)` head lets the first real branch be added or
		 * removed by conditional compilation without special-casing.
		 */
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		} else if (dup_count == DUP_FILTER_DISABLED) {
			/* Filtering was disabled: enable it with an empty table. */
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		} else if (!dup_scan) {
			/* Table already valid; only reset the per-set
			 * extended advertising state.
			 */
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
		/* Scanning or filtering disabled: mark the filter inactive. */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_scan_enable(cmd->enable, 0, 0);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_scan_enable(cmd->enable);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 * enabling of already enabled scanning shall succeed after
	 * updates to filter duplicates is handled in the above
	 * statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
1817
1818 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
le_big_create_sync(struct net_buf * buf,struct net_buf ** evt)1819 static void le_big_create_sync(struct net_buf *buf, struct net_buf **evt)
1820 {
1821 struct bt_hci_cp_le_big_create_sync *cmd = (void *)buf->data;
1822 uint8_t status;
1823 uint16_t sync_handle;
1824 uint16_t sync_timeout;
1825
1826 sync_handle = sys_le16_to_cpu(cmd->sync_handle);
1827 sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
1828
1829 status = ll_big_sync_create(cmd->big_handle, sync_handle,
1830 cmd->encryption, cmd->bcode, cmd->mse,
1831 sync_timeout, cmd->num_bis, cmd->bis);
1832
1833 *evt = cmd_status(status);
1834 }
1835
1836
le_big_terminate_sync(struct net_buf * buf,struct net_buf ** evt,void ** node_rx)1837 static void le_big_terminate_sync(struct net_buf *buf, struct net_buf **evt,
1838 void **node_rx)
1839 {
1840 struct bt_hci_cp_le_big_terminate_sync *cmd = (void *)buf->data;
1841 struct bt_hci_rp_le_big_terminate_sync *rp;
1842 uint8_t big_handle;
1843 uint8_t status;
1844
1845 big_handle = cmd->big_handle;
1846 status = ll_big_sync_terminate(big_handle, node_rx);
1847
1848 rp = hci_cmd_complete(evt, sizeof(*rp));
1849 rp->status = status;
1850 rp->big_handle = big_handle;
1851 }
1852 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1853 #endif /* CONFIG_BT_OBSERVER */
1854
1855 #if defined(CONFIG_BT_CENTRAL)
check_cconn_params(bool ext,uint16_t scan_interval,uint16_t scan_window,uint16_t conn_interval_max,uint16_t conn_latency,uint16_t supervision_timeout)1856 static uint8_t check_cconn_params(bool ext, uint16_t scan_interval,
1857 uint16_t scan_window,
1858 uint16_t conn_interval_max,
1859 uint16_t conn_latency,
1860 uint16_t supervision_timeout)
1861 {
1862 if (scan_interval < 0x0004 || scan_window < 0x0004 ||
1863 (!ext && (scan_interval > 0x4000 || scan_window > 0x4000))) {
1864 return BT_HCI_ERR_INVALID_PARAM;
1865 }
1866
1867 if (conn_interval_max < 0x0006 || conn_interval_max > 0x0C80) {
1868 return BT_HCI_ERR_INVALID_PARAM;
1869 }
1870
1871 if (conn_latency > 0x01F3) {
1872 return BT_HCI_ERR_INVALID_PARAM;
1873 }
1874
1875 if (supervision_timeout < 0x000A || supervision_timeout > 0x0C80) {
1876 return BT_HCI_ERR_INVALID_PARAM;
1877 }
1878
1879 /* sto * 10ms > (1 + lat) * ci * 1.25ms * 2
1880 * sto * 10 > (1 + lat) * ci * 2.5
1881 * sto * 2 > (1 + lat) * ci * 0.5
1882 * sto * 4 > (1 + lat) * ci
1883 */
1884 if ((supervision_timeout << 2) <= ((1 + conn_latency) *
1885 conn_interval_max)) {
1886 return BT_HCI_ERR_INVALID_PARAM;
1887 }
1888
1889 return 0;
1890 }
1891
/* HCI LE Create Connection (legacy).
 *
 * Validates the connection parameters (when CONFIG_BT_CTLR_PARAM_CHECK is
 * enabled) and initiates a central role connection via the legacy scanner
 * on the 1M PHY. Responds with a Command Status event through *evt.
 */
static void le_create_connection(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_create_conn *cmd = (void *)buf->data;
	uint16_t supervision_timeout;
	uint16_t conn_interval_max;
	uint16_t scan_interval;
	uint16_t conn_latency;
	uint16_t scan_window;
	uint8_t status;

	/* Legacy advertising commands are disallowed once extended
	 * advertising commands have been used (and vice versa).
	 */
	if (adv_cmds_legacy_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Command parameters arrive little-endian on the wire */
	scan_interval = sys_le16_to_cpu(cmd->scan_interval);
	scan_window = sys_le16_to_cpu(cmd->scan_window);
	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		/* ext=false: legacy command, so the 0x4000 upper bound on
		 * scan interval/window applies.
		 */
		status = check_cconn_params(false, scan_interval,
					    scan_window,
					    conn_interval_max,
					    conn_latency,
					    supervision_timeout);
		if (status) {
			*evt = cmd_status(status);
			return;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* With extended advertising support, connection creation is a
	 * two-step sequence: configure, then enable the initiator.
	 */
	status = ll_create_connection(scan_interval, scan_window,
				      cmd->filter_policy,
				      cmd->peer_addr.type,
				      &cmd->peer_addr.a.val[0],
				      cmd->own_addr_type, conn_interval_max,
				      conn_latency, supervision_timeout,
				      PHY_LEGACY);
	if (status) {
		*evt = cmd_status(status);
		return;
	}

	status = ll_connect_enable(0U);

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_create_connection(scan_interval, scan_window,
				      cmd->filter_policy,
				      cmd->peer_addr.type,
				      &cmd->peer_addr.a.val[0],
				      cmd->own_addr_type, conn_interval_max,
				      conn_latency, supervision_timeout);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_status(status);
}
1951
/* HCI LE Create Connection Cancel: abort an ongoing connection creation.
 * node_rx carries back the controller's pending rx node, if any.
 */
static void le_create_conn_cancel(struct net_buf *buf, struct net_buf **evt,
				  void **node_rx)
{
	*evt = cmd_complete_status(ll_connect_disable(node_rx));
}
1961
le_set_host_chan_classif(struct net_buf * buf,struct net_buf ** evt)1962 static void le_set_host_chan_classif(struct net_buf *buf, struct net_buf **evt)
1963 {
1964 struct bt_hci_cp_le_set_host_chan_classif *cmd = (void *)buf->data;
1965 uint8_t status;
1966
1967 status = ll_chm_update(&cmd->ch_map[0]);
1968
1969 *evt = cmd_complete_status(status);
1970 }
1971
1972 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_start_encryption(struct net_buf * buf,struct net_buf ** evt)1973 static void le_start_encryption(struct net_buf *buf, struct net_buf **evt)
1974 {
1975 struct bt_hci_cp_le_start_encryption *cmd = (void *)buf->data;
1976 uint16_t handle;
1977 uint8_t status;
1978
1979 handle = sys_le16_to_cpu(cmd->handle);
1980 status = ll_enc_req_send(handle,
1981 (uint8_t *)&cmd->rand,
1982 (uint8_t *)&cmd->ediv,
1983 &cmd->ltk[0]);
1984
1985 *evt = cmd_status(status);
1986 }
1987 #endif /* CONFIG_BT_CTLR_LE_ENC */
1988
1989 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
/* HCI LE Set CIG Parameters: create a new Connected Isochronous Group, or
 * begin reconfiguring an existing one, then configure each of its CISes.
 * On success the response carries the allocated CIS connection handles.
 */
static void le_set_cig_parameters(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_cig_params *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_cig_params *rp;
	uint32_t c_interval;
	uint32_t p_interval;
	uint16_t c_latency;
	uint16_t p_latency;
	uint8_t cis_count;
	uint8_t cig_id;
	uint8_t status;
	uint8_t i;

	/* SDU intervals are 24-bit little-endian values */
	c_interval = sys_get_le24(cmd->c_interval);
	p_interval = sys_get_le24(cmd->p_interval);
	c_latency = sys_le16_to_cpu(cmd->c_latency);
	p_latency = sys_le16_to_cpu(cmd->p_latency);

	cig_id = cmd->cig_id;
	cis_count = cmd->num_cis;

	/* Create CIG or start modifying existing CIG */
	status = ll_cig_parameters_open(cig_id, c_interval, p_interval,
					cmd->sca, cmd->packing, cmd->framing,
					c_latency, p_latency, cis_count);

	/* Configure individual CISes; stop at the first error */
	for (i = 0; !status && i < cis_count; i++) {
		struct bt_hci_cis_params *params = &cmd->cis[i];
		uint16_t c_sdu;
		uint16_t p_sdu;

		c_sdu = sys_le16_to_cpu(params->c_sdu);
		p_sdu = sys_le16_to_cpu(params->p_sdu);

		status = ll_cis_parameters_set(params->cis_id, c_sdu, p_sdu,
					       params->c_phy, params->p_phy,
					       params->c_rtn, params->p_rtn);
	}

	/* Response is sized for one 16-bit handle per requested CIS */
	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
	rp->cig_id = cig_id;

	/* Only apply parameters if all went well */
	if (!status) {
		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];

		status = ll_cig_parameters_commit(cig_id, handles);

		if (status == BT_HCI_ERR_SUCCESS) {
			for (uint8_t j = 0; j < cis_count; j++) {
				rp->handle[j] = sys_cpu_to_le16(handles[j]);
			}
		}
	}

	/* No handles are reported unless the whole sequence succeeded */
	rp->num_handles = status ? 0U : cis_count;
	rp->status = status;
}
2049
/* HCI LE Set CIG Parameters Test: ISO-test variant of CIG configuration,
 * exposing low-level timing knobs (flush timeouts, ISO interval, PDU
 * sizes, burst numbers) instead of latency targets.
 */
static void le_set_cig_params_test(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_cig_params_test *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_cig_params_test *rp;

	uint32_t c_interval;
	uint32_t p_interval;
	uint16_t iso_interval;
	uint8_t cis_count;
	uint8_t cig_id;
	uint8_t status;
	uint8_t i;

	/* SDU intervals are 24-bit little-endian values */
	c_interval = sys_get_le24(cmd->c_interval);
	p_interval = sys_get_le24(cmd->p_interval);
	iso_interval = sys_le16_to_cpu(cmd->iso_interval);

	cig_id = cmd->cig_id;
	cis_count = cmd->num_cis;

	/* Create CIG or start modifying existing CIG */
	status = ll_cig_parameters_test_open(cig_id, c_interval,
					     p_interval, cmd->c_ft,
					     cmd->p_ft, iso_interval,
					     cmd->sca, cmd->packing,
					     cmd->framing,
					     cis_count);

	/* Configure individual CISes; stop at the first error */
	for (i = 0; !status && i < cis_count; i++) {
		struct bt_hci_cis_params_test *params = &cmd->cis[i];
		uint16_t c_sdu;
		uint16_t p_sdu;
		uint16_t c_pdu;
		uint16_t p_pdu;
		uint8_t nse;

		nse = params->nse;
		c_sdu = sys_le16_to_cpu(params->c_sdu);
		p_sdu = sys_le16_to_cpu(params->p_sdu);
		c_pdu = sys_le16_to_cpu(params->c_pdu);
		p_pdu = sys_le16_to_cpu(params->p_pdu);

		status = ll_cis_parameters_test_set(params->cis_id, nse,
						    c_sdu, p_sdu,
						    c_pdu, p_pdu,
						    params->c_phy,
						    params->p_phy,
						    params->c_bn,
						    params->p_bn);
	}

	/* Response is sized for one 16-bit handle per requested CIS */
	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
	rp->cig_id = cig_id;

	/* Only apply parameters if all went well */
	if (!status) {
		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];

		status = ll_cig_parameters_commit(cig_id, handles);

		if (status == BT_HCI_ERR_SUCCESS) {
			for (uint8_t j = 0; j < cis_count; j++) {
				rp->handle[j] = sys_cpu_to_le16(handles[j]);
			}
		}
	}

	/* No handles are reported unless the whole sequence succeeded */
	rp->num_handles = status ? 0U : cis_count;
	rp->status = status;
}
2121
/* HCI LE Create CIS: validate every requested CIS/ACL handle pair up
 * front, then start establishing each CIS. Establishment results are
 * reported asynchronously through CIS Established events.
 */
static void le_create_cis(struct net_buf *buf, struct net_buf **evt)
{
	/* Tracks which CIS indices appear in this request so duplicate
	 * handles can be rejected.
	 */
	uint16_t handle_used[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP] = {0};
	struct bt_hci_cp_le_create_cis *cmd = (void *)buf->data;
	uint8_t status;
	uint8_t i;

	/*
	 * Only create a CIS if the Isochronous Channels (Host Support) feature bit
	 * is set. Refer to BT Spec v5.4 Vol 6 Part B Section 4.6.33.1.
	 */
	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS))) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/*
	 * Creating new CISes is disallowed until all previous CIS
	 * established events have been generated
	 */
	if (cis_pending_count) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Check all handles before actually starting to create CISes */
	status = 0x00;
	for (i = 0; !status && i < cmd->num_cis; i++) {
		uint16_t cis_handle;
		uint16_t acl_handle;
		uint8_t cis_idx;

		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);

		cis_idx = LL_CIS_IDX_FROM_HANDLE(cis_handle);
		if (handle_used[cis_idx]) {
			/* Handle must be unique in request */
			status = BT_HCI_ERR_INVALID_PARAM;
			break;
		}

		handle_used[cis_idx]++;
		status = ll_cis_create_check(cis_handle, acl_handle);
	}

	if (status) {
		*evt = cmd_status(status);
		return;
	}

	/*
	 * Actually create CISes, any errors are to be reported
	 * through CIS established events
	 */
	cis_pending_count = cmd->num_cis;
	for (i = 0; i < cmd->num_cis; i++) {
		uint16_t cis_handle;
		uint16_t acl_handle;

		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
		ll_cis_create(cis_handle, acl_handle);
	}

	/* status is 0x00 here: Command Status reports success */
	*evt = cmd_status(status);
}
2189
le_remove_cig(struct net_buf * buf,struct net_buf ** evt)2190 static void le_remove_cig(struct net_buf *buf, struct net_buf **evt)
2191 {
2192 struct bt_hci_cp_le_remove_cig *cmd = (void *)buf->data;
2193 struct bt_hci_rp_le_remove_cig *rp;
2194 uint8_t status;
2195
2196 status = ll_cig_remove(cmd->cig_id);
2197
2198 rp = hci_cmd_complete(evt, sizeof(*rp));
2199 rp->status = status;
2200 rp->cig_id = cmd->cig_id;
2201 }
2202 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
2203
2204 #endif /* CONFIG_BT_CENTRAL */
2205
2206 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_transmit_test(struct net_buf * buf,struct net_buf ** evt)2207 static void le_iso_transmit_test(struct net_buf *buf, struct net_buf **evt)
2208 {
2209 struct bt_hci_cp_le_iso_transmit_test *cmd = (void *)buf->data;
2210 struct bt_hci_rp_le_iso_transmit_test *rp;
2211 uint16_t handle;
2212 uint8_t status;
2213
2214 handle = sys_le16_to_cpu(cmd->handle);
2215
2216 status = ll_iso_transmit_test(handle, cmd->payload_type);
2217
2218 rp = hci_cmd_complete(evt, sizeof(*rp));
2219 rp->status = status;
2220 rp->handle = sys_cpu_to_le16(handle);
2221 }
2222
le_read_iso_tx_sync(struct net_buf * buf,struct net_buf ** evt)2223 static void le_read_iso_tx_sync(struct net_buf *buf, struct net_buf **evt)
2224 {
2225 struct bt_hci_cp_le_read_iso_tx_sync *cmd = (void *)buf->data;
2226 struct bt_hci_rp_le_read_iso_tx_sync *rp;
2227 uint16_t handle_le16;
2228 uint32_t timestamp;
2229 uint32_t offset;
2230 uint16_t handle;
2231 uint8_t status;
2232 uint16_t seq;
2233
2234 handle_le16 = cmd->handle;
2235 handle = sys_le16_to_cpu(handle_le16);
2236
2237 status = ll_read_iso_tx_sync(handle, &seq, ×tamp, &offset);
2238
2239 rp = hci_cmd_complete(evt, sizeof(*rp));
2240 rp->status = status;
2241 rp->handle = handle_le16;
2242 rp->seq = sys_cpu_to_le16(seq);
2243 rp->timestamp = sys_cpu_to_le32(timestamp);
2244 sys_put_le24(offset, rp->offset);
2245 }
2246 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2247
2248 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_receive_test(struct net_buf * buf,struct net_buf ** evt)2249 static void le_iso_receive_test(struct net_buf *buf, struct net_buf **evt)
2250 {
2251 struct bt_hci_cp_le_iso_receive_test *cmd = (void *)buf->data;
2252 struct bt_hci_rp_le_iso_receive_test *rp;
2253 uint16_t handle;
2254 uint8_t status;
2255
2256 handle = sys_le16_to_cpu(cmd->handle);
2257
2258 status = ll_iso_receive_test(handle, cmd->payload_type);
2259
2260 rp = hci_cmd_complete(evt, sizeof(*rp));
2261 rp->status = status;
2262 rp->handle = sys_cpu_to_le16(handle);
2263 }
2264
le_iso_read_test_counters(struct net_buf * buf,struct net_buf ** evt)2265 static void le_iso_read_test_counters(struct net_buf *buf, struct net_buf **evt)
2266 {
2267 struct bt_hci_cp_le_read_test_counters *cmd = (void *)buf->data;
2268 struct bt_hci_rp_le_read_test_counters *rp;
2269 uint32_t received_cnt;
2270 uint32_t missed_cnt;
2271 uint32_t failed_cnt;
2272 uint16_t handle;
2273 uint8_t status;
2274
2275 handle = sys_le16_to_cpu(cmd->handle);
2276 status = ll_iso_read_test_counters(handle, &received_cnt,
2277 &missed_cnt, &failed_cnt);
2278
2279 rp = hci_cmd_complete(evt, sizeof(*rp));
2280 rp->status = status;
2281 rp->handle = sys_cpu_to_le16(handle);
2282 rp->received_cnt = sys_cpu_to_le32(received_cnt);
2283 rp->missed_cnt = sys_cpu_to_le32(missed_cnt);
2284 rp->failed_cnt = sys_cpu_to_le32(failed_cnt);
2285 }
2286
2287 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
le_read_iso_link_quality(struct net_buf * buf,struct net_buf ** evt)2288 static void le_read_iso_link_quality(struct net_buf *buf, struct net_buf **evt)
2289 {
2290 struct bt_hci_cp_le_read_iso_link_quality *cmd = (void *)buf->data;
2291 struct bt_hci_rp_le_read_iso_link_quality *rp;
2292 uint32_t tx_last_subevent_packets;
2293 uint32_t retransmitted_packets;
2294 uint32_t rx_unreceived_packets;
2295 uint32_t tx_unacked_packets;
2296 uint32_t tx_flushed_packets;
2297 uint32_t crc_error_packets;
2298 uint32_t duplicate_packets;
2299 uint16_t handle_le16;
2300 uint16_t handle;
2301 uint8_t status;
2302
2303 handle_le16 = cmd->handle;
2304 handle = sys_le16_to_cpu(handle_le16);
2305 status = ll_read_iso_link_quality(handle, &tx_unacked_packets,
2306 &tx_flushed_packets,
2307 &tx_last_subevent_packets,
2308 &retransmitted_packets,
2309 &crc_error_packets,
2310 &rx_unreceived_packets,
2311 &duplicate_packets);
2312
2313 rp = hci_cmd_complete(evt, sizeof(*rp));
2314 rp->status = status;
2315 rp->handle = handle_le16;
2316 rp->tx_unacked_packets = sys_cpu_to_le32(tx_unacked_packets);
2317 rp->tx_flushed_packets = sys_cpu_to_le32(tx_flushed_packets);
2318 rp->tx_last_subevent_packets =
2319 sys_cpu_to_le32(tx_last_subevent_packets);
2320 rp->retransmitted_packets = sys_cpu_to_le32(retransmitted_packets);
2321 rp->crc_error_packets = sys_cpu_to_le32(crc_error_packets);
2322 rp->rx_unreceived_packets = sys_cpu_to_le32(rx_unreceived_packets);
2323 rp->duplicate_packets = sys_cpu_to_le32(duplicate_packets);
2324 }
2325 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
2326
2327 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
2328
2329 #if defined(CONFIG_BT_CTLR_ISO)
le_setup_iso_path(struct net_buf * buf,struct net_buf ** evt)2330 static void le_setup_iso_path(struct net_buf *buf, struct net_buf **evt)
2331 {
2332 struct bt_hci_cp_le_setup_iso_path *cmd = (void *)buf->data;
2333 struct bt_hci_rp_le_setup_iso_path *rp;
2334 uint32_t controller_delay;
2335 uint8_t *codec_config;
2336 uint8_t coding_format;
2337 uint16_t vs_codec_id;
2338 uint16_t company_id;
2339 uint16_t handle;
2340 uint8_t status;
2341
2342 handle = sys_le16_to_cpu(cmd->handle);
2343 coding_format = cmd->codec_id.coding_format;
2344 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
2345 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
2346 controller_delay = sys_get_le24(cmd->controller_delay);
2347 codec_config = &cmd->codec_config[0];
2348
2349 status = ll_setup_iso_path(handle, cmd->path_dir, cmd->path_id,
2350 coding_format, company_id, vs_codec_id,
2351 controller_delay, cmd->codec_config_len,
2352 codec_config);
2353
2354 rp = hci_cmd_complete(evt, sizeof(*rp));
2355 rp->status = status;
2356 rp->handle = sys_cpu_to_le16(handle);
2357 }
2358
le_remove_iso_path(struct net_buf * buf,struct net_buf ** evt)2359 static void le_remove_iso_path(struct net_buf *buf, struct net_buf **evt)
2360 {
2361 struct bt_hci_cp_le_remove_iso_path *cmd = (void *)buf->data;
2362 struct bt_hci_rp_le_remove_iso_path *rp;
2363 uint16_t handle;
2364 uint8_t status;
2365
2366 handle = sys_le16_to_cpu(cmd->handle);
2367
2368 status = ll_remove_iso_path(handle, cmd->path_dir);
2369
2370 rp = hci_cmd_complete(evt, sizeof(*rp));
2371 rp->status = status;
2372 rp->handle = sys_cpu_to_le16(handle);
2373 }
2374
le_iso_test_end(struct net_buf * buf,struct net_buf ** evt)2375 static void le_iso_test_end(struct net_buf *buf, struct net_buf **evt)
2376 {
2377 struct bt_hci_cp_le_iso_test_end *cmd = (void *)buf->data;
2378 struct bt_hci_rp_le_iso_test_end *rp;
2379 uint32_t received_cnt;
2380 uint32_t missed_cnt;
2381 uint32_t failed_cnt;
2382 uint16_t handle;
2383 uint8_t status;
2384
2385 handle = sys_le16_to_cpu(cmd->handle);
2386 status = ll_iso_test_end(handle, &received_cnt, &missed_cnt,
2387 &failed_cnt);
2388
2389 rp = hci_cmd_complete(evt, sizeof(*rp));
2390 rp->status = status;
2391 rp->handle = sys_cpu_to_le16(handle);
2392 rp->received_cnt = sys_cpu_to_le32(received_cnt);
2393 rp->missed_cnt = sys_cpu_to_le32(missed_cnt);
2394 rp->failed_cnt = sys_cpu_to_le32(failed_cnt);
2395 }
2396 #endif /* CONFIG_BT_CTLR_ISO */
2397
2398 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
le_set_host_feature(struct net_buf * buf,struct net_buf ** evt)2399 static void le_set_host_feature(struct net_buf *buf, struct net_buf **evt)
2400 {
2401 struct bt_hci_cp_le_set_host_feature *cmd = (void *)buf->data;
2402 struct bt_hci_rp_le_set_host_feature *rp;
2403 uint8_t status;
2404
2405 status = ll_set_host_feature(cmd->bit_number, cmd->bit_value);
2406
2407 rp = hci_cmd_complete(evt, sizeof(*rp));
2408 rp->status = status;
2409 }
2410 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
2411
2412 #if defined(CONFIG_BT_PERIPHERAL)
2413 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_ltk_req_reply(struct net_buf * buf,struct net_buf ** evt)2414 static void le_ltk_req_reply(struct net_buf *buf, struct net_buf **evt)
2415 {
2416 struct bt_hci_cp_le_ltk_req_reply *cmd = (void *)buf->data;
2417 struct bt_hci_rp_le_ltk_req_reply *rp;
2418 uint16_t handle;
2419 uint8_t status;
2420
2421 handle = sys_le16_to_cpu(cmd->handle);
2422 status = ll_start_enc_req_send(handle, 0x00, &cmd->ltk[0]);
2423
2424 rp = hci_cmd_complete(evt, sizeof(*rp));
2425 rp->status = status;
2426 rp->handle = sys_cpu_to_le16(handle);
2427 }
2428
le_ltk_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2429 static void le_ltk_req_neg_reply(struct net_buf *buf, struct net_buf **evt)
2430 {
2431 struct bt_hci_cp_le_ltk_req_neg_reply *cmd = (void *)buf->data;
2432 struct bt_hci_rp_le_ltk_req_neg_reply *rp;
2433 uint16_t handle;
2434 uint8_t status;
2435
2436 handle = sys_le16_to_cpu(cmd->handle);
2437 status = ll_start_enc_req_send(handle, BT_HCI_ERR_PIN_OR_KEY_MISSING,
2438 NULL);
2439
2440 rp = hci_cmd_complete(evt, sizeof(*rp));
2441 rp->status = status;
2442 rp->handle = sys_le16_to_cpu(handle);
2443 }
2444 #endif /* CONFIG_BT_CTLR_LE_ENC */
2445
2446 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
le_accept_cis(struct net_buf * buf,struct net_buf ** evt)2447 static void le_accept_cis(struct net_buf *buf, struct net_buf **evt)
2448 {
2449 struct bt_hci_cp_le_accept_cis *cmd = (void *)buf->data;
2450 uint16_t handle;
2451 uint8_t status;
2452
2453 handle = sys_le16_to_cpu(cmd->handle);
2454 status = ll_cis_accept(handle);
2455 *evt = cmd_status(status);
2456 }
2457
le_reject_cis(struct net_buf * buf,struct net_buf ** evt)2458 static void le_reject_cis(struct net_buf *buf, struct net_buf **evt)
2459 {
2460 struct bt_hci_cp_le_reject_cis *cmd = (void *)buf->data;
2461 struct bt_hci_rp_le_reject_cis *rp;
2462 uint16_t handle;
2463 uint8_t status;
2464
2465 handle = sys_le16_to_cpu(cmd->handle);
2466 status = ll_cis_reject(handle, cmd->reason);
2467
2468 rp = hci_cmd_complete(evt, sizeof(*rp));
2469 rp->status = status;
2470 rp->handle = sys_cpu_to_le16(handle);
2471 }
2472 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
2473
2474 #endif /* CONFIG_BT_PERIPHERAL */
2475
2476 #if defined(CONFIG_BT_CONN)
2477 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
le_req_peer_sca(struct net_buf * buf,struct net_buf ** evt)2478 static void le_req_peer_sca(struct net_buf *buf, struct net_buf **evt)
2479 {
2480 struct bt_hci_cp_le_req_peer_sca *cmd = (void *)buf->data;
2481 uint16_t handle;
2482 uint8_t status;
2483
2484 handle = sys_le16_to_cpu(cmd->handle);
2485 status = ll_req_peer_sca(handle);
2486
2487 *evt = cmd_status(status);
2488 }
2489 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
2490
2491 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
le_read_remote_features(struct net_buf * buf,struct net_buf ** evt)2492 static void le_read_remote_features(struct net_buf *buf, struct net_buf **evt)
2493 {
2494 struct bt_hci_cp_le_read_remote_features *cmd = (void *)buf->data;
2495 uint16_t handle;
2496 uint8_t status;
2497
2498 handle = sys_le16_to_cpu(cmd->handle);
2499 status = ll_feature_req_send(handle);
2500
2501 *evt = cmd_status(status);
2502 }
2503 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
2504
le_read_chan_map(struct net_buf * buf,struct net_buf ** evt)2505 static void le_read_chan_map(struct net_buf *buf, struct net_buf **evt)
2506 {
2507 struct bt_hci_cp_le_read_chan_map *cmd = (void *)buf->data;
2508 struct bt_hci_rp_le_read_chan_map *rp;
2509 uint16_t handle;
2510 uint8_t status;
2511
2512 handle = sys_le16_to_cpu(cmd->handle);
2513
2514 rp = hci_cmd_complete(evt, sizeof(*rp));
2515
2516 status = ll_chm_get(handle, rp->ch_map);
2517
2518 rp->status = status;
2519 rp->handle = sys_le16_to_cpu(handle);
2520 }
2521
le_conn_update(struct net_buf * buf,struct net_buf ** evt)2522 static void le_conn_update(struct net_buf *buf, struct net_buf **evt)
2523 {
2524 struct hci_cp_le_conn_update *cmd = (void *)buf->data;
2525 uint16_t supervision_timeout;
2526 uint16_t conn_interval_min;
2527 uint16_t conn_interval_max;
2528 uint16_t conn_latency;
2529 uint16_t handle;
2530 uint8_t status;
2531
2532 handle = sys_le16_to_cpu(cmd->handle);
2533 conn_interval_min = sys_le16_to_cpu(cmd->conn_interval_min);
2534 conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
2535 conn_latency = sys_le16_to_cpu(cmd->conn_latency);
2536 supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
2537
2538 status = ll_conn_update(handle, 0, 0, conn_interval_min,
2539 conn_interval_max, conn_latency,
2540 supervision_timeout, NULL);
2541
2542 *evt = cmd_status(status);
2543 }
2544
2545 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
le_conn_param_req_reply(struct net_buf * buf,struct net_buf ** evt)2546 static void le_conn_param_req_reply(struct net_buf *buf, struct net_buf **evt)
2547 {
2548 struct bt_hci_cp_le_conn_param_req_reply *cmd = (void *)buf->data;
2549 struct bt_hci_rp_le_conn_param_req_reply *rp;
2550 uint16_t interval_min;
2551 uint16_t interval_max;
2552 uint16_t latency;
2553 uint16_t timeout;
2554 uint16_t handle;
2555 uint8_t status;
2556
2557 handle = sys_le16_to_cpu(cmd->handle);
2558 interval_min = sys_le16_to_cpu(cmd->interval_min);
2559 interval_max = sys_le16_to_cpu(cmd->interval_max);
2560 latency = sys_le16_to_cpu(cmd->latency);
2561 timeout = sys_le16_to_cpu(cmd->timeout);
2562
2563 status = ll_conn_update(handle, 2, 0, interval_min, interval_max,
2564 latency, timeout, NULL);
2565
2566 rp = hci_cmd_complete(evt, sizeof(*rp));
2567 rp->status = status;
2568 rp->handle = sys_cpu_to_le16(handle);
2569 }
2570
le_conn_param_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2571 static void le_conn_param_req_neg_reply(struct net_buf *buf,
2572 struct net_buf **evt)
2573 {
2574 struct bt_hci_cp_le_conn_param_req_neg_reply *cmd = (void *)buf->data;
2575 struct bt_hci_rp_le_conn_param_req_neg_reply *rp;
2576 uint16_t handle;
2577 uint8_t status;
2578
2579 handle = sys_le16_to_cpu(cmd->handle);
2580 status = ll_conn_update(handle, 2, cmd->reason, 0, 0, 0, 0, NULL);
2581
2582 rp = hci_cmd_complete(evt, sizeof(*rp));
2583 rp->status = status;
2584 rp->handle = sys_cpu_to_le16(handle);
2585 }
2586 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2587
2588 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
le_set_data_len(struct net_buf * buf,struct net_buf ** evt)2589 static void le_set_data_len(struct net_buf *buf, struct net_buf **evt)
2590 {
2591 struct bt_hci_cp_le_set_data_len *cmd = (void *)buf->data;
2592 struct bt_hci_rp_le_set_data_len *rp;
2593 uint16_t tx_octets;
2594 uint16_t tx_time;
2595 uint16_t handle;
2596 uint8_t status;
2597
2598 handle = sys_le16_to_cpu(cmd->handle);
2599 tx_octets = sys_le16_to_cpu(cmd->tx_octets);
2600 tx_time = sys_le16_to_cpu(cmd->tx_time);
2601 status = ll_length_req_send(handle, tx_octets, tx_time);
2602
2603 rp = hci_cmd_complete(evt, sizeof(*rp));
2604 rp->status = status;
2605 rp->handle = sys_cpu_to_le16(handle);
2606 }
2607
le_read_default_data_len(struct net_buf * buf,struct net_buf ** evt)2608 static void le_read_default_data_len(struct net_buf *buf, struct net_buf **evt)
2609 {
2610 struct bt_hci_rp_le_read_default_data_len *rp;
2611 uint16_t max_tx_octets;
2612 uint16_t max_tx_time;
2613
2614 rp = hci_cmd_complete(evt, sizeof(*rp));
2615
2616 ll_length_default_get(&max_tx_octets, &max_tx_time);
2617
2618 rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2619 rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2620 rp->status = 0x00;
2621 }
2622
le_write_default_data_len(struct net_buf * buf,struct net_buf ** evt)2623 static void le_write_default_data_len(struct net_buf *buf,
2624 struct net_buf **evt)
2625 {
2626 struct bt_hci_cp_le_write_default_data_len *cmd = (void *)buf->data;
2627 uint16_t max_tx_octets;
2628 uint16_t max_tx_time;
2629 uint8_t status;
2630
2631 max_tx_octets = sys_le16_to_cpu(cmd->max_tx_octets);
2632 max_tx_time = sys_le16_to_cpu(cmd->max_tx_time);
2633 status = ll_length_default_set(max_tx_octets, max_tx_time);
2634
2635 *evt = cmd_complete_status(status);
2636 }
2637
le_read_max_data_len(struct net_buf * buf,struct net_buf ** evt)2638 static void le_read_max_data_len(struct net_buf *buf, struct net_buf **evt)
2639 {
2640 struct bt_hci_rp_le_read_max_data_len *rp;
2641 uint16_t max_tx_octets;
2642 uint16_t max_tx_time;
2643 uint16_t max_rx_octets;
2644 uint16_t max_rx_time;
2645
2646 rp = hci_cmd_complete(evt, sizeof(*rp));
2647
2648 ll_length_max_get(&max_tx_octets, &max_tx_time,
2649 &max_rx_octets, &max_rx_time);
2650
2651 rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2652 rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2653 rp->max_rx_octets = sys_cpu_to_le16(max_rx_octets);
2654 rp->max_rx_time = sys_cpu_to_le16(max_rx_time);
2655 rp->status = 0x00;
2656 }
2657 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2658
2659 #if defined(CONFIG_BT_CTLR_PHY)
le_read_phy(struct net_buf * buf,struct net_buf ** evt)2660 static void le_read_phy(struct net_buf *buf, struct net_buf **evt)
2661 {
2662 struct bt_hci_cp_le_read_phy *cmd = (void *)buf->data;
2663 struct bt_hci_rp_le_read_phy *rp;
2664 uint16_t handle;
2665 uint8_t status;
2666
2667 handle = sys_le16_to_cpu(cmd->handle);
2668
2669 rp = hci_cmd_complete(evt, sizeof(*rp));
2670
2671 status = ll_phy_get(handle, &rp->tx_phy, &rp->rx_phy);
2672
2673 rp->status = status;
2674 rp->handle = sys_cpu_to_le16(handle);
2675 rp->tx_phy = find_lsb_set(rp->tx_phy);
2676 rp->rx_phy = find_lsb_set(rp->rx_phy);
2677 }
2678
le_set_default_phy(struct net_buf * buf,struct net_buf ** evt)2679 static void le_set_default_phy(struct net_buf *buf, struct net_buf **evt)
2680 {
2681 struct bt_hci_cp_le_set_default_phy *cmd = (void *)buf->data;
2682 uint8_t status;
2683
2684 if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2685 cmd->tx_phys = 0x07;
2686 }
2687 if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2688 cmd->rx_phys = 0x07;
2689 }
2690
2691 status = ll_phy_default_set(cmd->tx_phys, cmd->rx_phys);
2692
2693 *evt = cmd_complete_status(status);
2694 }
2695
/* HCI LE Set PHY: request a PHY change for a connection.
 *
 * Builds the allowed-PHY mask from the enabled controller features,
 * applies the host's "no preference" bits, validates the requested PHYs
 * and maps the coded PHY options before issuing the LL PHY request.
 */
static void le_set_phy(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_phy *cmd = (void *)buf->data;
	uint16_t phy_opts;
	uint8_t mask_phys;
	uint16_t handle;
	uint8_t status;

	handle = sys_le16_to_cpu(cmd->handle);
	phy_opts = sys_le16_to_cpu(cmd->phy_opts);

	/* PHYs supported by this controller build */
	mask_phys = BT_HCI_LE_PHY_PREFER_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
		mask_phys |= BT_HCI_LE_PHY_PREFER_2M;
	}
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		mask_phys |= BT_HCI_LE_PHY_PREFER_CODED;
	}

	/* "no preference" from the host means any supported PHY */
	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
		cmd->tx_phys |= mask_phys;
	}
	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
		cmd->rx_phys |= mask_phys;
	}

	/* Reject a request for any PHY this build does not support */
	if ((cmd->tx_phys | cmd->rx_phys) & ~mask_phys) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	/* At least one PHY must be requested in each direction */
	if (!(cmd->tx_phys & 0x07) ||
	    !(cmd->rx_phys & 0x07)) {
		*evt = cmd_status(BT_HCI_ERR_INVALID_PARAM);

		return;
	}

	/* Map the HCI coded PHY options (1 = S=2, 2 = S=8) onto a
	 * zero-based encoding; 0 (no preference) also maps to 0.
	 */
	if (phy_opts & 0x03) {
		phy_opts -= 1U;
		phy_opts &= 1;
	} else {
		phy_opts = 0U;
	}

	status = ll_phy_req_send(handle, cmd->tx_phys, phy_opts,
				 cmd->rx_phys);

	*evt = cmd_status(status);
}
2747 #endif /* CONFIG_BT_CTLR_PHY */
2748 #endif /* CONFIG_BT_CONN */
2749
2750 #if defined(CONFIG_BT_CTLR_PRIVACY)
le_add_dev_to_rl(struct net_buf * buf,struct net_buf ** evt)2751 static void le_add_dev_to_rl(struct net_buf *buf, struct net_buf **evt)
2752 {
2753 struct bt_hci_cp_le_add_dev_to_rl *cmd = (void *)buf->data;
2754 uint8_t status;
2755
2756 status = ll_rl_add(&cmd->peer_id_addr, cmd->peer_irk, cmd->local_irk);
2757
2758 *evt = cmd_complete_status(status);
2759 }
2760
le_rem_dev_from_rl(struct net_buf * buf,struct net_buf ** evt)2761 static void le_rem_dev_from_rl(struct net_buf *buf, struct net_buf **evt)
2762 {
2763 struct bt_hci_cp_le_rem_dev_from_rl *cmd = (void *)buf->data;
2764 uint8_t status;
2765
2766 status = ll_rl_remove(&cmd->peer_id_addr);
2767
2768 *evt = cmd_complete_status(status);
2769 }
2770
/* HCI LE Clear Resolving List: remove all entries. */
static void le_clear_rl(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_rl_clear());
}
2779
le_read_rl_size(struct net_buf * buf,struct net_buf ** evt)2780 static void le_read_rl_size(struct net_buf *buf, struct net_buf **evt)
2781 {
2782 struct bt_hci_rp_le_read_rl_size *rp;
2783
2784 rp = hci_cmd_complete(evt, sizeof(*rp));
2785
2786 rp->rl_size = ll_rl_size_get();
2787 rp->status = 0x00;
2788 }
2789
le_read_peer_rpa(struct net_buf * buf,struct net_buf ** evt)2790 static void le_read_peer_rpa(struct net_buf *buf, struct net_buf **evt)
2791 {
2792 struct bt_hci_cp_le_read_peer_rpa *cmd = (void *)buf->data;
2793 struct bt_hci_rp_le_read_peer_rpa *rp;
2794 bt_addr_le_t peer_id_addr;
2795
2796 bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2797 rp = hci_cmd_complete(evt, sizeof(*rp));
2798
2799 rp->status = ll_rl_crpa_get(&peer_id_addr, &rp->peer_rpa);
2800 }
2801
le_read_local_rpa(struct net_buf * buf,struct net_buf ** evt)2802 static void le_read_local_rpa(struct net_buf *buf, struct net_buf **evt)
2803 {
2804 struct bt_hci_cp_le_read_local_rpa *cmd = (void *)buf->data;
2805 struct bt_hci_rp_le_read_local_rpa *rp;
2806 bt_addr_le_t peer_id_addr;
2807
2808 bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2809 rp = hci_cmd_complete(evt, sizeof(*rp));
2810
2811 rp->status = ll_rl_lrpa_get(&peer_id_addr, &rp->local_rpa);
2812 }
2813
le_set_addr_res_enable(struct net_buf * buf,struct net_buf ** evt)2814 static void le_set_addr_res_enable(struct net_buf *buf, struct net_buf **evt)
2815 {
2816 struct bt_hci_cp_le_set_addr_res_enable *cmd = (void *)buf->data;
2817 uint8_t status;
2818
2819 status = ll_rl_enable(cmd->enable);
2820
2821 *evt = cmd_complete_status(status);
2822 }
2823
le_set_rpa_timeout(struct net_buf * buf,struct net_buf ** evt)2824 static void le_set_rpa_timeout(struct net_buf *buf, struct net_buf **evt)
2825 {
2826 struct bt_hci_cp_le_set_rpa_timeout *cmd = (void *)buf->data;
2827 uint16_t timeout = sys_le16_to_cpu(cmd->rpa_timeout);
2828
2829 ll_rl_timeout_set(timeout);
2830
2831 *evt = cmd_complete_status(0x00);
2832 }
2833
le_set_privacy_mode(struct net_buf * buf,struct net_buf ** evt)2834 static void le_set_privacy_mode(struct net_buf *buf, struct net_buf **evt)
2835 {
2836 struct bt_hci_cp_le_set_privacy_mode *cmd = (void *)buf->data;
2837 uint8_t status;
2838
2839 status = ll_priv_mode_set(&cmd->id_addr, cmd->mode);
2840
2841 *evt = cmd_complete_status(status);
2842 }
2843 #endif /* CONFIG_BT_CTLR_PRIVACY */
2844
le_read_tx_power(struct net_buf * buf,struct net_buf ** evt)2845 static void le_read_tx_power(struct net_buf *buf, struct net_buf **evt)
2846 {
2847 struct bt_hci_rp_le_read_tx_power *rp;
2848
2849 rp = hci_cmd_complete(evt, sizeof(*rp));
2850 rp->status = 0x00;
2851 ll_tx_pwr_get(&rp->min_tx_power, &rp->max_tx_power);
2852 }
2853
2854 #if defined(CONFIG_BT_CTLR_DF)
2855 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
le_df_set_cl_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)2856 static void le_df_set_cl_cte_tx_params(struct net_buf *buf,
2857 struct net_buf **evt)
2858 {
2859 struct bt_hci_cp_le_set_cl_cte_tx_params *cmd = (void *)buf->data;
2860 uint8_t adv_handle;
2861 uint8_t status;
2862
2863 if (adv_cmds_ext_check(evt)) {
2864 return;
2865 }
2866
2867 status = ll_adv_set_by_hci_handle_get(cmd->handle, &adv_handle);
2868 if (status) {
2869 *evt = cmd_complete_status(status);
2870 return;
2871 }
2872
2873 status = ll_df_set_cl_cte_tx_params(adv_handle, cmd->cte_len,
2874 cmd->cte_type, cmd->cte_count,
2875 cmd->switch_pattern_len,
2876 cmd->ant_ids);
2877
2878 *evt = cmd_complete_status(status);
2879 }
2880
le_df_set_cl_cte_enable(struct net_buf * buf,struct net_buf ** evt)2881 static void le_df_set_cl_cte_enable(struct net_buf *buf, struct net_buf **evt)
2882 {
2883 struct bt_hci_cp_le_set_cl_cte_tx_enable *cmd = (void *)buf->data;
2884 uint8_t status;
2885 uint8_t handle;
2886
2887 if (adv_cmds_ext_check(evt)) {
2888 return;
2889 }
2890
2891 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
2892 if (status) {
2893 *evt = cmd_complete_status(status);
2894 return;
2895 }
2896
2897 status = ll_df_set_cl_cte_tx_enable(handle, cmd->cte_enable);
2898
2899 *evt = cmd_complete_status(status);
2900 }
2901 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2902
2903 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
le_df_set_cl_iq_sampling_enable(struct net_buf * buf,struct net_buf ** evt)2904 static void le_df_set_cl_iq_sampling_enable(struct net_buf *buf, struct net_buf **evt)
2905 {
2906 struct bt_hci_cp_le_set_cl_cte_sampling_enable *cmd = (void *)buf->data;
2907 struct bt_hci_rp_le_set_cl_cte_sampling_enable *rp;
2908 uint16_t sync_handle;
2909 uint8_t status;
2910
2911 sync_handle = sys_le16_to_cpu(cmd->sync_handle);
2912
2913 status = ll_df_set_cl_iq_sampling_enable(sync_handle,
2914 cmd->sampling_enable,
2915 cmd->slot_durations,
2916 cmd->max_sampled_cte,
2917 cmd->switch_pattern_len,
2918 cmd->ant_ids);
2919
2920 rp = hci_cmd_complete(evt, sizeof(*rp));
2921
2922 rp->status = status;
2923 rp->sync_handle = sys_cpu_to_le16(sync_handle);
2924 }
2925 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2926
2927 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) || \
2928 defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
iq_convert_12_to_8_bits(int16_t data)2929 static int8_t iq_convert_12_to_8_bits(int16_t data)
2930 {
2931 if (data == IQ_SAMPLE_SATURATED_16_BIT) {
2932 return IQ_SAMPLE_SATURATED_8_BIT;
2933 }
2934
2935 #if defined(CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB)
2936 return (data > INT8_MAX || data < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2937 : IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2938 #else /* !CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2939 int16_t data_conv = IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2940
2941 return (data_conv > INT8_MAX || data_conv < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2942 : (int8_t)data_conv;
2943 #endif /* CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2944 }
2945 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
2946 * || CONFIG_BT_CTLR_DF_CONN_CTE_RX
2947 */
2948
2949 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
/* Generate the HCI LE Connectionless IQ Report subevent from a received
 * IQ report node.
 *
 * The event is suppressed when the Host has masked LE meta events or this
 * specific subevent. samples_cnt == 0 encodes "insufficient resources";
 * otherwise at least one sample pair is emitted, using
 * BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE when no valid samples exist.
 */
static void le_df_connectionless_iq_report(struct pdu_data *pdu_rx,
					   struct node_rx_pdu *node_rx,
					   struct net_buf *buf)
{
	struct bt_hci_evt_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct ll_sync_set *sync = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	iq_report = (struct node_rx_iq_report *)node_rx;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

	/* If there is no LLL context and CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
	 * is enabled the controller is in the Direct Test Mode and may generate
	 * the Connectionless IQ Report.
	 */
	if (!lll && IS_ENABLED(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)) {
		/* Set sync_handle to 0x0FFF according to the BT Core 5.3 specification
		 * Vol 4 7.7.65.21
		 */
		sync_handle = 0x0FFF;
		/* Set periodic event counter to 0 since there is not periodic advertising train. */
		per_evt_counter = 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	else {
		sync = HDR_LLL2ULL(lll);

		/* TX LL thread has higher priority than RX thread. It may happen that
		 * host successfully disables CTE sampling in the meantime.
		 * It should be verified here, to avoid reporting IQ samples after
		 * the functionality was disabled or if sync was lost.
		 */
		if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) ||
		    !sync->timeout_reload) {
			/* Drop further processing of the event. */
			return;
		}

		/* Get the sync handle corresponding to the LLL context passed in the
		 * node rx footer field.
		 */
		sync_handle = ull_sync_handle_get(sync);
		per_evt_counter = iq_report->event_counter;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes the variable-length IQ sample array. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) +
			(samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);


	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* AoA reports the locally used slot duration; for AoD the slot
	 * duration follows from the CTE type.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			/* Convert each sample from the controller's 12-bit
			 * representation to the 8-bit HCI representation.
			 */
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3061 #endif /* defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) */
3062
3063 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
le_df_set_conn_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)3064 static void le_df_set_conn_cte_tx_params(struct net_buf *buf,
3065 struct net_buf **evt)
3066 {
3067 struct bt_hci_cp_le_set_conn_cte_tx_params *cmd = (void *)buf->data;
3068 struct bt_hci_rp_le_set_conn_cte_tx_params *rp;
3069 uint16_t handle, handle_le16;
3070 uint8_t status;
3071
3072 handle_le16 = cmd->handle;
3073 handle = sys_le16_to_cpu(handle_le16);
3074
3075 status = ll_df_set_conn_cte_tx_params(handle, cmd->cte_types,
3076 cmd->switch_pattern_len,
3077 cmd->ant_ids);
3078
3079 rp = hci_cmd_complete(evt, sizeof(*rp));
3080
3081 rp->status = status;
3082 rp->handle = handle_le16;
3083 }
3084 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
3085
3086 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
le_df_set_conn_cte_rx_params(struct net_buf * buf,struct net_buf ** evt)3087 static void le_df_set_conn_cte_rx_params(struct net_buf *buf, struct net_buf **evt)
3088 {
3089 struct bt_hci_cp_le_set_conn_cte_rx_params *cmd = (void *)buf->data;
3090 struct bt_hci_rp_le_set_conn_cte_rx_params *rp;
3091 uint16_t handle, handle_le16;
3092 uint8_t status;
3093
3094 handle_le16 = cmd->handle;
3095 handle = sys_le16_to_cpu(handle_le16);
3096
3097 status = ll_df_set_conn_cte_rx_params(handle, cmd->sampling_enable, cmd->slot_durations,
3098 cmd->switch_pattern_len, cmd->ant_ids);
3099
3100 rp = hci_cmd_complete(evt, sizeof(*rp));
3101
3102 rp->status = status;
3103 rp->handle = handle_le16;
3104 }
3105
/* Generate the HCI LE Connection IQ Report subevent from a received IQ
 * report node tied to a connection.
 *
 * The event is suppressed when the Host has masked LE meta events or this
 * specific subevent. samples_cnt == 0 encodes "insufficient resources";
 * otherwise at least one sample pair is emitted, using
 * BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE when no valid samples exist.
 */
static void le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	/* Without PHY update support the connection is always on 1M. */
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that host successfully
	 * disables CTE sampling in the meantime. It should be verified here, to avoid reporting
	 * IQ samples after the functionality was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes the variable-length IQ sample array. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTION_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* AoA reports the locally used slot duration; for AoD the slot
	 * duration follows from the CTE type.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			/* Convert each sample from the controller's 12-bit
			 * representation to the 8-bit HCI representation.
			 */
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3192 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
3193
3194 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
le_df_set_conn_cte_req_enable(struct net_buf * buf,struct net_buf ** evt)3195 static void le_df_set_conn_cte_req_enable(struct net_buf *buf, struct net_buf **evt)
3196 {
3197 struct bt_hci_cp_le_conn_cte_req_enable *cmd = (void *)buf->data;
3198 struct bt_hci_rp_le_conn_cte_req_enable *rp;
3199 uint16_t handle, handle_le16;
3200 uint8_t status;
3201
3202 handle_le16 = cmd->handle;
3203 handle = sys_le16_to_cpu(handle_le16);
3204
3205 status = ll_df_set_conn_cte_req_enable(handle, cmd->enable,
3206 sys_le16_to_cpu(cmd->cte_request_interval),
3207 cmd->requested_cte_length, cmd->requested_cte_type);
3208 rp = hci_cmd_complete(evt, sizeof(*rp));
3209
3210 rp->status = status;
3211 rp->handle = handle_le16;
3212 }
3213
/* Emit the HCI LE CTE Request Failed subevent for the given connection,
 * unless the Host has masked it out.
 */
static void le_df_cte_req_failed(uint8_t error_code, uint16_t handle, struct net_buf *buf)
{
	struct bt_hci_evt_le_cte_req_failed *sep;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CTE_REQUEST_FAILED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_CTE_REQUEST_FAILED, sizeof(*sep));
	sep->conn_handle = sys_cpu_to_le16(handle);
	sep->status = error_code;
}
3228 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
3229
3230 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
le_df_set_conn_cte_rsp_enable(struct net_buf * buf,struct net_buf ** evt)3231 static void le_df_set_conn_cte_rsp_enable(struct net_buf *buf, struct net_buf **evt)
3232 {
3233 struct bt_hci_cp_le_conn_cte_rsp_enable *cmd = (void *)buf->data;
3234 struct bt_hci_rp_le_conn_cte_rsp_enable *rp;
3235 uint16_t handle, handle_le16;
3236 uint8_t status;
3237
3238 handle_le16 = cmd->handle;
3239 handle = sys_le16_to_cpu(handle_le16);
3240
3241 status = ll_df_set_conn_cte_rsp_enable(handle, cmd->enable);
3242 rp = hci_cmd_complete(evt, sizeof(*rp));
3243
3244 rp->status = status;
3245 rp->handle = handle_le16;
3246 }
3247 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
3248
le_df_read_ant_inf(struct net_buf * buf,struct net_buf ** evt)3249 static void le_df_read_ant_inf(struct net_buf *buf, struct net_buf **evt)
3250 {
3251 struct bt_hci_rp_le_read_ant_info *rp;
3252 uint8_t max_switch_pattern_len;
3253 uint8_t switch_sample_rates;
3254 uint8_t max_cte_len;
3255 uint8_t num_ant;
3256
3257 ll_df_read_ant_inf(&switch_sample_rates, &num_ant,
3258 &max_switch_pattern_len, &max_cte_len);
3259
3260 rp = hci_cmd_complete(evt, sizeof(*rp));
3261
3262 rp->max_switch_pattern_len = max_switch_pattern_len;
3263 rp->switch_sample_rates = switch_sample_rates;
3264 rp->max_cte_len = max_cte_len;
3265 rp->num_ant = num_ant;
3266 rp->status = 0x00;
3267 }
3268 #endif /* CONFIG_BT_CTLR_DF */
3269
3270 #if defined(CONFIG_BT_CTLR_DTM_HCI)
le_rx_test(struct net_buf * buf,struct net_buf ** evt)3271 static void le_rx_test(struct net_buf *buf, struct net_buf **evt)
3272 {
3273 struct bt_hci_cp_le_rx_test *cmd = (void *)buf->data;
3274 uint8_t status;
3275
3276 status = ll_test_rx(cmd->rx_ch, BT_HCI_LE_RX_PHY_1M, BT_HCI_LE_MOD_INDEX_STANDARD,
3277 BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3278 BT_HCI_LE_TEST_SLOT_DURATION_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3279 NULL);
3280
3281 *evt = cmd_complete_status(status);
3282 }
3283
le_tx_test(struct net_buf * buf,struct net_buf ** evt)3284 static void le_tx_test(struct net_buf *buf, struct net_buf **evt)
3285 {
3286 struct bt_hci_cp_le_tx_test *cmd = (void *)buf->data;
3287 uint8_t status;
3288
3289 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload,
3290 BT_HCI_LE_TX_PHY_1M, BT_HCI_LE_TEST_CTE_DISABLED,
3291 BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3292 NULL, BT_HCI_TX_TEST_POWER_MAX_SET);
3293
3294 *evt = cmd_complete_status(status);
3295 }
3296
le_test_end(struct net_buf * buf,struct net_buf ** evt)3297 static void le_test_end(struct net_buf *buf, struct net_buf **evt)
3298 {
3299 struct bt_hci_rp_le_test_end *rp;
3300 uint16_t rx_pkt_count;
3301 uint8_t status;
3302
3303 status = ll_test_end(&rx_pkt_count);
3304
3305 rp = hci_cmd_complete(evt, sizeof(*rp));
3306 rp->status = status;
3307 rp->rx_pkt_count = sys_cpu_to_le16(rx_pkt_count);
3308 }
3309
le_enh_rx_test(struct net_buf * buf,struct net_buf ** evt)3310 static void le_enh_rx_test(struct net_buf *buf, struct net_buf **evt)
3311 {
3312 struct bt_hci_cp_le_enh_rx_test *cmd = (void *)buf->data;
3313 uint8_t status;
3314
3315 status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, BT_HCI_LE_TEST_CTE_DISABLED,
3316 BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SLOT_DURATION_ANY,
3317 BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL);
3318
3319 *evt = cmd_complete_status(status);
3320 }
3321
3322 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
le_rx_test_v3(struct net_buf * buf,struct net_buf ** evt)3323 static void le_rx_test_v3(struct net_buf *buf, struct net_buf **evt)
3324 {
3325 struct bt_hci_cp_le_rx_test_v3 *cmd = (void *)buf->data;
3326 uint8_t status;
3327
3328 status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, cmd->expected_cte_len,
3329 cmd->expected_cte_type, cmd->slot_durations, cmd->switch_pattern_len,
3330 cmd->ant_ids);
3331
3332 *evt = cmd_complete_status(status);
3333 }
3334 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
3335
le_enh_tx_test(struct net_buf * buf,struct net_buf ** evt)3336 static void le_enh_tx_test(struct net_buf *buf, struct net_buf **evt)
3337 {
3338 struct bt_hci_cp_le_enh_tx_test *cmd = (void *)buf->data;
3339 uint8_t status;
3340
3341 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3342 BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3343 BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL,
3344 BT_HCI_TX_TEST_POWER_MAX_SET);
3345
3346 *evt = cmd_complete_status(status);
3347 }
3348
3349 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
le_tx_test_v3(struct net_buf * buf,struct net_buf ** evt)3350 static void le_tx_test_v3(struct net_buf *buf, struct net_buf **evt)
3351 {
3352 struct bt_hci_cp_le_tx_test_v3 *cmd = (void *)buf->data;
3353 uint8_t status;
3354
3355 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3356 cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3357 BT_HCI_TX_TEST_POWER_MAX_SET);
3358
3359 *evt = cmd_complete_status(status);
3360 }
3361 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
3362
3363 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
le_tx_test_v4(struct net_buf * buf,struct net_buf ** evt)3364 static void le_tx_test_v4(struct net_buf *buf, struct net_buf **evt)
3365 {
3366 struct bt_hci_cp_le_tx_test_v4 *cmd = (void *)buf->data;
3367 struct bt_hci_cp_le_tx_test_v4_tx_power *tx_power = (void *)(buf->data +
3368 sizeof(struct bt_hci_cp_le_tx_test_v4) + cmd->switch_pattern_len);
3369 uint8_t status;
3370
3371 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3372 cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3373 tx_power->tx_power);
3374
3375 *evt = cmd_complete_status(status);
3376 }
3377 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
3378 #endif /* CONFIG_BT_CTLR_DTM_HCI */
3379
3380 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3381 #if defined(CONFIG_BT_BROADCASTER)
3382
le_set_adv_set_random_addr(struct net_buf * buf,struct net_buf ** evt)3383 static void le_set_adv_set_random_addr(struct net_buf *buf,
3384 struct net_buf **evt)
3385 {
3386 struct bt_hci_cp_le_set_adv_set_random_addr *cmd = (void *)buf->data;
3387 uint8_t status;
3388 uint8_t handle;
3389
3390 if (adv_cmds_ext_check(evt)) {
3391 return;
3392 }
3393
3394 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3395 if (status) {
3396 *evt = cmd_complete_status(status);
3397 return;
3398 }
3399
3400 status = ll_adv_aux_random_addr_set(handle, &cmd->bdaddr.val[0]);
3401
3402 *evt = cmd_complete_status(status);
3403 }
3404
/* HCI LE Set Extended Advertising Parameters.
 *
 * Validates the advertising handle, and when CONFIG_BT_CTLR_PARAM_CHECK
 * is enabled also the primary advertising interval range and the PHY
 * selections, then programs the set via ll_adv_params_set(). The command
 * complete parameters carry the selected Tx power.
 */
static void le_set_ext_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_param *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_ext_adv_param *rp;
	uint32_t min_interval;
	uint16_t evt_prop;
	uint8_t tx_pwr;
	uint8_t status;
	uint8_t phy_p;
	uint8_t phy_s;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	if (cmd->handle > BT_HCI_LE_ADV_HANDLE_MAX) {
		*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
		return;
	}

	evt_prop = sys_le16_to_cpu(cmd->props);
	/* Primary advertising interval is a 24-bit little-endian field. */
	min_interval = sys_get_le24(cmd->prim_min_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t max_interval =
			sys_get_le24(cmd->prim_max_interval);

		/* Compare advertising interval maximum with implementation
		 * supported advertising interval maximum value defined in the
		 * Kconfig CONFIG_BT_CTLR_ADV_INTERVAL_MAX.
		 * The interval check is bypassed only for legacy
		 * high-duty-cycle directed advertising.
		 */
		if ((!(evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) ||
		     !(evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) &&
		    ((min_interval > max_interval) ||
		     (min_interval < BT_HCI_LE_PRIM_ADV_INTERVAL_MIN) ||
		     (max_interval > CONFIG_BT_CTLR_ADV_INTERVAL_MAX))) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}

		/* Reject PHY values beyond Coded, and Coded PHY itself when
		 * the build does not support it.
		 */
		if ((cmd->prim_adv_phy > BT_HCI_LE_PHY_CODED) ||
		    (cmd->sec_adv_phy > BT_HCI_LE_PHY_CODED) ||
		    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
		     ((cmd->prim_adv_phy == BT_HCI_LE_PHY_CODED) ||
		      (cmd->sec_adv_phy == BT_HCI_LE_PHY_CODED)))) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}
	}

	/* Map the HCI handle to a set, allocating a new set if needed. */
	status = ll_adv_set_by_hci_handle_get_or_new(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	tx_pwr = cmd->tx_power;
	/* Convert the 1-based HCI PHY values to single-bit PHY masks. */
	phy_p = BIT(cmd->prim_adv_phy - 1);
	phy_s = BIT(cmd->sec_adv_phy - 1);

	status = ll_adv_params_set(handle, evt_prop, min_interval,
				   PDU_ADV_TYPE_EXT_IND, cmd->own_addr_type,
				   cmd->peer_addr.type, cmd->peer_addr.a.val,
				   cmd->prim_channel_map, cmd->filter_policy,
				   &tx_pwr, phy_p, cmd->sec_adv_max_skip, phy_s,
				   cmd->sid, cmd->scan_req_notify_enable);

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = status;
	/* tx_pwr is passed by pointer above, so the value echoed back may
	 * have been updated by ll_adv_params_set with the selected power.
	 */
	rp->tx_power = tx_pwr;
}
3477
le_set_ext_adv_data(struct net_buf * buf,struct net_buf ** evt)3478 static void le_set_ext_adv_data(struct net_buf *buf, struct net_buf **evt)
3479 {
3480 struct bt_hci_cp_le_set_ext_adv_data *cmd = (void *)buf->data;
3481 uint8_t status;
3482 uint8_t handle;
3483
3484 if (adv_cmds_ext_check(evt)) {
3485 return;
3486 }
3487
3488 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3489 if (status) {
3490 *evt = cmd_complete_status(status);
3491 return;
3492 }
3493
3494 status = ll_adv_aux_ad_data_set(handle, cmd->op, cmd->frag_pref,
3495 cmd->len, cmd->data);
3496
3497 *evt = cmd_complete_status(status);
3498 }
3499
le_set_ext_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)3500 static void le_set_ext_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
3501 {
3502 struct bt_hci_cp_le_set_ext_scan_rsp_data *cmd = (void *)buf->data;
3503 uint8_t status;
3504 uint8_t handle;
3505
3506 if (adv_cmds_ext_check(evt)) {
3507 return;
3508 }
3509
3510 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3511 if (status) {
3512 *evt = cmd_complete_status(status);
3513 return;
3514 }
3515
3516 status = ll_adv_aux_sr_data_set(handle, cmd->op, cmd->frag_pref,
3517 cmd->len, cmd->data);
3518
3519 *evt = cmd_complete_status(status);
3520 }
3521
/* HCI LE Set Extended Advertising Enable.
 *
 * set_num == 0 with enable set is invalid; set_num == 0 with enable
 * cleared disables all advertising sets. Otherwise each listed set is
 * enabled/disabled in order, stopping at the first failure.
 */
static void le_set_ext_adv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_enable *cmd = (void *)buf->data;
	struct bt_hci_ext_adv_set *s;
	uint8_t set_num;
	uint8_t status;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	set_num = cmd->set_num;
	if (!set_num) {
		if (cmd->enable) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}

		/* No sets listed and enable cleared: disable everything. */
		status = ll_adv_disable_all();

		*evt = cmd_complete_status(status);

		return;
	}

	/* Check for duplicate handles */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		for (uint8_t i = 0U; i < set_num - 1; i++) {
			for (uint8_t j = i + 1U; j < set_num; j++) {
				if (cmd->s[i].handle == cmd->s[j].handle) {
					*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
					return;
				}
			}
		}
	}

	s = (void *) cmd->s;
	do {
		status = ll_adv_set_by_hci_handle_get(s->handle, &handle);
		if (status) {
			break;
		}

		/* TODO: duration and events parameter use. */
#if defined(CONFIG_BT_HCI_MESH_EXT)
		status = ll_adv_enable(handle, cmd->enable, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_HCI_MESH_EXT */
		status = ll_adv_enable(handle, cmd->enable,
				       sys_le16_to_cpu(s->duration), s->max_ext_adv_evts);
#endif /* !CONFIG_BT_HCI_MESH_EXT */
		if (status) {
			/* TODO: how to handle succeeded ones before this
			 * error.
			 */
			break;
		}

		s++;
	} while (--set_num);

	*evt = cmd_complete_status(status);
}
3586
le_read_max_adv_data_len(struct net_buf * buf,struct net_buf ** evt)3587 static void le_read_max_adv_data_len(struct net_buf *buf, struct net_buf **evt)
3588 {
3589 struct bt_hci_rp_le_read_max_adv_data_len *rp;
3590 uint16_t max_adv_data_len;
3591
3592 if (adv_cmds_ext_check(evt)) {
3593 return;
3594 }
3595
3596 rp = hci_cmd_complete(evt, sizeof(*rp));
3597
3598 max_adv_data_len = ll_adv_aux_max_data_length_get();
3599
3600 rp->max_adv_data_len = sys_cpu_to_le16(max_adv_data_len);
3601 rp->status = 0x00;
3602 }
3603
le_read_num_adv_sets(struct net_buf * buf,struct net_buf ** evt)3604 static void le_read_num_adv_sets(struct net_buf *buf, struct net_buf **evt)
3605 {
3606 struct bt_hci_rp_le_read_num_adv_sets *rp;
3607
3608 if (adv_cmds_ext_check(evt)) {
3609 return;
3610 }
3611
3612 rp = hci_cmd_complete(evt, sizeof(*rp));
3613
3614 rp->num_sets = ll_adv_aux_set_count_get();
3615 rp->status = 0x00;
3616 }
3617
le_remove_adv_set(struct net_buf * buf,struct net_buf ** evt)3618 static void le_remove_adv_set(struct net_buf *buf, struct net_buf **evt)
3619 {
3620 struct bt_hci_cp_le_remove_adv_set *cmd = (void *)buf->data;
3621 uint8_t status;
3622 uint8_t handle;
3623
3624 if (adv_cmds_ext_check(evt)) {
3625 return;
3626 }
3627
3628 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3629 if (status) {
3630 *evt = cmd_complete_status(status);
3631 return;
3632 }
3633
3634 status = ll_adv_aux_set_remove(handle);
3635
3636 *evt = cmd_complete_status(status);
3637 }
3638
/* HCI LE Clear Advertising Sets: remove all advertising sets. */
static void le_clear_adv_sets(struct net_buf *buf, struct net_buf **evt)
{
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_adv_aux_set_clear());
}
3651
3652 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
/* HCI LE Set Periodic Advertising Parameters.
 *
 * With CONFIG_BT_CTLR_PARAM_CHECK the interval range is validated
 * against the specification minimum and the Kconfig-configured maximum;
 * a too-large maximum interval is clamped rather than rejected, as long
 * as the minimum interval is within the supported range.
 */
static void le_set_per_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_param *cmd = (void *)buf->data;
	uint16_t max_interval;
	uint16_t flags;
	uint8_t status;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	max_interval = sys_le16_to_cpu(cmd->max_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t min_interval =
			sys_le16_to_cpu(cmd->min_interval);

		if ((min_interval > max_interval) ||
		    (min_interval < BT_HCI_LE_PER_ADV_INTERVAL_MIN)) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}

		/* Compare periodic advertising interval with
		 * implementation supported periodic advertising interval
		 * maximum value defined in the Kconfig
		 * CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX.
		 */
		if (min_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}

		/* Clamp the requested maximum to the supported maximum. */
		if (max_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
			max_interval = CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX;
		}
	}

	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	flags = sys_le16_to_cpu(cmd->props);

	status = ll_adv_sync_param_set(handle, max_interval, flags);

	*evt = cmd_complete_status(status);
}
3704
le_set_per_adv_data(struct net_buf * buf,struct net_buf ** evt)3705 static void le_set_per_adv_data(struct net_buf *buf, struct net_buf **evt)
3706 {
3707 struct bt_hci_cp_le_set_per_adv_data *cmd = (void *)buf->data;
3708 uint8_t status;
3709 uint8_t handle;
3710
3711 if (adv_cmds_ext_check(evt)) {
3712 return;
3713 }
3714
3715 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3716 if (status) {
3717 *evt = cmd_complete_status(status);
3718 return;
3719 }
3720
3721 status = ll_adv_sync_ad_data_set(handle, cmd->op, cmd->len,
3722 cmd->data);
3723
3724 *evt = cmd_complete_status(status);
3725 }
3726
le_set_per_adv_enable(struct net_buf * buf,struct net_buf ** evt)3727 static void le_set_per_adv_enable(struct net_buf *buf, struct net_buf **evt)
3728 {
3729 struct bt_hci_cp_le_set_per_adv_enable *cmd = (void *)buf->data;
3730 uint8_t status;
3731 uint8_t handle;
3732
3733 if (adv_cmds_ext_check(evt)) {
3734 return;
3735 }
3736
3737 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3738 if (status) {
3739 *evt = cmd_complete_status(status);
3740 return;
3741 }
3742
3743 status = ll_adv_sync_enable(handle, cmd->enable);
3744
3745 *evt = cmd_complete_status(status);
3746 }
3747 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
3748 #endif /* CONFIG_BT_BROADCASTER */
3749
3750 #if defined(CONFIG_BT_OBSERVER)
/* HCI LE Set Extended Scan Parameters.
 *
 * Iterates over every PHY the controller supports (1M, and Coded when
 * built in), configuring each scan set: PHYs selected by the Host get
 * the supplied type/interval/window, the rest are disabled with zero
 * interval and window.
 */
static void le_set_ext_scan_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_param *cmd = (void *)buf->data;
	struct bt_hci_ext_scan_phy *p;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t status;
	uint8_t phys;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_scan_params_set function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	/* Reject an empty selection or any PHY bit the build cannot scan. */
	phys = cmd->phys;
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	own_addr_type = cmd->own_addr_type;
	filter_policy = cmd->filter_policy;
	p = cmd->p;

	/* Irrespective of enabled PHYs to scan for, ll_scan_params_set needs
	 * to be called to initialise the scan sets.
	 * Passing interval and window as 0, disable the particular scan set
	 * from being enabled.
	 */
	do {
		uint16_t interval;
		uint16_t window;
		uint8_t type;
		uint8_t phy;

		/* Get single PHY bit from the loop bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		/* Pass the PHY (1M or Coded) of scan set in MSbits of type
		 * parameter
		 */
		type = (phy << 1);

		/* If current PHY is one of the PHY in the Scanning_PHYs,
		 * pick the supplied scan type, interval and window.
		 */
		if (phys & phy) {
			type |= (p->type & 0x01);
			interval = sys_le16_to_cpu(p->interval);
			window = sys_le16_to_cpu(p->window);
			p++;
		} else {
			interval = 0U;
			window = 0U;
		}

		status = ll_scan_params_set(type, interval, window,
					    own_addr_type, filter_policy);
		if (status) {
			break;
		}

		/* Clear the lowest set bit; loop ends when all supported
		 * PHYs have been configured.
		 */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	*evt = cmd_complete_status(status);
}
3828
/* HCI LE Set Extended Scan Enable command handler.
 *
 * Updates host-side duplicate-filtering state (when compiled in) and
 * enables/disables scanning via ll_scan_enable(). A Command Complete
 * event carrying the resulting status is allocated into @p evt.
 */
static void le_set_ext_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	/* Reject if legacy advertising commands were used earlier */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering.
	 * NOTE: the `if (0)` arm exists only so the conditionally compiled
	 * ADI-support branches can splice into a single if/else chain.
	 */
	if (cmd->enable && cmd->filter_dup) {
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		} else if (dup_count == DUP_FILTER_DISABLED) {
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		} else if (!dup_scan) {
			/* Filtering was active for periodic sync only;
			 * reset the extended advertising filter state.
			 */
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	status = ll_scan_enable(cmd->enable, sys_le16_to_cpu(cmd->duration),
				sys_le16_to_cpu(cmd->period));

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 *       enabling of already enabled scanning shall succeed after
	 *       updates to filter duplicates is handled in the above
	 *       statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
3884
3885 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* HCI LE Periodic Advertising Create Sync command handler.
 *
 * Validates the command options against compiled-in controller features,
 * initializes duplicate-filter state when requested, and initiates
 * synchronization to a periodic advertising train via ll_sync_create().
 * A Command Status event with the result is allocated into @p evt.
 */
static void le_per_adv_create_sync(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_per_adv_create_sync *cmd = (void *)buf->data;
	uint16_t sync_timeout;
	uint8_t status;
	uint16_t skip;

	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Use of the Periodic Advertiser List requires the list feature */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST)) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* Duplicate filtering with reporting initially enabled requires
	 * ADI support in the controller.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
	    (cmd->options &
	     (BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED |
	      BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE)) ==
	     BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* FIXME: Check for HCI LE Set Periodic Advertising Receive Enable
	 *        command support and if reporting is initially disabled then
	 *        return error code Connection Failed to be Established /
	 *        Synchronization Timeout (0x3E).
	 */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
			dup_count = 0;
			dup_curr = 0U;
		} else {
			/* NOTE: Invalidate dup_ext_adv_mode array entries is
			 *       done when sync is established.
			 */
		}
	} else if (!dup_scan) {
		dup_count = DUP_FILTER_DISABLED;
	}
#endif

	skip = sys_le16_to_cpu(cmd->skip);
	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);

	/* The #if below splices one of two mutually exclusive condition
	 * arms into the same if/else statement.
	 */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	if ((cmd->cte_type & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_INVALID_VALUE) != 0) {
		status = BT_HCI_ERR_CMD_DISALLOWED;
#else
	if (cmd->cte_type != BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
		status = BT_HCI_ERR_INVALID_PARAM;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	} else {
		status = ll_sync_create(cmd->options, cmd->sid, cmd->addr.type, cmd->addr.a.val,
					skip, sync_timeout, cmd->cte_type);
	}
	*evt = cmd_status(status);
}
3951
3952 static void le_per_adv_create_sync_cancel(struct net_buf *buf,
3953 struct net_buf **evt, void **node_rx)
3954 {
3955 struct bt_hci_evt_cc_status *ccst;
3956 uint8_t status;
3957
3958 if (adv_cmds_ext_check(evt)) {
3959 return;
3960 }
3961
3962 status = ll_sync_create_cancel(node_rx);
3963
3964 ccst = hci_cmd_complete(evt, sizeof(*ccst));
3965 ccst->status = status;
3966 }
3967
3968 static void le_per_adv_terminate_sync(struct net_buf *buf, struct net_buf **evt)
3969 {
3970 struct bt_hci_cp_le_per_adv_terminate_sync *cmd = (void *)buf->data;
3971 struct bt_hci_evt_cc_status *ccst;
3972 uint16_t handle;
3973 uint8_t status;
3974
3975 if (adv_cmds_ext_check(evt)) {
3976 return;
3977 }
3978
3979 handle = sys_le16_to_cpu(cmd->handle);
3980
3981 status = ll_sync_terminate(handle);
3982
3983 ccst = hci_cmd_complete(evt, sizeof(*ccst));
3984 ccst->status = status;
3985 }
3986
/* HCI LE Set Periodic Advertising Receive Enable command handler.
 *
 * Enables or disables reporting (and optionally duplicate filtering)
 * for an established periodic sync via ll_sync_recv_enable(). Duplicate
 * filter state is only touched when the LL call succeeds. A Command
 * Complete event with the resulting status is allocated into @p evt.
 */
static void le_per_adv_recv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_recv_enable *cmd = (void *)buf->data;
	struct bt_hci_evt_cc_status *ccst;
	uint16_t handle;
	uint8_t status;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	handle = sys_le16_to_cpu(cmd->handle);

	status = ll_sync_recv_enable(handle, cmd->enable);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering only on success */
	if (!status) {
		if (cmd->enable &
		    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) {
			if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
				dup_count = 0;
				dup_curr = 0U;
			} else {
				/* NOTE: Invalidate dup_ext_adv_mode array
				 *       entries is done when sync is
				 *       established.
				 */
			}
		} else if (!dup_scan) {
			dup_count = DUP_FILTER_DISABLED;
		}
	}
#endif

	ccst = hci_cmd_complete(evt, sizeof(*ccst));
	ccst->status = status;
}
4024
4025 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4026 static void le_add_dev_to_pal(struct net_buf *buf, struct net_buf **evt)
4027 {
4028 struct bt_hci_cp_le_add_dev_to_per_adv_list *cmd = (void *)buf->data;
4029 uint8_t status;
4030
4031 if (adv_cmds_ext_check(evt)) {
4032 return;
4033 }
4034
4035 status = ll_pal_add(&cmd->addr, cmd->sid);
4036
4037 *evt = cmd_complete_status(status);
4038 }
4039
4040 static void le_rem_dev_from_pal(struct net_buf *buf, struct net_buf **evt)
4041 {
4042 struct bt_hci_cp_le_rem_dev_from_per_adv_list *cmd = (void *)buf->data;
4043 uint8_t status;
4044
4045 if (adv_cmds_ext_check(evt)) {
4046 return;
4047 }
4048
4049 status = ll_pal_remove(&cmd->addr, cmd->sid);
4050
4051 *evt = cmd_complete_status(status);
4052 }
4053
/* HCI LE Clear Periodic Advertiser List command handler.
 *
 * Empties the controller's Periodic Advertiser List. A Command Complete
 * event with the resulting status is allocated into @p evt. @p buf
 * carries no parameters for this command.
 */
static void le_clear_pal(struct net_buf *buf, struct net_buf **evt)
{
	/* Extended and legacy advertising commands are mutually exclusive */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_pal_clear());
}
4066
4067 static void le_read_pal_size(struct net_buf *buf, struct net_buf **evt)
4068 {
4069 struct bt_hci_rp_le_read_per_adv_list_size *rp;
4070
4071 if (adv_cmds_ext_check(evt)) {
4072 return;
4073 }
4074
4075 rp = hci_cmd_complete(evt, sizeof(*rp));
4076 rp->status = 0x00;
4077
4078 rp->list_size = ll_pal_size_get();
4079 }
4080 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4081 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4082 #endif /* CONFIG_BT_OBSERVER */
4083
4084 #if defined(CONFIG_BT_CENTRAL)
/* HCI LE Extended Create Connection command handler.
 *
 * Walks every controller-supported initiating PHY: for PHYs selected by
 * the Host it validates the supplied parameters (when parameter checking
 * is enabled) and calls ll_create_connection(); for unselected PHYs it
 * resets the corresponding scan set. Finally enables the initiator via
 * ll_connect_enable(). A Command Status event is allocated into @p evt.
 */
static void le_ext_create_connection(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_ext_create_conn *cmd = (void *)buf->data;
	struct bt_hci_ext_conn_phy *p;
	uint8_t peer_addr_type;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t *peer_addr;
	uint8_t status;
	uint8_t phys;

	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_create_connection function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	phys = cmd->phys;

	/* Ignore Scan Interval and Scan Window, and ignore scanning if
	 * Initiating PHY is set for LE 2M PHY
	 * Refer to Bluetooth Core Specification Version 5.4 Vol 4, Part E
	 * 7.8.66 LE Extended Create Connection command
	 */
	phys &= ~BT_HCI_LE_EXT_SCAN_PHY_2M;

	/* Check if unsupported PHY requested for scanning */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	filter_policy = cmd->filter_policy;
	own_addr_type = cmd->own_addr_type;
	peer_addr_type = cmd->peer_addr.type;
	peer_addr = cmd->peer_addr.a.val;
	p = cmd->p;

	/* Loop executes at least once: phys_bitmask always has the 1M bit
	 * set. The parameter pointer p advances only past entries the Host
	 * supplied, i.e. one per bit set in the (2M-masked) phys.
	 */
	do {
		uint16_t supervision_timeout;
		uint16_t conn_interval_max;
		uint16_t scan_interval;
		uint16_t conn_latency;
		uint16_t scan_window;
		uint8_t phy;

		/* Get single PHY bit from the loop bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		if (phys & phy) {
			scan_interval = sys_le16_to_cpu(p->scan_interval);
			scan_window = sys_le16_to_cpu(p->scan_window);
			conn_interval_max =
				sys_le16_to_cpu(p->conn_interval_max);
			conn_latency = sys_le16_to_cpu(p->conn_latency);
			supervision_timeout =
				sys_le16_to_cpu(p->supervision_timeout);

			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
				status = check_cconn_params(true, scan_interval,
							    scan_window,
							    conn_interval_max,
							    conn_latency,
							    supervision_timeout);
				if (status) {
					*evt = cmd_status(status);
					return;
				}
			}

			status = ll_create_connection(scan_interval,
						      scan_window,
						      filter_policy,
						      peer_addr_type,
						      peer_addr,
						      own_addr_type,
						      conn_interval_max,
						      conn_latency,
						      supervision_timeout,
						      phy);
			p++;
		} else {
			uint8_t type;

			type = (phy << 1);
			/* NOTE: Pass invalid interval value to reset the PHY
			 *       value in the scan instance so not to start
			 *       scanning on the unselected PHY.
			 */
			status = ll_scan_params_set(type, 0, 0, 0, 0);
		}

		if (status) {
			*evt = cmd_status(status);
			return;
		}

		/* Clear the least significant bit processed above */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	status = ll_connect_enable(phys & BT_HCI_LE_EXT_SCAN_PHY_CODED);

	*evt = cmd_status(status);
}
4198 #endif /* CONFIG_BT_CENTRAL */
4199 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4200
4201 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4202 static void le_cis_request(struct pdu_data *pdu_data,
4203 struct node_rx_pdu *node_rx,
4204 struct net_buf *buf)
4205 {
4206 struct bt_hci_evt_le_cis_req *sep;
4207 struct node_rx_conn_iso_req *req;
4208 void *node;
4209
4210 /* Check for pdu field being aligned before accessing CIS established
4211 * event.
4212 */
4213 node = pdu_data;
4214 LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4215
4216 req = node;
4217 if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS)) ||
4218 !(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4219 !(le_event_mask & BT_EVT_MASK_LE_CIS_REQ)) {
4220 ll_cis_reject(req->cis_handle, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE);
4221 return;
4222 }
4223
4224 sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_REQ, sizeof(*sep));
4225 sep->acl_handle = sys_cpu_to_le16(node_rx->hdr.handle);
4226 sep->cis_handle = sys_cpu_to_le16(req->cis_handle);
4227 sep->cig_id = req->cig_id;
4228 sep->cis_id = req->cis_id;
4229 }
4230 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4231
4232 #if defined(CONFIG_BT_CTLR_CONN_ISO)
/* Generate an HCI LE CIS Established event for a CIS setup attempt.
 *
 * Emits nothing when the Host has masked the event. On failure where
 * the CIS instance was already released (no group), only the status and
 * connection handle fields of the event are filled before returning;
 * on success all CIG/CIS timing and PHY parameters are reported.
 */
static void le_cis_established(struct pdu_data *pdu_data,
			       struct node_rx_pdu *node_rx,
			       struct net_buf *buf)
{
	struct lll_conn_iso_stream_rxtx *lll_cis_c;
	struct lll_conn_iso_stream_rxtx *lll_cis_p;
	struct bt_hci_evt_le_cis_established *sep;
	struct lll_conn_iso_stream *lll_cis;
	struct node_rx_conn_iso_estab *est;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	bool is_central;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CIS_ESTABLISHED)) {
		return;
	}

	cis = node_rx->rx_ftr.param;
	cig = cis->group;

	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_ESTABLISHED, sizeof(*sep));

	/* Check for pdu field being aligned before accessing CIS established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));

	est = node;
	sep->status = est->status;
	sep->conn_handle = sys_cpu_to_le16(est->cis_handle);

	if (!cig) {
		/* CIS was not established and instance was released */
		return;
	}

	lll_cis = &cis->lll;
	is_central = cig->lll.role == BT_CONN_ROLE_CENTRAL;
	/* Map local tx/rx direction onto Central-to-Peripheral (c) and
	 * Peripheral-to-Central (p) event fields based on our role.
	 */
	lll_cis_c = is_central ? &lll_cis->tx : &lll_cis->rx;
	lll_cis_p = is_central ? &lll_cis->rx : &lll_cis->tx;

	sys_put_le24(cig->sync_delay, sep->cig_sync_delay);
	sys_put_le24(cis->sync_delay, sep->cis_sync_delay);
	sys_put_le24(cig->c_latency, sep->c_latency);
	sys_put_le24(cig->p_latency, sep->p_latency);
	sep->c_phy = find_lsb_set(lll_cis_c->phy);
	sep->p_phy = find_lsb_set(lll_cis_p->phy);
	sep->nse = lll_cis->nse;
	sep->c_bn = lll_cis_c->bn;
	sep->p_bn = lll_cis_p->bn;
	sep->c_ft = lll_cis_c->ft;
	sep->p_ft = lll_cis_p->ft;
	sep->c_max_pdu = sys_cpu_to_le16(lll_cis_c->max_pdu);
	sep->p_max_pdu = sys_cpu_to_le16(lll_cis_p->max_pdu);
	sep->interval = sys_cpu_to_le16(cig->iso_interval);

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	/* One fewer CIS creation outstanding on the central */
	if (is_central) {
		cis_pending_count--;
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
4298 #endif /* CONFIG_BT_CTLR_CONN_ISO */
4299
/* Dispatch an LE (OGF 0x08) HCI command to its handler.
 *
 * @param ocf     Opcode Command Field of the received LE command.
 * @param cmd     Command buffer whose data starts at the parameters.
 * @param evt     Out: Command Complete/Status event allocated by the
 *                invoked handler.
 * @param node_rx Out: optional rx node released by some handlers.
 *
 * @return 0 when a handler was invoked, -EINVAL for an unknown or
 *         not-compiled-in OCF.
 */
static int controller_cmd_handle(uint16_t ocf, struct net_buf *cmd,
				 struct net_buf **evt, void **node_rx)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_LE_SET_EVENT_MASK):
		le_set_event_mask(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE):
		le_read_buffer_size(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2):
		le_read_buffer_size_v2(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_FEATURES):
		le_read_local_features(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_RANDOM_ADDRESS):
		le_set_random_address(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	case BT_OCF(BT_HCI_OP_LE_READ_FAL_SIZE):
		le_read_fal_size(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CLEAR_FAL):
		le_clear_fal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_FAL):
		le_add_dev_to_fal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_FAL):
		le_rem_dev_from_fal(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

#if defined(CONFIG_BT_CTLR_CRYPTO)
	case BT_OCF(BT_HCI_OP_LE_ENCRYPT):
		le_encrypt(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CRYPTO */

	case BT_OCF(BT_HCI_OP_LE_RAND):
		le_rand(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_SUPP_STATES):
		le_read_supp_states(cmd, evt);
		break;

	/* Legacy advertising commands */
#if defined(CONFIG_BT_BROADCASTER)
	case BT_OCF(BT_HCI_OP_LE_SET_ADV_PARAM):
		le_set_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_ADV_CHAN_TX_POWER):
		le_read_adv_chan_tx_power(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_ADV_DATA):
		le_set_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_RSP_DATA):
		le_set_scan_rsp_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_ADV_ENABLE):
		le_set_adv_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG):
		le_create_big(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG_TEST):
		le_create_big_test(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_TERMINATE_BIG):
		le_terminate_big(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_BROADCASTER */

	/* Legacy scanning commands */
#if defined(CONFIG_BT_OBSERVER)
	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_PARAM):
		le_set_scan_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_ENABLE):
		le_set_scan_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	case BT_OCF(BT_HCI_OP_LE_BIG_CREATE_SYNC):
		le_big_create_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_BIG_TERMINATE_SYNC):
		le_big_terminate_sync(cmd, evt, node_rx);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_OBSERVER */

	/* Central role: connection establishment and encryption */
#if defined(CONFIG_BT_CENTRAL)
	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN):
		le_create_connection(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN_CANCEL):
		le_create_conn_cancel(cmd, evt, node_rx);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF):
		le_set_host_chan_classif(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case BT_OCF(BT_HCI_OP_LE_START_ENCRYPTION):
		le_start_encryption(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS):
		le_set_cig_parameters(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS_TEST):
		le_set_cig_params_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_CREATE_CIS):
		le_create_cis(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REMOVE_CIG):
		le_remove_cig(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
#endif /* CONFIG_BT_CENTRAL */

	/* Peripheral role: LTK replies and CIS accept/reject */
#if defined(CONFIG_BT_PERIPHERAL)
#if defined(CONFIG_BT_CTLR_LE_ENC)
	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_REPLY):
		le_ltk_req_reply(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY):
		le_ltk_req_neg_reply(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case BT_OCF(BT_HCI_OP_LE_ACCEPT_CIS):
		le_accept_cis(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REJECT_CIS):
		le_reject_cis(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case BT_OCF(BT_HCI_OP_LE_REQ_PEER_SC):
		le_req_peer_sca(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

	/* ISO data path and test commands */
#if defined(CONFIG_BT_CTLR_ISO)
	case BT_OCF(BT_HCI_OP_LE_SETUP_ISO_PATH):
		le_setup_iso_path(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REMOVE_ISO_PATH):
		le_remove_iso_path(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ISO_TEST_END):
		le_iso_test_end(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_ISO_TRANSMIT_TEST):
		le_iso_transmit_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_ISO_TX_SYNC):
		le_read_iso_tx_sync(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_ISO_RECEIVE_TEST):
		le_iso_receive_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ISO_READ_TEST_COUNTERS):
		le_iso_read_test_counters(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
	case BT_OCF(BT_HCI_OP_LE_READ_ISO_LINK_QUALITY):
		le_read_iso_link_quality(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
	case BT_OCF(BT_HCI_OP_LE_SET_HOST_FEATURE):
		le_set_host_feature(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */

	/* Connection maintenance commands */
#if defined(CONFIG_BT_CONN)
	case BT_OCF(BT_HCI_OP_LE_READ_CHAN_MAP):
		le_read_chan_map(cmd, evt);
		break;

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
	case BT_OCF(BT_HCI_OP_LE_READ_REMOTE_FEATURES):
		le_read_remote_features(cmd, evt);
		break;
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */

	case BT_OCF(BT_HCI_OP_LE_CONN_UPDATE):
		le_conn_update(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY):
		le_conn_param_req_reply(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY):
		le_conn_param_req_neg_reply(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case BT_OCF(BT_HCI_OP_LE_SET_DATA_LEN):
		le_set_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_DEFAULT_DATA_LEN):
		le_read_default_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN):
		le_write_default_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_MAX_DATA_LEN):
		le_read_max_data_len(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	case BT_OCF(BT_HCI_OP_LE_READ_PHY):
		le_read_phy(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_DEFAULT_PHY):
		le_set_default_phy(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PHY):
		le_set_phy(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CONN */

	/* Extended advertising, scanning and periodic sync commands */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
	case BT_OCF(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR):
		le_set_adv_set_random_addr(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_PARAM):
		le_set_ext_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_DATA):
		le_set_ext_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_RSP_DATA):
		le_set_ext_scan_rsp_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE):
		le_set_ext_adv_enable(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN):
		le_read_max_adv_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_NUM_ADV_SETS):
		le_read_num_adv_sets(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REMOVE_ADV_SET):
		le_remove_adv_set(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_CLEAR_ADV_SETS):
		le_clear_adv_sets(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_PARAM):
		le_set_per_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_DATA):
		le_set_per_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_ENABLE):
		le_set_per_adv_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM):
		le_set_ext_scan_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE):
		le_set_ext_scan_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC):
		le_per_adv_create_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL):
		le_per_adv_create_sync_cancel(cmd, evt, node_rx);
		break;

	case BT_OCF(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC):
		le_per_adv_terminate_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE):
		le_per_adv_recv_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST):
		le_add_dev_to_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST):
		le_rem_dev_from_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST):
		le_clear_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_PER_ADV_LIST_SIZE):
		le_read_pal_size(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_CENTRAL)
	case BT_OCF(BT_HCI_OP_LE_EXT_CREATE_CONN):
		le_ext_create_connection(cmd, evt);
		break;
#endif /* CONFIG_BT_CENTRAL */
#endif /* CONFIG_BT_CONN */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Controller-based privacy (resolving list) commands */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_RL):
		le_add_dev_to_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_RL):
		le_rem_dev_from_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_CLEAR_RL):
		le_clear_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_RL_SIZE):
		le_read_rl_size(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_PEER_RPA):
		le_read_peer_rpa(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_RPA):
		le_read_local_rpa(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_ADDR_RES_ENABLE):
		le_set_addr_res_enable(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_RPA_TIMEOUT):
		le_set_rpa_timeout(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_PRIVACY_MODE):
		le_set_privacy_mode(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	case BT_OCF(BT_HCI_OP_LE_READ_TX_POWER):
		le_read_tx_power(cmd, evt);
		break;

	/* Direction finding (CTE) commands */
#if defined(CONFIG_BT_CTLR_DF)
#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_PARAMS):
		le_df_set_cl_cte_tx_params(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_ENABLE):
		le_df_set_cl_cte_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_SAMPLING_ENABLE):
		le_df_set_cl_iq_sampling_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	case BT_OCF(BT_HCI_OP_LE_READ_ANT_INFO):
		le_df_read_ant_inf(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_TX_PARAMS):
		le_df_set_conn_cte_tx_params(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_RX_PARAMS):
		le_df_set_conn_cte_rx_params(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_REQ_ENABLE):
		le_df_set_conn_cte_req_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_RSP_ENABLE):
		le_df_set_conn_cte_rsp_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#endif /* CONFIG_BT_CTLR_DF */

	/* Direct Test Mode commands */
#if defined(CONFIG_BT_CTLR_DTM_HCI)
	case BT_OCF(BT_HCI_OP_LE_RX_TEST):
		le_rx_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_TX_TEST):
		le_tx_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_TEST_END):
		le_test_end(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ENH_RX_TEST):
		le_enh_rx_test(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
	case BT_OCF(BT_HCI_OP_LE_RX_TEST_V3):
		le_rx_test_v3(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
	case BT_OCF(BT_HCI_OP_LE_ENH_TX_TEST):
		le_enh_tx_test(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V3):
		le_tx_test_v3(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
#if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V4):
		le_tx_test_v4(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
#endif /* CONFIG_BT_CTLR_DTM_HCI */

	default:
		return -EINVAL;
	}

	return 0;
}
4795
4796 #if defined(CONFIG_BT_HCI_VS)
4797 static void vs_read_version_info(struct net_buf *buf, struct net_buf **evt)
4798 {
4799 struct bt_hci_rp_vs_read_version_info *rp;
4800
4801 rp = hci_cmd_complete(evt, sizeof(*rp));
4802
4803 rp->status = 0x00;
4804 rp->hw_platform = sys_cpu_to_le16(BT_HCI_VS_HW_PLAT);
4805 rp->hw_variant = sys_cpu_to_le16(BT_HCI_VS_HW_VAR);
4806
4807 rp->fw_variant = 0U;
4808 rp->fw_version = (KERNEL_VERSION_MAJOR & 0xff);
4809 rp->fw_revision = sys_cpu_to_le16(KERNEL_VERSION_MINOR);
4810 rp->fw_build = sys_cpu_to_le32(KERNEL_PATCHLEVEL & 0xffff);
4811 }
4812
/* Vendor-specific Read Supported Commands command handler.
 *
 * Allocates a Command Complete event into @p evt with a bitmask of the
 * vendor-specific commands this build supports; optional bits depend on
 * the enabled Kconfig options.
 */
static void vs_read_supported_commands(struct net_buf *buf,
				       struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_supported_commands *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));

	rp->status = 0x00;
	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));

	/* Set Version Information, Supported Commands, Supported Features. */
	rp->commands[0] |= BIT(0) | BIT(1) | BIT(2);
	/* Write BD_ADDR, Read Build Info */
	rp->commands[0] |= BIT(5) | BIT(7);
	/* Read Static Addresses, Read Key Hierarchy Roots */
	rp->commands[1] |= BIT(0) | BIT(1);
#if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
	/* Set Scan Request Reports */
	rp->commands[1] |= BIT(4);
#endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	/* Write Tx Power, Read Tx Power */
	rp->commands[1] |= BIT(5) | BIT(6);
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	/* Read Supported USB Transport Modes */
	rp->commands[1] |= BIT(7);
	/* Set USB Transport Mode */
	rp->commands[2] |= BIT(0);
#endif /* USB_DEVICE_BLUETOOTH_VS_H4 */
}
4844
4845 static void vs_read_supported_features(struct net_buf *buf,
4846 struct net_buf **evt)
4847 {
4848 struct bt_hci_rp_vs_read_supported_features *rp;
4849
4850 rp = hci_cmd_complete(evt, sizeof(*rp));
4851
4852 rp->status = 0x00;
4853 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
4854 }
4855
/* Weak default for the vendor hook that fills in static random
 * addresses. This stub provides no addresses; boards/SoCs override it
 * to expose factory-programmed static addresses.
 *
 * @param addrs Output array of static address entries (unused here).
 * @param size  Capacity of @p addrs (unused here).
 *
 * @return Number of addresses written, always 0 for this default.
 */
uint8_t __weak hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[],
					   uint8_t size)
{
	ARG_UNUSED(addrs);
	ARG_UNUSED(size);

	return 0;
}
4864
4865 static void vs_write_bd_addr(struct net_buf *buf, struct net_buf **evt)
4866 {
4867 struct bt_hci_cp_vs_write_bd_addr *cmd = (void *)buf->data;
4868
4869 ll_addr_set(0, &cmd->bdaddr.val[0]);
4870
4871 *evt = cmd_complete_status(0x00);
4872 }
4873
/* Respond to the VS Read Build Info command with a build information
 * string composed of the Zephyr kernel version and the Kconfig-provided
 * vendor suffix.
 */
static void vs_read_build_info(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_build_info *rp;

/* Build info string: kernel version plus user-configurable suffix. */
#define HCI_VS_BUILD_INFO "Zephyr OS v" \
	KERNEL_VERSION_STRING CONFIG_BT_CTLR_HCI_VS_BUILD_INFO

	const char build_info[] = HCI_VS_BUILD_INFO;

/* Worst-case HCI event size needed to carry the whole build info string
 * (event header + Command Complete header + response header + string).
 */
#define BUILD_INFO_EVT_LEN (sizeof(struct bt_hci_evt_hdr) + \
			    sizeof(struct bt_hci_evt_cmd_complete) + \
			    sizeof(struct bt_hci_rp_vs_read_build_info) + \
			    sizeof(build_info))

	/* Fail the build if event Rx buffers cannot hold the full string. */
	BUILD_ASSERT(CONFIG_BT_BUF_EVT_RX_SIZE >= BUILD_INFO_EVT_LEN);

	rp = hci_cmd_complete(evt, sizeof(*rp) + sizeof(build_info));
	rp->status = 0x00;
	/* sizeof(build_info) includes the NUL terminator. */
	memcpy(rp->info, build_info, sizeof(build_info));
}
4894
4895 void __weak hci_vendor_read_key_hierarchy_roots(uint8_t ir[16], uint8_t er[16])
4896 {
4897 /* Mark IR as invalid */
4898 (void)memset(ir, 0x00, 16);
4899
4900 /* Mark ER as invalid */
4901 (void)memset(er, 0x00, 16);
4902 }
4903
4904 static void vs_read_static_addrs(struct net_buf *buf, struct net_buf **evt)
4905 {
4906 struct bt_hci_rp_vs_read_static_addrs *rp;
4907
4908 rp = hci_cmd_complete(evt, sizeof(*rp) +
4909 sizeof(struct bt_hci_vs_static_addr));
4910 rp->status = 0x00;
4911 rp->num_addrs = hci_vendor_read_static_addr(rp->a, 1);
4912 }
4913
4914 static void vs_read_key_hierarchy_roots(struct net_buf *buf,
4915 struct net_buf **evt)
4916 {
4917 struct bt_hci_rp_vs_read_key_hierarchy_roots *rp;
4918
4919 rp = hci_cmd_complete(evt, sizeof(*rp));
4920 rp->status = 0x00;
4921 hci_vendor_read_key_hierarchy_roots(rp->ir, rp->er);
4922 }
4923
4924 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
4925 static void vs_set_min_used_chans(struct net_buf *buf, struct net_buf **evt)
4926 {
4927 struct bt_hci_cp_vs_set_min_num_used_chans *cmd = (void *)buf->data;
4928 uint16_t handle = sys_le16_to_cpu(cmd->handle);
4929 uint8_t status;
4930
4931 status = ll_set_min_used_chans(handle, cmd->phys, cmd->min_used_chans);
4932
4933 *evt = cmd_complete_status(status);
4934 }
4935 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
4936
4937 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
4938 static void vs_set_scan_req_reports(struct net_buf *buf, struct net_buf **evt)
4939 {
4940 struct bt_hci_cp_vs_set_scan_req_reports *cmd = (void *)buf->data;
4941
4942 if (cmd->enable) {
4943 vs_events_mask |= BT_EVT_MASK_VS_SCAN_REQ_RX;
4944 } else {
4945 vs_events_mask &= ~BT_EVT_MASK_VS_SCAN_REQ_RX;
4946 }
4947 *evt = cmd_complete_status(0x00);
4948 }
4949 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
4950
4951 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
4952 static void vs_write_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4953 {
4954 struct bt_hci_cp_vs_write_tx_power_level *cmd = (void *)buf->data;
4955 struct bt_hci_rp_vs_write_tx_power_level *rp;
4956 uint8_t handle_type;
4957 uint16_t handle;
4958 uint8_t status;
4959
4960 handle_type = cmd->handle_type;
4961 handle = sys_le16_to_cpu(cmd->handle);
4962
4963 rp = hci_cmd_complete(evt, sizeof(*rp));
4964 rp->selected_tx_power = cmd->tx_power_level;
4965
4966 status = ll_tx_pwr_lvl_set(handle_type, handle, &rp->selected_tx_power);
4967
4968 rp->status = status;
4969 rp->handle_type = handle_type;
4970 rp->handle = sys_cpu_to_le16(handle);
4971 }
4972
4973 static void vs_read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4974 {
4975 struct bt_hci_cp_vs_read_tx_power_level *cmd = (void *)buf->data;
4976 struct bt_hci_rp_vs_read_tx_power_level *rp;
4977 uint8_t handle_type;
4978 uint16_t handle;
4979 uint8_t status;
4980
4981 handle_type = cmd->handle_type;
4982 handle = sys_le16_to_cpu(cmd->handle);
4983
4984 rp = hci_cmd_complete(evt, sizeof(*rp));
4985
4986 status = ll_tx_pwr_lvl_get(handle_type, handle, 0, &rp->tx_power_level);
4987
4988 rp->status = status;
4989 rp->handle_type = handle_type;
4990 rp->handle = sys_cpu_to_le16(handle);
4991 }
4992 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
4993
4994 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* A memory pool for vendor-specific events used for fatal error reporting. */
4996 NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE,
4997 sizeof(struct bt_buf_data), NULL);
4998
/* Convenience alias for the Controller HCI implementation. The Controller is
 * built for one particular architecture, so this alias avoids conditional
 * compilation in the code below. The Host may not know which hardware
 * architecture the Controller runs on, hence the CPU data types for all
 * supported architectures must be available at build time, and the alias is
 * therefore defined here.
 */
5005 #if defined(CONFIG_CPU_CORTEX_M)
5006 typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;
5007
5008 static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
5009 const struct arch_esf *esf)
5010 {
5011 cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
5012 cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
5013 cpu_data->a3 = sys_cpu_to_le32(esf->basic.a3);
5014 cpu_data->a4 = sys_cpu_to_le32(esf->basic.a4);
5015 cpu_data->ip = sys_cpu_to_le32(esf->basic.ip);
5016 cpu_data->lr = sys_cpu_to_le32(esf->basic.lr);
5017 cpu_data->xpsr = sys_cpu_to_le32(esf->basic.xpsr);
5018 }
5019 #endif /* CONFIG_CPU_CORTEX_M */
5020
5021 static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
5022 {
5023 struct net_buf *buf;
5024
5025 buf = net_buf_alloc(&vs_err_tx_pool, K_FOREVER);
5026 if (buf) {
5027 struct bt_hci_evt_le_meta_event *me;
5028 struct bt_hci_evt_hdr *hdr;
5029
5030 net_buf_reserve(buf, BT_BUF_RESERVE);
5031 bt_buf_set_type(buf, BT_BUF_EVT);
5032
5033 hdr = net_buf_add(buf, sizeof(*hdr));
5034 hdr->evt = BT_HCI_EVT_VENDOR;
5035 hdr->len = len + sizeof(*me);
5036
5037 me = net_buf_add(buf, sizeof(*me));
5038 me->subevent = subevt;
5039 }
5040
5041 return buf;
5042 }
5043
/* Build the vendor-specific Fatal Error event carrying the fault reason and
 * the CPU register stack frame captured at fault time.
 *
 * @param reason Fault reason code.
 * @param esf    Exception stack frame to serialize.
 * @return Event buffer ready to send, or NULL on allocation failure.
 */
struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf)
{
	/* Prepare vendor specific HCI Fatal Error event */
	struct bt_hci_vs_fatal_error_stack_frame *sf;
	bt_hci_vs_fatal_error_cpu_data *cpu_data;
	struct net_buf *buf;

	buf = vs_err_evt_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_STACK_FRAME,
				sizeof(*sf) + sizeof(*cpu_data));
	if (buf != NULL) {
		sf = net_buf_add(buf, (sizeof(*sf) + sizeof(*cpu_data)));
		sf->reason = sys_cpu_to_le32(reason);
		/* NOTE(review): cpu_type is hard-coded; assumes Cortex-M is
		 * the only architecture with a cpu_data fill implementation
		 * in this build -- confirm if more are added.
		 */
		sf->cpu_type = BT_HCI_EVT_VS_ERROR_CPU_TYPE_CORTEX_M;

		vs_err_fatal_cpu_data_fill(
			(bt_hci_vs_fatal_error_cpu_data *)sf->cpu_data, esf);
	} else {
		LOG_ERR("Can't create HCI Fatal Error event");
	}

	return buf;
}
5066
/* Build a vendor-specific Fatal Error event of the given data type (TRACE
 * or CTRL_ASSERT) containing: the program counter (TRACE only), the file
 * name (basename of file_path, NUL-terminated, truncated if the event would
 * exceed BT_BUF_EVT_RX_SIZE) and the line number.
 *
 * @return Event buffer, or NULL when file_path is NULL/empty or buffer
 *         allocation fails.
 */
static struct net_buf *hci_vs_err_trace_create(uint8_t data_type,
					       const char *file_path,
					       uint32_t line, uint64_t pc)
{
	uint32_t file_name_len = 0U, pos = 0U;
	struct net_buf *buf = NULL;

	if (file_path) {
		/* Extract file name from a path */
		while (file_path[file_name_len] != '\0') {
			if (file_path[file_name_len] == '/') {
				pos = file_name_len + 1;
			}
			file_name_len++;
		}
		/* pos is the index just past the last '/'; step over the
		 * directory part so only the basename remains.
		 */
		file_path += pos;
		file_name_len -= pos;

		/* If file name was found in file_path, in other words: file_path is not empty
		 * string and is not `foo/bar/`.
		 */
		if (file_name_len) {
			/* Total data length: len = file name strlen + \0 + sizeof(line number)
			 * Maximum length of an HCI event data is BT_BUF_EVT_RX_SIZE. If total data
			 * length exceeds this maximum, truncate file name.
			 */
			uint32_t data_len = 1 + sizeof(line);

			/* If a buffer is created for a TRACE data, include sizeof(pc) in total
			 * length.
			 */
			if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
				data_len += sizeof(pc);
			}

			if (data_len + file_name_len > BT_BUF_EVT_RX_SIZE) {
				uint32_t overflow_len =
					file_name_len + data_len - BT_BUF_EVT_RX_SIZE;

				/* Truncate the file name length by number of overflow bytes */
				file_name_len -= overflow_len;
			}

			/* Get total event data length including file name length */
			data_len += file_name_len;

			/* Prepare vendor specific HCI Fatal Error event */
			buf = vs_err_evt_create(data_type, data_len);
			if (buf != NULL) {
				/* Payload order: [pc (TRACE only)] file name,
				 * NUL terminator, line number.
				 */
				if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
					net_buf_add_le64(buf, pc);
				}
				net_buf_add_mem(buf, file_path, file_name_len);
				net_buf_add_u8(buf, STR_NULL_TERMINATOR);
				net_buf_add_le32(buf, line);
			} else {
				LOG_ERR("Can't create HCI Fatal Error event");
			}
		}
	}

	return buf;
}
5130
5131 struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc)
5132 {
5133 return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE, file, line, pc);
5134 }
5135
5136 struct net_buf *hci_vs_err_assert(const char *file, uint32_t line)
5137 {
5138 /* ASSERT data does not contain PC counter, because of that zero constant is used */
5139 return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_CTRL_ASSERT, file, line, 0U);
5140 }
5141 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
5142
5143 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate the vendor-specific LE Connectionless IQ Report event (16-bit
 * signed IQ samples) for a periodic-sync CTE IQ sample node. The event is
 * suppressed when disabled in the VS event mask, or when CTE sampling was
 * disabled / sync was lost before this node was processed.
 */
static void vs_le_df_connectionless_iq_report(struct pdu_data *pdu_rx, struct node_rx_pdu *node_rx,
					      struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
	struct ll_sync_set *sync = NULL;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: nothing to do. */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

	sync = HDR_LLL2ULL(lll);

	/* TX LL thread has higher priority than RX thread. It may happen that
	 * host successfully disables CTE sampling in the meantime.
	 * It should be verified here, to avoid reporting IQ samples after
	 * the functionality was disabled or if sync was lost.
	 */
	if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) || !sync->timeout_reload) {
		/* Drop further processing of the event. */
		return;
	}

	/* Get the sync handle corresponding to the LLL context passed in the
	 * node rx footer field.
	 */
	sync_handle = ull_sync_handle_get(sync);
	per_evt_counter = iq_report->event_counter;

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE
	 * value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size must account for the variable-length sample array. */
	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	/* RSSI is reported in 0.1 dBm units. */
	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* AoA uses the locally configured slot duration; AoD slot duration
	 * is implied by the CTE type.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5230 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
5231
5232 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate the vendor-specific LE Connection IQ Report event (16-bit signed
 * IQ samples) for a connection CTE IQ sample node. Suppressed when disabled
 * in the VS event mask or when CTE sampling was disabled in the meantime.
 */
static void vs_le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: nothing to do. */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	/* Without PHY update support the connection is always on 1M. */
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that host successfully
	 * disables CTE sampling in the meantime. It should be verified here, to avoid reporting
	 * IQ samples after the functionality was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size must account for the variable-length sample array. */
	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	/* RSSI is reported in 0.1 dBm units. */
	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* AoA uses the locally configured slot duration; AoD slot duration
	 * is implied by the CTE type.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5316 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
5317
5318 #if defined(CONFIG_BT_HCI_MESH_EXT)
5319 static void mesh_get_opts(struct net_buf *buf, struct net_buf **evt)
5320 {
5321 struct bt_hci_rp_mesh_get_opts *rp;
5322
5323 rp = hci_cmd_complete(evt, sizeof(*rp));
5324
5325 rp->status = 0x00;
5326 rp->opcode = BT_HCI_OC_MESH_GET_OPTS;
5327
5328 rp->revision = BT_HCI_MESH_REVISION;
5329 rp->ch_map = 0x7;
5330 /*@todo: nRF51 only */
5331 rp->min_tx_power = -30;
5332 /*@todo: nRF51 only */
5333 rp->max_tx_power = 4;
5334 rp->max_scan_filter = CONFIG_BT_CTLR_MESH_SCAN_FILTERS;
5335 rp->max_filter_pattern = CONFIG_BT_CTLR_MESH_SF_PATTERNS;
5336 rp->max_adv_slot = 1U;
5337 rp->evt_prefix_len = 0x01;
5338 rp->evt_prefix = BT_HCI_MESH_EVT_PREFIX;
5339 }
5340
5341 static void mesh_set_scan_filter(struct net_buf *buf, struct net_buf **evt)
5342 {
5343 struct bt_hci_cp_mesh_set_scan_filter *cmd = (void *)buf->data;
5344 struct bt_hci_rp_mesh_set_scan_filter *rp;
5345 uint8_t filter = cmd->scan_filter - 1;
5346 struct scan_filter *f;
5347 uint8_t status = 0x00;
5348 uint8_t i;
5349
5350 if (filter > ARRAY_SIZE(scan_filters) ||
5351 cmd->num_patterns > CONFIG_BT_CTLR_MESH_SF_PATTERNS) {
5352 status = BT_HCI_ERR_INVALID_PARAM;
5353 goto exit;
5354 }
5355
5356 if (filter == sf_curr) {
5357 status = BT_HCI_ERR_CMD_DISALLOWED;
5358 goto exit;
5359 }
5360
5361 /* duplicate filtering not supported yet */
5362 if (cmd->filter_dup) {
5363 status = BT_HCI_ERR_INVALID_PARAM;
5364 goto exit;
5365 }
5366
5367 f = &scan_filters[filter];
5368 for (i = 0U; i < cmd->num_patterns; i++) {
5369 if (!cmd->patterns[i].pattern_len ||
5370 cmd->patterns[i].pattern_len >
5371 BT_HCI_MESH_PATTERN_LEN_MAX) {
5372 status = BT_HCI_ERR_INVALID_PARAM;
5373 goto exit;
5374 }
5375 f->lengths[i] = cmd->patterns[i].pattern_len;
5376 memcpy(f->patterns[i], cmd->patterns[i].pattern, f->lengths[i]);
5377 }
5378
5379 f->count = cmd->num_patterns;
5380
5381 exit:
5382 rp = hci_cmd_complete(evt, sizeof(*rp));
5383 rp->status = status;
5384 rp->opcode = BT_HCI_OC_MESH_SET_SCAN_FILTER;
5385 rp->scan_filter = filter + 1;
5386 }
5387
5388 static void mesh_advertise(struct net_buf *buf, struct net_buf **evt)
5389 {
5390 struct bt_hci_cp_mesh_advertise *cmd = (void *)buf->data;
5391 struct bt_hci_rp_mesh_advertise *rp;
5392 uint8_t adv_slot = cmd->adv_slot;
5393 uint8_t status;
5394
5395 status = ll_mesh_advertise(adv_slot,
5396 cmd->own_addr_type, cmd->random_addr.val,
5397 cmd->ch_map, cmd->tx_power,
5398 cmd->min_tx_delay, cmd->max_tx_delay,
5399 cmd->retx_count, cmd->retx_interval,
5400 cmd->scan_duration, cmd->scan_delay,
5401 cmd->scan_filter, cmd->data_len, cmd->data);
5402 if (!status) {
5403 /* Yields 0xFF if no scan filter selected */
5404 sf_curr = cmd->scan_filter - 1;
5405 }
5406
5407 rp = hci_cmd_complete(evt, sizeof(*rp));
5408 rp->status = status;
5409 rp->opcode = BT_HCI_OC_MESH_ADVERTISE;
5410 rp->adv_slot = adv_slot;
5411 }
5412
5413 static void mesh_advertise_cancel(struct net_buf *buf, struct net_buf **evt)
5414 {
5415 struct bt_hci_cp_mesh_advertise_cancel *cmd = (void *)buf->data;
5416 struct bt_hci_rp_mesh_advertise_cancel *rp;
5417 uint8_t adv_slot = cmd->adv_slot;
5418 uint8_t status;
5419
5420 status = ll_mesh_advertise_cancel(adv_slot);
5421 if (!status) {
5422 /* Yields 0xFF if no scan filter selected */
5423 sf_curr = 0xFF;
5424 }
5425
5426 rp = hci_cmd_complete(evt, sizeof(*rp));
5427 rp->status = status;
5428 rp->opcode = BT_HCI_OC_MESH_ADVERTISE_CANCEL;
5429 rp->adv_slot = adv_slot;
5430 }
5431
5432 static int mesh_cmd_handle(struct net_buf *cmd, struct net_buf **evt)
5433 {
5434 struct bt_hci_cp_mesh *cp_mesh;
5435 uint8_t mesh_op;
5436
5437 if (cmd->len < sizeof(*cp_mesh)) {
5438 LOG_ERR("No HCI VSD Command header");
5439 return -EINVAL;
5440 }
5441
5442 cp_mesh = net_buf_pull_mem(cmd, sizeof(*cp_mesh));
5443 mesh_op = cp_mesh->opcode;
5444
5445 switch (mesh_op) {
5446 case BT_HCI_OC_MESH_GET_OPTS:
5447 mesh_get_opts(cmd, evt);
5448 break;
5449
5450 case BT_HCI_OC_MESH_SET_SCAN_FILTER:
5451 mesh_set_scan_filter(cmd, evt);
5452 break;
5453
5454 case BT_HCI_OC_MESH_ADVERTISE:
5455 mesh_advertise(cmd, evt);
5456 break;
5457
5458 case BT_HCI_OC_MESH_ADVERTISE_CANCEL:
5459 mesh_advertise_cancel(cmd, evt);
5460 break;
5461
5462 default:
5463 return -EINVAL;
5464 }
5465
5466 return 0;
5467 }
5468 #endif /* CONFIG_BT_HCI_MESH_EXT */
5469
/* Dispatch common vendor-specific HCI commands by OCF.
 *
 * @param ocf Opcode Command Field of the VS command.
 * @param cmd Command parameters (header already pulled).
 * @param evt Output: response event, when the handler creates one.
 * @return 0 when the command was handled, -EINVAL for an unknown OCF so
 *         the caller can respond with Unknown HCI Command.
 */
int hci_vendor_cmd_handle_common(uint16_t ocf, struct net_buf *cmd,
				 struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_VS_READ_VERSION_INFO):
		vs_read_version_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS):
		vs_read_supported_commands(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES):
		vs_read_supported_features(cmd, evt);
		break;

#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	/* Read USB Transport Mode: intentionally no response created here;
	 * handled by the USB transport layer.
	 */
	case BT_OCF(BT_HCI_OP_VS_READ_USB_TRANSPORT_MODE):
		break;
	case BT_OCF(BT_HCI_OP_VS_SET_USB_TRANSPORT_MODE):
		/* Switching transport mode resets the Controller state. */
		reset(cmd, evt);
		break;
#endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */

	case BT_OCF(BT_HCI_OP_VS_READ_BUILD_INFO):
		vs_read_build_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_WRITE_BD_ADDR):
		vs_write_bd_addr(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_STATIC_ADDRS):
		vs_read_static_addrs(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_KEY_HIERARCHY_ROOTS):
		vs_read_key_hierarchy_roots(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
	case BT_OCF(BT_HCI_OP_VS_SET_SCAN_REQ_REPORTS):
		vs_set_scan_req_reports(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	case BT_OCF(BT_HCI_OP_VS_WRITE_TX_POWER_LEVEL):
		vs_write_tx_power_level(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_TX_POWER_LEVEL):
		vs_read_tx_power_level(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case BT_OCF(BT_HCI_OP_VS_SET_MIN_NUM_USED_CHANS):
		vs_set_min_used_chans(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case BT_OCF(BT_HCI_OP_VS_MESH):
		/* NOTE(review): return value ignored, so an unknown mesh
		 * sub-opcode produces no response event -- confirm intended.
		 */
		mesh_cmd_handle(cmd, evt);
		break;
#endif /* CONFIG_BT_HCI_MESH_EXT */

	default:
		return -EINVAL;
	}

	return 0;
}
5544 #endif
5545
/* Entry point for an HCI Command packet from the Host.
 *
 * Validates the command header, records the opcode for later Command
 * Complete/Status event creation, and routes to the per-OGF handler.
 *
 * @param cmd     Command packet (header included).
 * @param node_rx Output used by LE handlers that produce rx nodes.
 * @return Response event buffer (may be a Command Status with Unknown
 *         Command for unrecognized opcodes), or NULL on malformed input.
 */
struct net_buf *hci_cmd_handle(struct net_buf *cmd, void **node_rx)
{
	struct bt_hci_cmd_hdr *chdr;
	struct net_buf *evt = NULL;
	uint16_t ocf;
	int err;

	if (cmd->len < sizeof(*chdr)) {
		LOG_ERR("No HCI Command header");
		return NULL;
	}

	chdr = net_buf_pull_mem(cmd, sizeof(*chdr));
	if (cmd->len < chdr->param_len) {
		LOG_ERR("Invalid HCI CMD packet length");
		return NULL;
	}

	/* store in a global for later CC/CS event creation */
	_opcode = sys_le16_to_cpu(chdr->opcode);

	ocf = BT_OCF(_opcode);

	/* Route by Opcode Group Field to the matching handler. */
	switch (BT_OGF(_opcode)) {
	case BT_OGF_LINK_CTRL:
		err = link_control_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_BASEBAND:
		err = ctrl_bb_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_INFO:
		err = info_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_STATUS:
		err = status_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_LE:
		err = controller_cmd_handle(ocf, cmd, &evt, node_rx);
		break;
#if defined(CONFIG_BT_HCI_VS)
	case BT_OGF_VS:
		err = hci_vendor_cmd_handle(ocf, cmd, &evt);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	/* Unknown OGF/OCF: respond with Command Status, Unknown Command. */
	if (err == -EINVAL) {
		evt = cmd_status(BT_HCI_ERR_UNKNOWN_CMD);
	}

	return evt;
}
5601
5602 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
5603 defined(CONFIG_BT_CTLR_CONN_ISO)
/* Emit an HCI Data Buffer Overflow event for the given link type, if the
 * event is enabled in the Host-configured event mask; otherwise *buf is
 * left untouched.
 */
static void data_buf_overflow(struct net_buf **buf, uint8_t link_type)
{
	struct bt_hci_evt_data_buf_overflow *ep;

	if (!(event_mask & BT_EVT_MASK_DATA_BUFFER_OVERFLOW)) {
		return;
	}

	/* K_FOREVER: block until an event buffer becomes available. */
	*buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
	hci_evt_create(*buf, BT_HCI_EVT_DATA_BUF_OVERFLOW, sizeof(*ep));
	ep = net_buf_add(*buf, sizeof(*ep));

	ep->link_type = link_type;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO ||
	* CONFIG_BT_CTLR_CONN_ISO
	*/
5621
5622 #if defined(CONFIG_BT_CONN)
5623 int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
5624 {
5625 struct node_tx *node_tx;
5626 struct bt_hci_acl_hdr *acl;
5627 struct pdu_data *pdu_data;
5628 uint16_t handle;
5629 uint8_t flags;
5630 uint16_t len;
5631
5632 *evt = NULL;
5633
5634 if (buf->len < sizeof(*acl)) {
5635 LOG_ERR("No HCI ACL header");
5636 return -EINVAL;
5637 }
5638
5639 acl = net_buf_pull_mem(buf, sizeof(*acl));
5640 len = sys_le16_to_cpu(acl->len);
5641 handle = sys_le16_to_cpu(acl->handle);
5642
5643 if (buf->len < len) {
5644 LOG_ERR("Invalid HCI ACL packet length");
5645 return -EINVAL;
5646 }
5647
5648 if (len > LL_LENGTH_OCTETS_TX_MAX) {
5649 LOG_ERR("Invalid HCI ACL Data length");
5650 return -EINVAL;
5651 }
5652
5653 /* assigning flags first because handle will be overwritten */
5654 flags = bt_acl_flags(handle);
5655 handle = bt_acl_handle(handle);
5656
5657 node_tx = ll_tx_mem_acquire();
5658 if (!node_tx) {
5659 LOG_ERR("Tx Buffer Overflow");
5660 data_buf_overflow(evt, BT_OVERFLOW_LINK_ACL);
5661 return -ENOBUFS;
5662 }
5663
5664 pdu_data = (void *)node_tx->pdu;
5665
5666 if (bt_acl_flags_bc(flags) != BT_ACL_POINT_TO_POINT) {
5667 return -EINVAL;
5668 }
5669
5670 switch (bt_acl_flags_pb(flags)) {
5671 case BT_ACL_START_NO_FLUSH:
5672 pdu_data->ll_id = PDU_DATA_LLID_DATA_START;
5673 break;
5674 case BT_ACL_CONT:
5675 pdu_data->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
5676 break;
5677 default:
5678 /* BT_ACL_START and BT_ACL_COMPLETE not allowed on LE-U
5679 * from Host to Controller
5680 */
5681 return -EINVAL;
5682 }
5683
5684 pdu_data->len = len;
5685 memcpy(&pdu_data->lldata[0], buf->data, len);
5686
5687 if (ll_tx_mem_enqueue(handle, node_tx)) {
5688 LOG_ERR("Invalid Tx Enqueue");
5689 ll_tx_mem_release(node_tx);
5690 return -EINVAL;
5691 }
5692
5693 return 0;
5694 }
5695 #endif /* CONFIG_BT_CONN */
5696
5697 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
5698 int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
5699 {
5700 struct bt_hci_iso_sdu_hdr *iso_sdu_hdr;
5701 struct isoal_sdu_tx sdu_frag_tx;
5702 struct bt_hci_iso_hdr *iso_hdr;
5703 uint32_t *time_stamp;
5704 uint16_t handle;
5705 uint8_t pb_flag;
5706 uint8_t ts_flag;
5707 uint8_t flags;
5708 uint16_t len;
5709
5710 iso_sdu_hdr = NULL;
5711 *evt = NULL;
5712
5713 if (buf->len < sizeof(*iso_hdr)) {
5714 LOG_ERR("No HCI ISO header");
5715 return -EINVAL;
5716 }
5717
5718 iso_hdr = net_buf_pull_mem(buf, sizeof(*iso_hdr));
5719 handle = sys_le16_to_cpu(iso_hdr->handle);
5720 len = bt_iso_hdr_len(sys_le16_to_cpu(iso_hdr->len));
5721
5722 if (buf->len < len) {
5723 LOG_ERR("Invalid HCI ISO packet length");
5724 return -EINVAL;
5725 }
5726
5727 /* Assigning flags first because handle will be overwritten */
5728 flags = bt_iso_flags(handle);
5729 pb_flag = bt_iso_flags_pb(flags);
5730 ts_flag = bt_iso_flags_ts(flags);
5731 handle = bt_iso_handle(handle);
5732
5733 /* Extract time stamp */
5734 /* Set default to current time
5735 * BT Core V5.3 : Vol 6 Low Energy Controller : Part G IS0-AL:
5736 * 3.1 Time_Offset in framed PDUs :
5737 * The Controller transmitting a SDU may use any of the following
5738 * methods to determine the value of the SDU reference time:
5739 * -- A captured time stamp of the SDU
5740 * -- A time stamp provided by the higher layer
5741 * -- A computed time stamp based on a sequence counter provided by the
5742 * higher layer
5743 * -- Any other method of determining Time_Offset
5744 * (Uses a timestamp computed from the difference in provided
5745 * timestamps, if the timestamp is deemed not based on the
5746 * controller's clock)
5747 */
5748 sdu_frag_tx.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
5749 if (ts_flag) {
5750 /* Use HCI provided time stamp */
5751 time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
5752 len -= sizeof(*time_stamp);
5753 sdu_frag_tx.time_stamp = sys_le32_to_cpu(*time_stamp);
5754 } else {
5755 /* Use controller's capture time */
5756 sdu_frag_tx.time_stamp = sdu_frag_tx.cntr_time_stamp;
5757 }
5758
5759 /* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
5760 if ((pb_flag & 0x01) == 0) {
5761 iso_sdu_hdr = net_buf_pull_mem(buf, sizeof(*iso_sdu_hdr));
5762 len -= sizeof(*iso_sdu_hdr);
5763 sdu_frag_tx.packet_sn = sys_le16_to_cpu(iso_sdu_hdr->sn);
5764 sdu_frag_tx.iso_sdu_length =
5765 sys_le16_to_cpu(bt_iso_pkt_len(iso_sdu_hdr->slen));
5766 } else {
5767 sdu_frag_tx.packet_sn = 0;
5768 sdu_frag_tx.iso_sdu_length = 0;
5769 }
5770
5771 /* Packet boundary flags should be bitwise identical to the SDU state
5772 * 0b00 BT_ISO_START
5773 * 0b01 BT_ISO_CONT
5774 * 0b10 BT_ISO_SINGLE
5775 * 0b11 BT_ISO_END
5776 */
5777 sdu_frag_tx.sdu_state = pb_flag;
5778 /* Fill in SDU buffer fields */
5779 sdu_frag_tx.dbuf = buf->data;
5780 sdu_frag_tx.size = len;
5781
5782 if (false) {
5783
5784 #if defined(CONFIG_BT_CTLR_CONN_ISO)
5785 /* Extract source handle from CIS or BIS handle by way of header and
5786 * data path
5787 */
5788 } else if (IS_CIS_HANDLE(handle)) {
5789 struct ll_conn_iso_stream *cis;
5790 struct ll_conn_iso_group *cig;
5791 struct ll_iso_stream_hdr *hdr;
5792 struct ll_iso_datapath *dp_in;
5793 uint8_t event_offset;
5794
5795 cis = ll_iso_stream_connected_get(handle);
5796 if (!cis) {
5797 return -EINVAL;
5798 }
5799
5800 cig = cis->group;
5801
5802 /* We must ensure sufficient time for ISO-AL to fragment SDU and
5803 * deliver PDUs to the TX queue. By checking ull_ref_get, we
5804 * know if we are within the subevents of an ISO event. If so,
5805 * we can assume that we have enough time to deliver in the next
5806 * ISO event. If we're not active within the ISO event, we don't
5807 * know if there is enough time to deliver in the next event,
5808 * and for safety we set the target to current event + 2.
5809 *
5810 * For FT > 1, we have the opportunity to retransmit in later
5811 * event(s), in which case we have the option to target an
5812 * earlier event (this or next) because being late does not
5813 * instantly flush the payload.
5814 */
5815
5816 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
5817
5818 if (cis->lll.tx.ft > 1) {
5819 /* FT > 1, target an earlier event */
5820 event_offset -= 1;
5821 }
5822
5823 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
5824 uint64_t event_count;
5825 uint64_t pkt_seq_num;
5826
5827 /* Catch up local pkt_seq_num with internal pkt_seq_num */
5828 event_count = cis->lll.event_count + event_offset;
5829 pkt_seq_num = event_count + 1U;
5830
5831 /* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
5832 * then we simply check that the pb_flag is an even value, and
5833 * then pkt_seq_num is a future sequence number value compare
5834 * to last recorded number in cis->pkt_seq_num.
5835 *
5836 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
5837 * BIT64(39) will be set (2's compliment value). The diff value
5838 * less than or equal to BIT64_MASK(38) means the diff value is
5839 * positive and hence pkt_seq_num is greater than
5840 * stream->pkt_seq_num. This calculation is valid for when value
5841 * rollover too.
5842 */
5843 if (!(pb_flag & 0x01) &&
5844 (((pkt_seq_num - cis->pkt_seq_num) &
5845 BIT64_MASK(39)) <= BIT64_MASK(38))) {
5846 cis->pkt_seq_num = pkt_seq_num;
5847 } else {
5848 pkt_seq_num = cis->pkt_seq_num;
5849 }
5850
5851 /* Pre-increment, when pg_flag is BT_ISO_SINGLE (0b10) or
5852 * BT_ISO_END (0b11) then we simple check if pb_flag has bit 1
5853 * is set, for next ISO data packet seq num comparison.
5854 */
5855 if (pb_flag & 0x10) {
5856 cis->pkt_seq_num++;
5857 }
5858
5859 /* Target next ISO event to avoid overlapping with, if any,
5860 * current ISO event
5861 */
5862 pkt_seq_num++;
5863 sdu_frag_tx.target_event = pkt_seq_num;
5864 sdu_frag_tx.grp_ref_point =
5865 isoal_get_wrapped_time_us(cig->cig_ref_point,
5866 ((pkt_seq_num - event_count) *
5867 cig->iso_interval *
5868 ISO_INT_UNIT_US));
5869
5870 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5871 sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
5872 sdu_frag_tx.grp_ref_point =
5873 isoal_get_wrapped_time_us(cig->cig_ref_point,
5874 (event_offset *
5875 cig->iso_interval *
5876 ISO_INT_UNIT_US));
5877 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5878
5879 /* Get controller's input data path for CIS */
5880 hdr = &cis->hdr;
5881 dp_in = hdr->datapath_in;
5882 if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
5883 LOG_ERR("Input data path not set for HCI");
5884 return -EINVAL;
5885 }
5886
5887 /* Get input data path's source handle */
5888 isoal_source_handle_t source = dp_in->source_hdl;
5889
5890 /* Start Fragmentation */
5891 isoal_status_t isoal_status =
5892 isoal_tx_sdu_fragment(source, &sdu_frag_tx);
5893
5894 if (isoal_status) {
5895 if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
5896 data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
5897 return -ENOBUFS;
5898 }
5899
5900 return -EINVAL;
5901 }
5902
5903 /* TODO: Assign *evt if an immediate response is required */
5904 return 0;
5905 #endif /* CONFIG_BT_CTLR_CONN_ISO */
5906
5907 #if defined(CONFIG_BT_CTLR_ADV_ISO)
5908 } else if (IS_ADV_ISO_HANDLE(handle)) {
5909 struct lll_adv_iso_stream *stream;
5910 struct ll_adv_iso_set *adv_iso;
5911 struct lll_adv_iso *lll_iso;
5912 uint16_t latency_prepare;
5913 uint16_t stream_handle;
5914 uint64_t target_event;
5915 uint8_t event_offset;
5916
5917 /* Get BIS stream handle and stream context */
5918 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
5919 stream = ull_adv_iso_stream_get(stream_handle);
5920 if (!stream || !stream->dp) {
5921 LOG_ERR("Invalid BIS stream");
5922 return -EINVAL;
5923 }
5924
5925 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
5926 if (!adv_iso) {
5927 LOG_ERR("No BIG associated with stream handle");
5928 return -EINVAL;
5929 }
5930
5931 lll_iso = &adv_iso->lll;
5932
5933 /* Determine the target event and the first event offset after
5934 * datapath setup.
5935 * event_offset mitigates the possibility of first SDU being
5936 * late on the datapath and avoid all subsequent SDUs being
5937 * dropped for a said SDU interval. i.e. upper layer is not
5938 * drifting, say first SDU dropped, hence subsequent SDUs all
5939 * dropped, is mitigated by offsetting the grp_ref_point.
5940 *
5941 * It is ok to do the below for every received ISO data, ISOAL
5942 * will not consider subsequent skewed target_event after the
5943 * first use of target_event value.
5944 *
5945 * In BIG implementation in LLL, payload_count corresponds to
5946 * the next BIG event, hence calculate grp_ref_point for next
5947 * BIG event by incrementing the previous elapsed big_ref_point
5948 * by one additional ISO interval.
5949 */
5950 target_event = lll_iso->payload_count / lll_iso->bn;
5951 latency_prepare = lll_iso->latency_prepare;
5952 if (latency_prepare) {
5953 /* big_ref_point has been updated, but payload_count
5954 * hasn't been updated yet - increment target_event to
5955 * compensate
5956 */
5957 target_event += latency_prepare;
5958 }
5959 event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;
5960
5961 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
5962 uint64_t event_count;
5963 uint64_t pkt_seq_num;
5964
5965 /* Catch up local pkt_seq_num with internal pkt_seq_num */
5966 event_count = target_event + event_offset;
5967 pkt_seq_num = event_count + 1U;
5968
5969 /* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
5970 * then we simply check that the pb_flag is an even value, and
5971 * then pkt_seq_num is a future sequence number value compare
5972 * to last recorded number in cis->pkt_seq_num.
5973 *
5974 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
5975 * BIT64(39) will be set (2's compliment value). The diff value
5976 * less than or equal to BIT64_MASK(38) means the diff value is
5977 * positive and hence pkt_seq_num is greater than
5978 * stream->pkt_seq_num. This calculation is valid for when value
5979 * rollover too.
5980 */
5981 if (!(pb_flag & 0x01) &&
5982 (((pkt_seq_num - stream->pkt_seq_num) &
5983 BIT64_MASK(39)) <= BIT64_MASK(38))) {
5984 stream->pkt_seq_num = pkt_seq_num;
5985 } else {
5986 pkt_seq_num = stream->pkt_seq_num;
5987 }
5988
5989 /* Pre-increment, when pg_flag is BT_ISO_SINGLE (0b10) or
5990 * BT_ISO_END (0b11) then we simple check if pb_flag has bit 1
5991 * is set, for next ISO data packet seq num comparison.
5992 */
5993 if (pb_flag & 0x10) {
5994 stream->pkt_seq_num++;
5995 }
5996
5997 /* Target next ISO event to avoid overlapping with, if any,
5998 * current ISO event
5999 */
6000 /* FIXME: Implement ISO Tx ack generation early in done compared
6001 * to currently only in prepare. I.e. to ensure upper
6002 * layer has the number of completed packet before the
6003 * next BIG event, so as to supply new ISO data packets.
6004 * Without which upper layers need extra buffers to
6005 * buffer next ISO data packet.
6006 *
6007 * Enable below increment once early Tx ack is
6008 * implemented.
6009 *
6010 * pkt_seq_num++;
6011 */
6012 sdu_frag_tx.target_event = pkt_seq_num;
6013 sdu_frag_tx.grp_ref_point =
6014 isoal_get_wrapped_time_us(adv_iso->big_ref_point,
6015 (((pkt_seq_num + 1U) -
6016 event_count) *
6017 lll_iso->iso_interval *
6018 ISO_INT_UNIT_US));
6019
6020 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6021 sdu_frag_tx.target_event = target_event + event_offset;
6022 sdu_frag_tx.grp_ref_point =
6023 isoal_get_wrapped_time_us(adv_iso->big_ref_point,
6024 ((event_offset + 1U) *
6025 lll_iso->iso_interval *
6026 ISO_INT_UNIT_US));
6027 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6028
6029 /* Start Fragmentation */
6030 /* FIXME: need to ensure ISO-AL returns proper isoal_status.
6031 * Currently there are cases where ISO-AL calls LL_ASSERT.
6032 */
6033 isoal_status_t isoal_status =
6034 isoal_tx_sdu_fragment(stream->dp->source_hdl, &sdu_frag_tx);
6035
6036 if (isoal_status) {
6037 if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
6038 data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
6039 return -ENOBUFS;
6040 }
6041
6042 return -EINVAL;
6043 }
6044
6045 return 0;
6046 #endif /* CONFIG_BT_CTLR_ADV_ISO */
6047
6048 }
6049
6050 return -EINVAL;
6051 }
6052 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
6053
6054 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6055 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6056 static void dup_ext_adv_adi_store(struct dup_ext_adv_mode *dup_mode,
6057 const struct pdu_adv_adi *adi,
6058 uint8_t data_status)
6059 {
6060 struct dup_ext_adv_set *adv_set;
6061
6062 adv_set = &dup_mode->set[dup_mode->set_curr];
6063
6064 adv_set->data_cmplt = (data_status ==
6065 BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) ?
6066 1U : 0U;
6067
6068 if (adi) {
6069 (void)memcpy(&adv_set->adi, adi, sizeof(*adi));
6070 } else {
6071 (void)memset(&adv_set->adi, 0U, sizeof(*adi));
6072 }
6073
6074 if (dup_mode->set_count < CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6075 dup_mode->set_count++;
6076 dup_mode->set_curr = dup_mode->set_count;
6077 } else {
6078 dup_mode->set_curr++;
6079 }
6080
6081 if (dup_mode->set_curr == CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6082 dup_mode->set_curr = 0U;
6083 }
6084 }
6085
6086 static void dup_ext_adv_mode_reset(struct dup_ext_adv_mode *dup_adv_mode)
6087 {
6088 uint8_t adv_mode;
6089
6090 for (adv_mode = 0U; adv_mode < DUP_EXT_ADV_MODE_COUNT;
6091 adv_mode++) {
6092 struct dup_ext_adv_mode *dup_mode;
6093
6094 dup_mode = &dup_adv_mode[adv_mode];
6095 dup_mode->set_count = 0U;
6096 dup_mode->set_curr = 0U;
6097 }
6098 }
6099
6100 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
6101 static void dup_ext_adv_reset(void)
6102 {
6103 for (int32_t i = 0; i < dup_count; i++) {
6104 struct dup_entry *dup;
6105
6106 dup = &dup_filter[i];
6107 dup->mask = 0U;
6108 dup_ext_adv_mode_reset(dup->adv_mode);
6109 }
6110 }
6111
6112 static void dup_periodic_adv_reset(uint8_t addr_type, const uint8_t *addr,
6113 uint8_t sid)
6114 {
6115 for (int32_t addr_idx = 0; addr_idx < dup_count; addr_idx++) {
6116 struct dup_ext_adv_mode *dup_mode;
6117 struct dup_entry *dup;
6118
6119 dup = &dup_filter[addr_idx];
6120 if (memcmp(addr, dup->addr.a.val, sizeof(bt_addr_t)) ||
6121 (addr_type != dup->addr.type)) {
6122 continue;
6123 }
6124
6125 dup_mode = &dup->adv_mode[DUP_EXT_ADV_MODE_PERIODIC];
6126 for (uint16_t set_idx = 0; set_idx < dup_mode->set_count;
6127 set_idx++) {
6128 struct dup_ext_adv_set *adv_set;
6129
6130 adv_set = &dup_mode->set[set_idx];
6131 if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != sid) {
6132 continue;
6133 }
6134
6135 /* reset data complete state */
6136 adv_set->data_cmplt = 0U;
6137
6138 return;
6139 }
6140
6141 return;
6142 }
6143 }
6144 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
6145 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6146
/* Decide whether an advertising report from an already-known address is
 * a duplicate, updating the stored filter state when the PDU carries new
 * information (new adv type, new extended adv mode, new ADI SID, changed
 * DID, or a transition to data-complete).
 *
 * Returns true when the report shall be dropped as a duplicate, false
 * when it shall be forwarded to the Host (the entry having been updated).
 *
 * NOTE: without CONFIG_BT_CTLR_ADV_EXT the preprocessor reduces this to:
 * report once per new adv type bit, drop everything else.
 */
static inline bool is_dup_or_update(struct dup_entry *dup, uint8_t adv_type,
				    uint8_t adv_mode,
				    const struct pdu_adv_adi *adi,
				    uint8_t data_status)
{
	if (!(dup->mask & BIT(adv_type))) {
		/* report different adv types */
		dup->mask |= BIT(adv_type);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);

		return false;
	} else if (adv_type != PDU_ADV_TYPE_EXT_IND) {
		/* drop duplicate legacy advertising */
		return true;
	} else if (dup->adv_mode[adv_mode].set_count == 0U) {
		/* report different extended adv mode */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
		return false;
	} else if (adi) {
		struct dup_ext_adv_mode *dup_mode;
		uint8_t j;

		/* Look for a stored set with the same SID as this PDU */
		dup_mode = &dup->adv_mode[adv_mode];
		for (j = 0; j < dup_mode->set_count; j++) {
			struct dup_ext_adv_set *adv_set;

			adv_set = &dup_mode->set[j];
			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != PDU_ADV_ADI_SID_GET(adi)) {
				continue;
			}

			if (PDU_ADV_ADI_DID_GET(&adv_set->adi) != PDU_ADV_ADI_DID_GET(adi)) {
				/* report different DID */
				adv_set->adi.did_sid_packed[0] = adi->did_sid_packed[0];
				adv_set->adi.did_sid_packed[1] = adi->did_sid_packed[1];
				/* set new data status */
				if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
					adv_set->data_cmplt = 1U;
				} else {
					adv_set->data_cmplt = 0U;
				}

				return false;
			} else if (!adv_set->data_cmplt &&
				   (data_status ==
				    BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE)) {
				/* report data complete */
				adv_set->data_cmplt = 1U;
				return false;
			} else if (!adv_set->data_cmplt) {
				/* report partial and incomplete data */
				return false;
			}

			/* Same SID, same DID, data already complete:
			 * duplicate.
			 */
			return true;
		}

		/* No stored set with this SID yet: record it and report */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		return false;
	}

	return true;
}
6217
/* Duplicate filter lookup for a received advertising report.
 *
 * Returns true when the report is a duplicate that shall be dropped;
 * otherwise returns false, having inserted a new filter entry for the
 * advertiser's address or updated the existing one.
 */
static bool dup_found(uint8_t adv_type, uint8_t addr_type, const uint8_t *addr,
		      uint8_t adv_mode, const struct pdu_adv_adi *adi,
		      uint8_t data_status)
{
	/* check for duplicate filtering */
	/* NOTE(review): negative dup_count appears to mean filtering is
	 * disabled — confirm against the dup_count definition.
	 */
	if (dup_count >= 0) {
		struct dup_entry *dup;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		__ASSERT((adv_mode < ARRAY_SIZE(dup_filter[0].adv_mode)),
			 "adv_mode index out-of-bound");
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* find for existing entry and update if changed */
		for (int32_t i = 0; i < dup_count; i++) {
			dup = &dup_filter[i];
			if (memcmp(addr, &dup->addr.a.val[0],
				   sizeof(bt_addr_t)) ||
			    (addr_type != dup->addr.type)) {
				continue;
			}

			/* still duplicate or update entry with change */
			return is_dup_or_update(dup, adv_type, adv_mode, adi,
						data_status);
		}

		/* insert into the duplicate filter */
		dup = &dup_filter[dup_curr];
		(void)memcpy(&dup->addr.a.val[0], addr, sizeof(bt_addr_t));
		dup->addr.type = addr_type;
		dup->mask = BIT(adv_type);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		dup_ext_adv_mode_reset(dup->adv_mode);
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Advance the circular insertion index, growing the used
		 * count until the configured filter length is reached.
		 */
		if (dup_count < CONFIG_BT_CTLR_DUP_FILTER_LEN) {
			dup_count++;
			dup_curr = dup_count;
		} else {
			dup_curr++;
		}

		if (dup_curr == CONFIG_BT_CTLR_DUP_FILTER_LEN) {
			dup_curr = 0U;
		}
	}

	return false;
}
6271 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6272
6273 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
/* Generate an HCI LE Direct Advertising Report event from a received
 * PDU_ADV_TYPE_DIRECT_IND PDU, subject to the Host's event masks and
 * the duplicate filter.
 */
static inline void le_dir_adv_report(struct pdu_adv *adv, struct net_buf *buf,
				     int8_t rssi, uint8_t rl_idx)
{
	struct bt_hci_evt_le_direct_adv_report *drp;
	struct bt_hci_evt_le_direct_adv_info *dir_info;

	/* Skip the report when the Host has not enabled the event */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_DIRECT_ADV_REPORT)) {
		return;
	}

	LL_ASSERT(adv->type == PDU_ADV_TYPE_DIRECT_IND);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Drop already-reported advertisers when scan duplicate
	 * filtering is enabled.
	 */
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	drp = meta_evt(buf, BT_HCI_EVT_LE_DIRECT_ADV_REPORT,
		       sizeof(*drp) + sizeof(*dir_info));

	drp->num_reports = 1U;
	dir_info = (void *)(((uint8_t *)drp) + sizeof(*drp));

	/* Directed Advertising */
	dir_info->evt_type = BT_HCI_ADV_DIRECT_IND;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &dir_info->addr.type,
				  &dir_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		dir_info->addr.type += 2U;
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		/* Report the advertiser address as received over the air */
		dir_info->addr.type = adv->tx_addr;
		memcpy(&dir_info->addr.a.val[0], &adv->direct_ind.adv_addr[0],
		       sizeof(bt_addr_t));
	}

	dir_info->dir_addr.type = adv->rx_addr;
	memcpy(&dir_info->dir_addr.a.val[0],
	       &adv->direct_ind.tgt_addr[0], sizeof(bt_addr_t));

	dir_info->rssi = rssi;
}
6325 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6326
6327 #if defined(CONFIG_BT_OBSERVER)
6328 #if defined(CONFIG_BT_HCI_MESH_EXT)
6329 static inline bool scan_filter_apply(uint8_t filter, uint8_t *data, uint8_t len)
6330 {
6331 struct scan_filter *f = &scan_filters[filter];
6332
6333 /* No patterns means filter out all advertising packets */
6334 for (uint8_t i = 0; i < f->count; i++) {
6335 /* Require at least the length of the pattern */
6336 if (len >= f->lengths[i] &&
6337 !memcmp(data, f->patterns[i], f->lengths[i])) {
6338 return true;
6339 }
6340 }
6341
6342 return false;
6343 }
6344
/* Generate a vendor-specific Mesh Scanning Report event from a received
 * non-connectable advertising PDU, subject to the currently active Mesh
 * scan filter. Includes the RX channel and anchor-tick based instant.
 */
static inline void le_mesh_scan_report(struct pdu_adv *adv,
				       struct node_rx_pdu *node_rx,
				       struct net_buf *buf, int8_t rssi)
{
	uint8_t data_len = (adv->len - BDADDR_SIZE);
	struct bt_hci_evt_mesh_scanning_report *mep;
	struct bt_hci_evt_mesh_scan_report *sr;
	uint32_t instant;
	uint8_t chan;

	LL_ASSERT(adv->type == PDU_ADV_TYPE_NONCONN_IND);

	/* Filter based on currently active Scan Filter */
	if (sf_curr < ARRAY_SIZE(scan_filters) &&
	    !scan_filter_apply(sf_curr, &adv->adv_ind.data[0], data_len)) {
		/* Drop the report */
		return;
	}

	/* Channel and anchor point of the received PDU */
	chan = node_rx->rx_ftr.chan;
	instant = node_rx->rx_ftr.anchor_ticks;

	mep = mesh_evt(buf, BT_HCI_EVT_MESH_SCANNING_REPORT,
		       sizeof(*mep) + sizeof(*sr));

	mep->num_reports = 1U;
	sr = (void *)(((uint8_t *)mep) + sizeof(*mep));
	sr->addr.type = adv->tx_addr;
	memcpy(&sr->addr.a.val[0], &adv->adv_ind.addr[0], sizeof(bt_addr_t));
	sr->chan = chan;
	sr->rssi = rssi;
	sys_put_le32(instant, (uint8_t *)&sr->instant);

	sr->data_len = data_len;
	memcpy(&sr->data[0], &adv->adv_ind.data[0], data_len);
}
6381 #endif /* CONFIG_BT_HCI_MESH_EXT */
6382
/* Generate an HCI LE Advertising Report event for a received legacy
 * advertising PDU. Directed-advertising and Mesh reports are routed to
 * their dedicated handlers; the report is dropped when the Host event
 * masks disable it or the duplicate filter matches.
 */
static void le_advertising_report(struct pdu_data *pdu_data,
				  struct node_rx_pdu *node_rx,
				  struct net_buf *buf)
{
	/* Map PDU advertising type to HCI report event type; 0xff marks
	 * invalid lookup indices.
	 */
	const uint8_t c_adv_type[] = { 0x00, 0x01, 0x03, 0xff, 0x04,
				       0xff, 0x02 };
	struct bt_hci_evt_le_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_advertising_info *adv_info;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;
#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	uint8_t direct_report;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
	int8_t *prssi;

	/* rx_ftr.rssi holds the RSSI magnitude; negate to report dBm */
	rssi = -(node_rx->rx_ftr.rssi);
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	direct_report = node_rx->rx_ftr.direct;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	/* Directed advertising PDUs get a dedicated report event */
	if (direct_report) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
		le_dir_adv_report(adv, buf, rssi, rl_idx);
#else
		le_dir_adv_report(adv, buf, rssi, 0xFF);
#endif /* CONFIG_BT_CTLR_PRIVACY */
		return;
	}
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) {
		le_mesh_scan_report(adv, node_rx, buf, rssi);
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Skip the report when the Host has not enabled the event */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_ADVERTISING_REPORT)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND PDUs carry addresses only, no AD data */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}
	/* Report layout: info struct, AD data, then trailing RSSI byte */
	info_len = sizeof(struct bt_hci_evt_le_advertising_info) + data_len +
		   sizeof(*prssi);
	sep = meta_evt(buf, BT_HCI_EVT_LE_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = c_adv_type[adv->type];

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */

		/* Report the advertiser address as received over the air */
		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
	/* RSSI */
	prssi = &adv_info->data[0] + data_len;
	*prssi = rssi;
}
6486
6487 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Generate an HCI LE Extended Advertising Report event for a received
 * legacy advertising PDU, when the scanner is running in extended scan
 * mode. Extended-report-only fields without a legacy equivalent are
 * filled with their "not available" values.
 */
static void le_ext_adv_legacy_report(struct pdu_data *pdu_data,
				     struct node_rx_pdu *node_rx,
				     struct net_buf *buf)
{
	/* Lookup event type based on pdu_adv_type set by LLL */
	const uint8_t evt_type_lookup[] = {
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* ADV_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* DIRECT_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY), /* NONCONN_IND */
		0xff, /* Invalid index lookup */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN), /* SCAN_RSP to an ADV_SCAN_IND
						*/
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* SCAN_RSP to an ADV_IND,
						* NOTE: LLL explicitly sets
						* adv_type to
						* PDU_ADV_TYPE_ADV_IND_SCAN_RSP
						*/
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN) /* SCAN_IND */
	};
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	/* Skip the report when the Host has not enabled the event */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(node_rx->rx_ftr.rssi);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND PDUs carry addresses only, no AD data */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type_lookup[adv->type]);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
	} else
#endif /* CONFIG_BT_CTLR_PRIVACY */
	{
		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	/* Legacy PDUs: primary PHY is 1M; no secondary PHY, SID,
	 * TX power or periodic interval to report.
	 */
	adv_info->prim_phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
	adv_info->sec_phy = 0U;
	adv_info->sid = 0xff;
	adv_info->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	adv_info->rssi = rssi;
	adv_info->interval = 0U;

	if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
		adv_info->direct_addr.type = adv->rx_addr;
		bt_addr_copy(&adv_info->direct_addr.a,
			     (void *)adv->direct_ind.tgt_addr);
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0U,
			     sizeof(adv_info->direct_addr.a.val));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
}
6603
/* Determine the directed address type to report in an extended
 * advertising report for a directed PDU, based on whether the
 * Controller resolved the directed (target) address.
 *
 * Returns BT_ADDR_LE_UNRESOLVED when the RPA could not be resolved,
 * otherwise the scanner's own address type, with BIT(1) set when the
 * advertiser used an RPA generated from the local IRK.
 */
static uint8_t ext_adv_direct_addr_type(struct lll_scan *lll,
					bool peer_resolved, bool direct_report,
					uint8_t rx_addr_type,
					const uint8_t *const rx_addr)
{
	/* The directed address is resolvable private address, but Controller
	 * could not resolve it.
	 */
	if (direct_report) {
		return BT_ADDR_LE_UNRESOLVED;
	}

	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Peer directed advertiser's address was resolved */
	} else if (peer_resolved) {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);
		if ((rx_addr_type == lll->init_addr_type) &&
		    !memcmp(lll->init_addr, rx_addr, BDADDR_SIZE)) {
			/* Peer directed advertiser used local scanner's
			 * initiator address.
			 */
			return scan->own_addr_type;
		}

		/* Peer directed advertiser used directed resolvable
		 * private address generated from the local scanner's
		 * Identity Resolution Key.
		 */
		return scan->own_addr_type | BIT(1);
#endif /* CONFIG_BT_CTLR_PRIVACY */
	} else {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);

		/* Peer directed advertiser used local scanner's
		 * initiator address.
		 */
		return scan->own_addr_type;
	}
}
6648
/* Extract the AD data portion of an extended advertising PDU by
 * stepping over the optional extended header fields in their on-air
 * order (AdvA, TargetA, ADI, AuxPtr, SyncInfo, TxPower, ACAD).
 *
 * On return *data points at the AD data and the AD data length is
 * returned (0 when the PDU carries none). *tx_pwr is always written
 * (default BT_HCI_LE_ADV_TX_POWER_NO_PREF); *sec_phy is only written
 * when an AuxPtr field is present — caller is presumably expected to
 * pre-initialize it (TODO confirm at call sites).
 */
static uint8_t ext_adv_data_get(const struct node_rx_pdu *node_rx_data,
				uint8_t *const sec_phy, int8_t *const tx_pwr,
				const uint8_t **const data)
{
	const struct pdu_adv *adv = (void *)node_rx_data->pdu;
	const struct pdu_adv_com_ext_adv *p;
	const struct pdu_adv_ext_hdr *h;
	uint8_t hdr_buf_len;
	const uint8_t *ptr;
	uint8_t hdr_len;

	*tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;

	p = (void *)&adv->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = (void *)h;

	/* No extended header: AD data, if any, follows immediately */
	if (!p->ext_hdr_len) {
		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

		goto no_ext_hdr;
	}

	ptr = h->data;

	/* Skip the optional fields flagged present in the header */
	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->adi) {
		ptr += sizeof(struct pdu_adv_adi);
	}

	if (h->aux_ptr) {
		struct pdu_adv_aux_ptr *aux_ptr;

		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);

		/* Secondary PHY is taken from the AuxPtr field */
		*sec_phy = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	}

	if (h->sync_info) {
		ptr += sizeof(struct pdu_adv_sync_info);
	}

	if (h->tx_pwr) {
		*tx_pwr = *(int8_t *)ptr;
		ptr++;
	}

	/* Any remaining extended header bytes are ACAD; skip them too */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len < hdr_buf_len) {
		uint8_t acad_len = hdr_buf_len - hdr_len;

		ptr += acad_len;
		hdr_len += acad_len;
	}

no_ext_hdr:
	if (hdr_len < adv->len) {
		*data = ptr;

		return adv->len - hdr_len;
	}

	return 0;
}
6722
6723 static void node_rx_extra_list_release(struct node_rx_pdu *node_rx_extra)
6724 {
6725 while (node_rx_extra) {
6726 struct node_rx_pdu *node_rx_curr;
6727
6728 node_rx_curr = node_rx_extra;
6729 node_rx_extra = node_rx_curr->rx_ftr.extra;
6730
6731 node_rx_curr->hdr.next = NULL;
6732 ll_rx_mem_release((void **)&node_rx_curr);
6733 }
6734 }
6735
/* Build one HCI LE Extended Advertising Report event carrying a single
 * report with the given fields and (a fragment of) the AD data.
 * A NULL adv_addr or absent ADI is reported with zeroed/invalid values.
 */
static void ext_adv_info_fill(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
			      uint8_t adv_addr_type, const uint8_t *adv_addr,
			      uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi, uint8_t data_len,
			      const uint8_t *data, struct net_buf *buf)
{
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	uint8_t info_len;

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type);

	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	} else if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  adv_info->addr.a.val);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
#else /* !CONFIG_BT_CTLR_PRIVACY */
		ARG_UNUSED(rl_idx);
#endif /* !CONFIG_BT_CTLR_PRIVACY */
	} else if (adv_addr) {
		adv_info->addr.type = adv_addr_type;
		(void)memcpy(adv_info->addr.a.val, adv_addr, sizeof(bt_addr_t));
	} else {
		/* No advertiser address available */
		adv_info->addr.type = 0U;
		(void)memset(adv_info->addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->prim_phy = find_lsb_set(phy);
	adv_info->sec_phy = sec_phy;
	adv_info->sid = (adi) ? PDU_ADV_ADI_SID_GET(adi) : BT_HCI_LE_EXT_ADV_SID_INVALID;
	adv_info->tx_power = tx_pwr;
	adv_info->rssi = rssi;
	adv_info->interval = interval_le16;

	/* Directed address only valid for directed event types */
	if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_DIRECT) {
		adv_info->direct_addr.type = direct_addr_type;
		(void)memcpy(adv_info->direct_addr.a.val, direct_addr,
			     sizeof(bt_addr_t));
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	(void)memcpy(adv_info->data, data, data_len);
}
6797
/* Emit HCI event fragments for one PDU's AD data, each carrying at most
 * data_len_max bytes, chaining a freshly allocated event buffer after
 * each emitted fragment. The final remainder fragment (<= data_len_max)
 * is left in *data/*data_len for the caller to prepare.
 */
static void ext_adv_pdu_frag(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
			     uint8_t adv_addr_type, const uint8_t *adv_addr,
			     uint8_t direct_addr_type,
			     const uint8_t *direct_addr, uint8_t rl_idx,
			     int8_t tx_pwr, int8_t rssi, uint16_t interval_le16,
			     const struct pdu_adv_adi *adi,
			     uint8_t data_len_max,
			     uint16_t *const data_len_total,
			     uint8_t *const data_len,
			     const uint8_t **const data, struct net_buf *buf,
			     struct net_buf **const evt_buf)
{
	/* Fragment size is fixed for all iterations of the loop below */
	const uint8_t data_len_frag = MIN(*data_len, data_len_max);

	do {
		/* Prepare a fragment of PDU data in a HCI event */
		ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type,
				  adv_addr, direct_addr_type, direct_addr,
				  rl_idx, tx_pwr, rssi, interval_le16, adi,
				  data_len_frag, *data, *evt_buf);

		/* Advance the data cursor and remaining-length counters */
		*data += data_len_frag;
		*data_len -= data_len_frag;
		*data_len_total -= data_len_frag;

		/* Allocate and chain the buffer for the next fragment */
		*evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
		LL_ASSERT(*evt_buf);

		net_buf_frag_add(buf, *evt_buf);

		/* Continue to fragment until last partial PDU data fragment,
		 * remainder PDU data's HCI event will be prepare by caller.
		 */
	} while (*data_len > data_len_max);
}
6833
/* Fragment the AD data of a chained list of received extended
 * advertising PDUs into HCI extended advertising report events, marking
 * every emitted fragment with data status "partial". Stops when only
 * the final fragment remains, which the caller prepares (with its real
 * data status); *data/*data_len/*sec_phy/*tx_pwr then describe that
 * remainder.
 */
static void ext_adv_data_frag(const struct node_rx_pdu *node_rx_data,
			      uint8_t evt_type, uint8_t phy,
			      uint8_t *const sec_phy, uint8_t adv_addr_type,
			      const uint8_t *adv_addr, uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t *const tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi,
			      uint8_t data_len_max, uint16_t data_len_total,
			      uint8_t *const data_len,
			      const uint8_t **const data, struct net_buf *buf,
			      struct net_buf **const evt_buf)
{
	/* All fragments emitted here are flagged as partial data */
	evt_type |= (BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL << 5);

	do {
		/* Fragment the PDU data */
		ext_adv_pdu_frag(evt_type, phy, *sec_phy, adv_addr_type,
				 adv_addr, direct_addr_type, direct_addr,
				 rl_idx, *tx_pwr, rssi, interval_le16, adi,
				 data_len_max, &data_len_total, data_len,
				 data, buf, evt_buf);

		/* Check if more PDUs in the list */
		node_rx_data = node_rx_data->rx_ftr.extra;
		if (node_rx_data) {
			if (*data_len >= data_len_total) {
				/* Last fragment restricted to maximum scan
				 * data length, caller will prepare the last
				 * HCI fragment event.
				 */
				break;
			} else if (*data_len) {
				/* Last fragment of current PDU data */
				ext_adv_pdu_frag(evt_type, phy, *sec_phy,
						 adv_addr_type, adv_addr,
						 direct_addr_type, direct_addr,
						 rl_idx, *tx_pwr, rssi,
						 interval_le16, adi,
						 data_len_max, &data_len_total,
						 data_len, data, buf, evt_buf);
			}

			/* Get next PDU data in list */
			*data_len = ext_adv_data_get(node_rx_data, sec_phy,
						     tx_pwr, data);

			/* Restrict PDU data to maximum scan data length */
			if (*data_len > data_len_total) {
				*data_len = data_len_total;
			}
		}

		/* Continue to fragment if current PDU data length less than
		 * total data length or current PDU data length greater than
		 * HCI event max length.
		 */
	} while ((*data_len < data_len_total) || (*data_len > data_len_max));
}
6893
6894 static void le_ext_adv_report(struct pdu_data *pdu_data,
6895 struct node_rx_pdu *node_rx,
6896 struct net_buf *buf, uint8_t phy)
6897 {
6898 int8_t scan_rsp_tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6899 int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6900 struct node_rx_pdu *node_rx_scan_data = NULL;
6901 struct node_rx_pdu *node_rx_data = NULL;
6902 const struct pdu_adv_adi *adi = NULL;
6903 uint16_t scan_data_len_total = 0U;
6904 struct node_rx_pdu *node_rx_curr;
6905 struct node_rx_pdu *node_rx_next;
6906 const uint8_t *scan_data = NULL;
6907 uint8_t scan_data_status = 0U;
6908 uint8_t direct_addr_type = 0U;
6909 uint16_t data_len_total = 0U;
6910 uint8_t *direct_addr = NULL;
6911 uint16_t interval_le16 = 0U;
6912 const uint8_t *data = NULL;
6913 uint8_t scan_data_len = 0U;
6914 uint8_t adv_addr_type = 0U;
6915 uint8_t sec_phy_scan = 0U;
6916 uint8_t *adv_addr = NULL;
6917 uint8_t data_status = 0U;
6918 struct net_buf *evt_buf;
6919 bool devmatch = false;
6920 uint8_t data_len = 0U;
6921 uint8_t evt_type = 0U;
6922 uint8_t sec_phy = 0U;
6923 uint8_t data_len_max;
6924 uint8_t rl_idx = 0U;
6925 struct pdu_adv *adv;
6926 int8_t rssi;
6927
6928 /* NOTE: This function uses a lot of initializers before the check and
6929 * return below, as an exception to initializing close to their locality
6930 * of reference. This is acceptable as the return is unlikely in typical
6931 * Controller use.
6932 */
6933 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6934 !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
6935 node_rx_extra_list_release(node_rx->rx_ftr.extra);
6936 return;
6937 }
6938
6939 #if defined(CONFIG_BT_CTLR_PRIVACY)
6940 rl_idx = ll_rl_size_get();
6941 #endif /* CONFIG_BT_CTLR_PRIVACY */
6942
6943 adv = (void *)pdu_data;
6944 node_rx_curr = node_rx;
6945 node_rx_next = node_rx_curr->rx_ftr.extra;
6946 do {
6947 int8_t tx_pwr_curr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6948 struct pdu_adv_adi *adi_curr = NULL;
6949 uint8_t direct_addr_type_curr = 0U;
6950 bool direct_resolved_curr = false;
6951 uint8_t *direct_addr_curr = NULL;
6952 uint8_t adv_addr_type_curr = 0U;
6953 struct pdu_adv_com_ext_adv *p;
6954 uint8_t *adv_addr_curr = NULL;
6955 uint8_t data_len_curr = 0U;
6956 uint8_t *data_curr = NULL;
6957 struct pdu_adv_ext_hdr *h;
6958 uint8_t sec_phy_curr = 0U;
6959 uint8_t evt_type_curr;
6960 uint8_t hdr_buf_len;
6961 uint8_t hdr_len;
6962 uint8_t *ptr;
6963
6964 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6965 bool direct_report_curr = node_rx_curr->rx_ftr.direct;
6966 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6967
6968 #if defined(CONFIG_BT_CTLR_PRIVACY)
6969 uint8_t rl_idx_curr = node_rx_curr->rx_ftr.rl_idx;
6970
6971 direct_resolved_curr = node_rx_curr->rx_ftr.direct_resolved;
6972 #endif /* CONFIG_BT_CTLR_PRIVACY */
6973
6974 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
6975 defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
6976 const bool devmatch_curr = node_rx_curr->rx_ftr.devmatch;
6977 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
6978
6979 /* The Link Layer currently returns RSSI as an absolute value */
6980 rssi = -(node_rx_curr->rx_ftr.rssi);
6981
6982 LOG_DBG("phy= 0x%x, type= 0x%x, len= %u, tat= %u, rat= %u,"
6983 " rssi=%d dB", phy, adv->type, adv->len, adv->tx_addr,
6984 adv->rx_addr, rssi);
6985
6986 p = (void *)&adv->adv_ext_ind;
6987 h = (void *)p->ext_hdr_adv_data;
6988 ptr = (void *)h;
6989
6990 LOG_DBG(" Ext. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
6991
6992 evt_type_curr = p->adv_mode;
6993
6994 if (!p->ext_hdr_len) {
6995 hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
6996
6997 goto no_ext_hdr;
6998 }
6999
7000 ptr = h->data;
7001
7002 if (h->adv_addr) {
7003 /* AdvA is RFU in AUX_CHAIN_IND */
7004 if (node_rx_curr == node_rx ||
7005 node_rx_curr == node_rx->rx_ftr.extra) {
7006 bt_addr_le_t addr;
7007
7008 adv_addr_type_curr = adv->tx_addr;
7009 adv_addr_curr = ptr;
7010
7011 addr.type = adv->tx_addr;
7012 (void)memcpy(addr.a.val, ptr, sizeof(bt_addr_t));
7013
7014 LOG_DBG(" AdvA: %s", bt_addr_le_str(&addr));
7015 }
7016
7017 ptr += BDADDR_SIZE;
7018 }
7019
7020 if (h->tgt_addr) {
7021 /* TargetA is RFU in AUX_CHAIN_IND */
7022 if (node_rx_curr == node_rx ||
7023 node_rx_curr == node_rx->rx_ftr.extra) {
7024 struct lll_scan *lll;
7025 bt_addr_le_t addr;
7026
7027 lll = node_rx->rx_ftr.param;
7028
7029 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
7030 direct_addr_type_curr =
7031 ext_adv_direct_addr_type(lll,
7032 direct_resolved_curr,
7033 direct_report_curr,
7034 adv->rx_addr, ptr);
7035 #else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7036 direct_addr_type_curr =
7037 ext_adv_direct_addr_type(lll,
7038 direct_resolved_curr,
7039 false, adv->rx_addr,
7040 ptr);
7041 #endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7042
7043 direct_addr_curr = ptr;
7044
7045 addr.type = adv->rx_addr;
7046 (void)memcpy(addr.a.val, direct_addr_curr,
7047 sizeof(bt_addr_t));
7048
7049 LOG_DBG(" TgtA: %s", bt_addr_le_str(&addr));
7050 }
7051
7052 ptr += BDADDR_SIZE;
7053 }
7054
7055 if (h->cte_info) {
7056 /* CTEInfo is RFU */
7057 ptr += 1;
7058 }
7059
7060 if (h->adi) {
7061 adi_curr = (void *)ptr;
7062
7063 ptr += sizeof(*adi);
7064
7065 LOG_DBG(" AdvDataInfo DID = 0x%x, SID = 0x%x",
7066 PDU_ADV_ADI_DID_GET(adi_curr), PDU_ADV_ADI_SID_GET(adi_curr));
7067 }
7068
7069 if (h->aux_ptr) {
7070 struct pdu_adv_aux_ptr *aux_ptr;
7071
7072 /* AuxPtr is RFU for connectable or scannable AUX_ADV_IND */
7073 if (node_rx_curr != node_rx->rx_ftr.extra ||
7074 evt_type_curr == 0U) {
7075 uint8_t aux_phy;
7076
7077 aux_ptr = (void *)ptr;
7078
7079 /* Don't report if invalid phy or AUX_ADV_IND was not received
7080 * See BT Core 5.4, Vol 6, Part B, Section 4.4.3.5:
7081 * If the Controller does not listen for or does not receive the
7082 * AUX_ADV_IND PDU, no report shall be generated
7083 */
7084 if ((node_rx_curr == node_rx && !node_rx_next) ||
7085 PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
7086 struct node_rx_ftr *ftr;
7087
7088 ftr = &node_rx->rx_ftr;
7089 node_rx_extra_list_release(ftr->extra);
7090 return;
7091 }
7092
7093
7094 sec_phy_curr = HCI_AUX_PHY_TO_HCI_PHY(
7095 PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7096
7097 aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7098
7099 LOG_DBG(" AuxPtr chan_idx = %u, ca = %u, offs_units "
7100 "= %u offs = 0x%x, phy = 0x%x",
7101 aux_ptr->chan_idx, aux_ptr->ca,
7102 aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr),
7103 aux_phy);
7104 }
7105
7106 ptr += sizeof(*aux_ptr);
7107 }
7108
7109 if (h->sync_info) {
7110 struct pdu_adv_sync_info *si;
7111
7112 si = (void *)ptr;
7113 ptr += sizeof(*si);
7114
7115 interval_le16 = si->interval;
7116
7117 LOG_DBG(" SyncInfo offs = %u, offs_unit = 0x%x, "
7118 "interval = 0x%x, sca = 0x%x, "
7119 "chan map = 0x%x 0x%x 0x%x 0x%x 0x%x, "
7120 "AA = 0x%x%x%x%x, CRC = 0x%x 0x%x 0x%x, "
7121 "evt cntr = 0x%x",
7122 PDU_ADV_SYNC_INFO_OFFSET_GET(si),
7123 PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si),
7124 sys_le16_to_cpu(si->interval),
7125 ((si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7126 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
7127 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS),
7128 si->sca_chm[0], si->sca_chm[1], si->sca_chm[2],
7129 si->sca_chm[3],
7130 (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7131 ~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK),
7132 si->aa[3], si->aa[2], si->aa[1], si->aa[0],
7133 si->crc_init[0], si->crc_init[1],
7134 si->crc_init[2], sys_le16_to_cpu(si->evt_cntr));
7135 }
7136
7137 if (h->tx_pwr) {
7138 tx_pwr_curr = *(int8_t *)ptr;
7139 ptr++;
7140
7141 LOG_DBG(" Tx pwr= %d dB", tx_pwr_curr);
7142 }
7143
7144 hdr_len = ptr - (uint8_t *)p;
7145 hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
7146 if (hdr_len > hdr_buf_len) {
7147 LOG_WRN(" Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
7148 } else {
7149 uint8_t acad_len = hdr_buf_len - hdr_len;
7150
7151 if (acad_len) {
7152 ptr += acad_len;
7153 hdr_len += acad_len;
7154 }
7155 }
7156
7157 no_ext_hdr:
7158 if (hdr_len < adv->len) {
7159 data_len_curr = adv->len - hdr_len;
7160 data_curr = ptr;
7161
7162 LOG_DBG(" AD Data (%u): <todo>", data_len);
7163 }
7164
7165 if (data_len_total + data_len_curr > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
7166 /* Truncating advertising data
7167 * Note that this has to be done at a PDU boundary, so stop
7168 * processing nodes from this one forward
7169 */
7170 if (scan_data) {
7171 scan_data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7172 } else {
7173 data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7174 }
7175 break;
7176 }
7177
7178 if (node_rx_curr == node_rx) {
7179 evt_type = evt_type_curr;
7180 adv_addr_type = adv_addr_type_curr;
7181 adv_addr = adv_addr_curr;
7182 direct_addr_type = direct_addr_type_curr;
7183 direct_addr = direct_addr_curr;
7184 adi = adi_curr;
7185 sec_phy = sec_phy_curr;
7186 node_rx_data = node_rx_curr;
7187 /* Adv data in ADV_EXT_IND is RFU */
7188 data_len = 0U;
7189 data_len_total = 0U;
7190 data = NULL;
7191 scan_data_len_total = 0U;
7192 tx_pwr = tx_pwr_curr;
7193
7194 #if defined(CONFIG_BT_CTLR_PRIVACY)
7195 rl_idx = rl_idx_curr;
7196 #endif /* CONFIG_BT_CTLR_PRIVACY */
7197
7198 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7199 defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7200 devmatch = devmatch_curr;
7201 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7202
7203 } else {
7204 /* TODO: Validate current value with previous */
7205
7206 /* Detect the scan response in the list of node_rx */
7207 if (node_rx_curr->rx_ftr.scan_rsp) {
7208 node_rx_scan_data = node_rx_curr;
7209 if (sec_phy_curr) {
7210 sec_phy_scan = sec_phy_curr;
7211 } else {
7212 sec_phy_scan = sec_phy;
7213 }
7214 scan_data_len = data_len_curr;
7215 scan_data = data_curr;
7216 scan_rsp_tx_pwr = tx_pwr_curr;
7217 }
7218
7219 if (!adv_addr) {
7220 adv_addr_type = adv_addr_type_curr;
7221 adv_addr = adv_addr_curr;
7222 }
7223
7224 if (!direct_addr) {
7225 direct_addr_type = direct_addr_type_curr;
7226 direct_addr = direct_addr_curr;
7227 }
7228
7229 if (scan_data) {
7230 scan_data_len_total += data_len_curr;
7231 } else if (!data) {
7232 node_rx_data = node_rx_curr;
7233 data_len = data_len_curr;
7234 data_len_total = data_len;
7235 data = data_curr;
7236 tx_pwr = tx_pwr_curr;
7237 } else {
7238 data_len_total += data_len_curr;
7239 }
7240
7241 #if defined(CONFIG_BT_CTLR_PRIVACY)
7242 if (rl_idx >= ll_rl_size_get()) {
7243 rl_idx = rl_idx_curr;
7244 }
7245 #endif /* CONFIG_BT_CTLR_PRIVACY */
7246
7247 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7248 defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7249 if (!devmatch) {
7250 devmatch = devmatch_curr;
7251 }
7252 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7253 }
7254
7255 if (!node_rx_next) {
7256 bool has_aux_ptr = !!sec_phy_curr;
7257
7258 if (scan_data) {
7259 if (has_aux_ptr) {
7260 scan_data_status =
7261 BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7262 }
7263 } else if (has_aux_ptr) {
7264 data_status =
7265 BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7266 }
7267
7268 break;
7269 }
7270
7271 node_rx_curr = node_rx_next;
7272 node_rx_next = node_rx_curr->rx_ftr.extra;
7273 adv = (void *)node_rx_curr->pdu;
7274 } while (1);
7275
7276 if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
7277 IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) &&
7278 !devmatch) {
7279 node_rx_extra_list_release(node_rx->rx_ftr.extra);
7280 return;
7281 }
7282
7283 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
7284 if (adv_addr) {
7285 if (dup_scan &&
7286 dup_found(PDU_ADV_TYPE_EXT_IND, adv_addr_type, adv_addr,
7287 (evt_type & BIT_MASK(2)), adi, data_status)) {
7288 node_rx_extra_list_release(node_rx->rx_ftr.extra);
7289 return;
7290 }
7291 }
7292 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
7293
7294 /* If data incomplete */
7295 if (data_status) {
7296 /* Data incomplete and no more to come */
7297 if (!(adv_addr ||
7298 (adi && ((tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) ||
7299 data)))) {
7300 /* No device address and no valid AD data parsed or
7301 * Tx Power present for this PDU chain that has ADI,
7302 * skip HCI event generation.
7303 * In other terms, generate HCI event if device address
7304 * is present or if Tx pwr and/or data is present from
7305 * anonymous device.
7306 */
7307 node_rx_extra_list_release(node_rx->rx_ftr.extra);
7308 return;
7309 }
7310 }
7311
7312 /* Set directed advertising bit */
7313 if (direct_addr) {
7314 evt_type |= BT_HCI_LE_ADV_EVT_TYPE_DIRECT;
7315 }
7316
7317 /* HCI fragment */
7318 evt_buf = buf;
7319 data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7320 sizeof(struct bt_hci_evt_le_meta_event) -
7321 sizeof(struct bt_hci_evt_le_ext_advertising_report) -
7322 sizeof(struct bt_hci_evt_le_ext_advertising_info);
7323
7324 /* If PDU data length less than total data length or PDU data length
7325 * greater than maximum HCI event data length, then fragment.
7326 */
7327 if ((data_len < data_len_total) || (data_len > data_len_max)) {
7328 ext_adv_data_frag(node_rx_data, evt_type, phy, &sec_phy,
7329 adv_addr_type, adv_addr, direct_addr_type,
7330 direct_addr, rl_idx, &tx_pwr, rssi,
7331 interval_le16, adi, data_len_max,
7332 data_len_total, &data_len, &data, buf,
7333 &evt_buf);
7334 }
7335
7336 /* Set data status bits */
7337 evt_type |= (data_status << 5);
7338
7339 /* Start constructing the adv event for remainder of the PDU data */
7340 ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type, adv_addr,
7341 direct_addr_type, direct_addr, rl_idx, tx_pwr, rssi,
7342 interval_le16, adi, data_len, data, evt_buf);
7343
7344 /* If scan response event to be constructed */
7345 if (!scan_data) {
7346 node_rx_extra_list_release(node_rx->rx_ftr.extra);
7347
7348 return;
7349 }
7350
7351 /* Set scan response bit */
7352 evt_type |= BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP;
7353
7354 /* Clear the data status bits */
7355 evt_type &= ~(BIT_MASK(2) << 5);
7356
7357 /* Allocate, append as buf fragment and construct the scan response
7358 * event.
7359 */
7360 evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7361 LL_ASSERT(evt_buf);
7362
7363 net_buf_frag_add(buf, evt_buf);
7364
7365 /* If PDU data length less than total data length or PDU data length
7366 * greater than maximum HCI event data length, then fragment.
7367 */
7368 if ((scan_data_len < scan_data_len_total) ||
7369 (scan_data_len > data_len_max)) {
7370 ext_adv_data_frag(node_rx_scan_data, evt_type, phy,
7371 &sec_phy_scan, adv_addr_type, adv_addr,
7372 direct_addr_type, direct_addr, rl_idx,
7373 &scan_rsp_tx_pwr, rssi, interval_le16, adi,
7374 data_len_max, scan_data_len_total,
7375 &scan_data_len, &scan_data, buf, &evt_buf);
7376 }
7377
7378 /* set scan data status bits */
7379 evt_type |= (scan_data_status << 5);
7380
7381 /* Start constructing the event for remainder of the PDU data */
7382 ext_adv_info_fill(evt_type, phy, sec_phy_scan, adv_addr_type, adv_addr,
7383 direct_addr_type, direct_addr, rl_idx,
7384 scan_rsp_tx_pwr, rssi, interval_le16, adi,
7385 scan_data_len, scan_data, evt_buf);
7386
7387 node_rx_extra_list_release(node_rx->rx_ftr.extra);
7388 }
7389
7390 static void le_adv_ext_report(struct pdu_data *pdu_data,
7391 struct node_rx_pdu *node_rx,
7392 struct net_buf *buf, uint8_t phy)
7393 {
7394 struct pdu_adv *adv = (void *)pdu_data;
7395
7396 if ((adv->type == PDU_ADV_TYPE_EXT_IND) && adv->len) {
7397 le_ext_adv_report(pdu_data, node_rx, buf, phy);
7398 } else {
7399 le_ext_adv_legacy_report(pdu_data, node_rx, buf);
7400 }
7401 }
7402
7403 static void le_adv_ext_1M_report(struct pdu_data *pdu_data,
7404 struct node_rx_pdu *node_rx,
7405 struct net_buf *buf)
7406 {
7407 le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_1M);
7408 }
7409
7410 static void le_adv_ext_2M_report(struct pdu_data *pdu_data,
7411 struct node_rx_pdu *node_rx,
7412 struct net_buf *buf)
7413 {
7414 le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_2M);
7415 }
7416
7417 static void le_adv_ext_coded_report(struct pdu_data *pdu_data,
7418 struct node_rx_pdu *node_rx,
7419 struct net_buf *buf)
7420 {
7421 le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_CODED);
7422 }
7423
7424 static void le_scan_timeout(struct pdu_data *pdu_data,
7425 struct node_rx_pdu *node_rx, struct net_buf *buf)
7426 {
7427 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7428 !(le_event_mask & BT_EVT_MASK_LE_SCAN_TIMEOUT)) {
7429 return;
7430 }
7431
7432 meta_evt(buf, BT_HCI_EVT_LE_SCAN_TIMEOUT, 0U);
7433 }
7434
7435 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* Generate the HCI LE Periodic Advertising Sync Established event.
 *
 * The pdu_data pointer actually carries a struct node_rx_sync (verified by
 * the alignment assert below). On success, the event is filled from the
 * scan set's periodic sync parameters; on cancellation by the host only the
 * status field is reported.
 */
static void le_per_adv_sync_established(struct pdu_data *pdu_data,
					struct node_rx_pdu *node_rx,
					struct net_buf *buf)
{
	struct bt_hci_evt_le_per_adv_sync_established *sep;
	struct ll_scan_set *scan;
	struct node_rx_sync *se;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		       sizeof(*sep));

	/* Check for pdu field being aligned before accessing sync established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync));

	se = node;
	sep->status = se->status;

	/* Remaining event fields are left as allocated when the host
	 * cancelled the sync creation; only the status is meaningful.
	 */
	if (se->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
		return;
	}

	scan = node_rx->rx_ftr.param;

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	/* Restart duplicate filtering for this newly established sync */
	dup_periodic_adv_reset(scan->periodic.adv_addr_type,
			       scan->periodic.adv_addr,
			       scan->periodic.sid);
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);

	/* Resolved address, if private, has been populated in ULL */
	sep->adv_addr.type = scan->periodic.adv_addr_type;
	(void)memcpy(sep->adv_addr.a.val, scan->periodic.adv_addr, BDADDR_SIZE);

	sep->sid = scan->periodic.sid;
	sep->phy = find_lsb_set(se->phy);
	sep->interval = sys_cpu_to_le16(se->interval);
	sep->clock_accuracy = se->sca;
}
7488
/* Generate HCI LE Periodic Advertising Report event(s) and, when a BIGInfo
 * is present in the ACAD, an LE BIGInfo Advertising Report, from a received
 * AUX_SYNC_IND/AUX_CHAIN_IND PDU.
 *
 * The extended header is parsed to locate CTEInfo, ADI, AuxPtr, Tx power,
 * ACAD and AD data; the AD data is fragmented across as many report events
 * as needed. Reports are suppressed after sync terminate, when reception is
 * disabled, or (with ADI support) for duplicates.
 */
static void le_per_adv_sync_report(struct pdu_data *pdu_data,
				   struct node_rx_pdu *node_rx,
				   struct net_buf *buf)
{
	struct node_rx_ftr *ftr = &node_rx->rx_ftr;
	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	struct pdu_adv *adv = (void *)pdu_data;
	struct pdu_adv_aux_ptr *aux_ptr = NULL;
	const struct pdu_adv_adi *adi = NULL;
	uint8_t cte_type = BT_HCI_LE_NO_CTE;
	const struct ll_sync_set *sync;
	struct pdu_adv_com_ext_adv *p;
	struct pdu_adv_ext_hdr *h;
	uint16_t data_len_total;
	struct net_buf *evt_buf;
	uint8_t data_len = 0U;
	uint8_t acad_len = 0U;
	uint8_t *data = NULL;
	uint8_t data_len_max;
	uint8_t *acad = NULL;
	uint8_t hdr_buf_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	int8_t rssi;
	bool accept;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	     !(le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT))) {
		return;
	}

	/* NOTE: The timeout_reload field in the sync context is checked under
	 *       race condition between HCI Tx and Rx thread wherein a sync
	 *       terminate was performed which resets the timeout_reload field
	 *       before releasing the sync context back into its memory pool.
	 *       It is important that timeout_reload field is at safe offset
	 *       inside the sync context such that it is not corrupt while being
	 *       in the memory pool.
	 *
	 *       This check ensures reports are not sent out after sync
	 *       terminate.
	 */
	sync = HDR_LLL2ULL(ftr->param);
	if (unlikely(!sync->timeout_reload)) {
		return;
	}

	data_len_total = ftr->aux_data_len;

	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	    (ftr->aux_failed || data_len_total > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
		/* Aux reception failed or data over limit: report an empty
		 * event with "incomplete, data truncated" status.
		 */
		struct bt_hci_evt_le_per_advertising_report *sep;

		sep = meta_evt(buf,
			       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
			       sizeof(*sep));

		sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
		sep->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		sep->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
		sep->cte_type = BT_HCI_LE_NO_CTE;
		sep->data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
		sep->length = 0;

		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(ftr->rssi);

	LOG_DBG("len = %u, rssi = %d", adv->len, rssi);

	p = (void *)&adv->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = (void *)h;

	LOG_DBG(" Per. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);

	if (!p->ext_hdr_len) {
		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

		goto no_ext_hdr;
	}

	ptr = h->data;

	/* AdvA/TargetA are not reported in periodic advertising reports;
	 * skip over them to reach the fields of interest.
	 */
	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		struct pdu_cte_info *cte_info;

		cte_info = (void *)ptr;
		cte_type = cte_info->type;
		ptr++;

		LOG_DBG(" CTE type= %d", cte_type);
	}

	if (h->adi) {
		adi = (void *)ptr;

		ptr += sizeof(struct pdu_adv_adi);
	}

	/* AuxPtr */
	if (h->aux_ptr) {
		uint8_t aux_phy;

		aux_ptr = (void *)ptr;
		/* Do not report at all when the chained PDU's PHY is invalid */
		if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
			return;
		}

		ptr += sizeof(*aux_ptr);

		aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));

		LOG_DBG(" AuxPtr chan_idx = %u, ca = %u, offs_units "
			"= %u offs = 0x%x, phy = 0x%x",
			aux_ptr->chan_idx, aux_ptr->ca,
			aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
	}

	/* No SyncInfo */
	if (h->sync_info) {
		ptr += sizeof(struct pdu_adv_sync_info);
	}

	/* Tx Power */
	if (h->tx_pwr) {
		tx_pwr = *(int8_t *)ptr;
		ptr++;

		LOG_DBG(" Tx pwr= %d dB", tx_pwr);
	}

	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		LOG_WRN(" Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
	} else {
		/* Remaining header bytes are the ACAD (may hold a BIGInfo) */
		acad_len = hdr_buf_len - hdr_len;
		if (acad_len) {
			acad = ptr;

			ptr += acad_len;
			hdr_len += acad_len;
		}
	}

no_ext_hdr:
	if (hdr_len < adv->len) {
		data_len = adv->len - hdr_len;
		data = ptr;

		LOG_DBG(" AD Data (%u): <todo>", data_len);
	}

	if (0) {

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
		   adi) {
		uint8_t data_status;

		data_status = (aux_ptr) ?
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL :
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;

		/* With ADI present, also suppress duplicates when the host
		 * requested duplicate filtering for this sync.
		 */
		accept = sync->rx_enable && ftr->sync_rx_enabled &&
			 (!sync->nodups ||
			  !dup_found(PDU_ADV_TYPE_EXT_IND,
				     sync->peer_id_addr_type,
				     sync->peer_id_addr,
				     DUP_EXT_ADV_MODE_PERIODIC,
				     adi, data_status));
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	} else {
		accept = sync->rx_enable && ftr->sync_rx_enabled;
	}

	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
		       sizeof(struct bt_hci_evt_le_meta_event) -
		       sizeof(struct bt_hci_evt_le_per_advertising_report);

	evt_buf = buf;

	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) && accept) {

		/* Pass verdict in LL.TS.p19 section 4.2.3.6 Extended Scanning,
		 * Passive, Periodic Advertising Report, RSSI and TX_Power
		 * states:
		 * TX_Power is set to value of the TxPower field for the
		 * AUX_SYNC_IND received, and RSSI set to a valid value.
		 * Subsequent reports with data and the status set to
		 * "Incomplete, more data to come" or "complete" can have the
		 * TX_Power field set to 0x7F.
		 *
		 * In the implementation data_len_total is the running total
		 * AD data length so far, data_len is the current PDU's AD data
		 * length. For AUX_SYNC_IND received, data_len_total ==
		 * data_len.
		 */
		if (data_len_total > data_len) {
			/* Subsequent reports */
			tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		}

		do {
			struct bt_hci_evt_le_per_advertising_report *sep;
			uint8_t data_len_frag;
			uint8_t data_status;

			data_len_frag = MIN(data_len, data_len_max);

			/* Start constructing periodic advertising report */
			sep = meta_evt(evt_buf,
				       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
				       sizeof(*sep) + data_len_frag);

			sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
			sep->tx_power = tx_pwr;
			sep->rssi = rssi;
			sep->cte_type = cte_type;
			sep->length = data_len_frag;
			memcpy(&sep->data[0], data, data_len_frag);

			data += data_len_frag;
			data_len -= data_len_frag;

			if (data_len > 0) {
				/* Some data left in PDU, mark as partial data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;

				evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
				LL_ASSERT(evt_buf);

				net_buf_frag_add(buf, evt_buf);

				tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
			} else if (!aux_ptr) {
				/* No data left, no AuxPtr, mark as complete data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
			} else if (ftr->aux_sched &&
				   (data_len_total < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
				/* No data left, but have AuxPtr and scheduled aux scan,
				 * mark as partial data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
			} else {
				/* No data left, have AuxPtr but not aux scan scheduled,
				 * mark as incomplete data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			}

			sep->data_status = data_status;
		} while (data_len > 0);

		/* Mark the caller's buffer consumed so a BIGInfo report below
		 * allocates its own fragment.
		 */
		evt_buf = NULL;
	}

	if ((le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT) && acad &&
	    (acad_len >= (PDU_BIG_INFO_CLEARTEXT_SIZE +
			  PDU_ADV_DATA_HEADER_SIZE))) {
		struct bt_hci_evt_le_biginfo_adv_report *sep;
		struct pdu_big_info *bi;
		uint8_t bi_size;
		uint8_t phy;

		/* FIXME: Parse and find the BIGInfo */
		if (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] != BT_DATA_BIG_INFO) {
			return;
		}

		bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

		/* Do not report if phy is invalid or unsupported */
		phy = (bi->chm_phy[4] >> 5);
		if ((phy > EXT_ADV_AUX_PHY_LE_CODED) ||
		    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
		     (phy == EXT_ADV_AUX_PHY_LE_CODED))) {
			return;
		}

		/* Allocate new event buffer if periodic advertising report was
		 * constructed with the caller supplied buffer.
		 */
		if (!evt_buf) {
			evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
			LL_ASSERT(evt_buf);

			net_buf_frag_add(buf, evt_buf);
		}

		/* Start constructing BIGInfo advertising report */
		sep = meta_evt(evt_buf, BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
			       sizeof(*sep));

		sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);

		/* NOTE: both sep and bi struct store little-endian values.
		 *       Multi-byte variables extracted using
		 *       PDU_BIG_INFO_ISO_*_GET macros, which return
		 *       value in host-endianness, require conversion.
		 */
		sep->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
		sep->nse = PDU_BIG_INFO_NSE_GET(bi);
		sep->iso_interval =
			sys_cpu_to_le16(PDU_BIG_INFO_ISO_INTERVAL_GET(bi));
		sep->bn = PDU_BIG_INFO_BN_GET(bi);
		sep->pto = PDU_BIG_INFO_PTO_GET(bi);
		sep->irc = PDU_BIG_INFO_IRC_GET(bi);

		sep->max_pdu = sys_cpu_to_le16(bi->max_pdu);
		sys_put_le24(PDU_BIG_INFO_SDU_INTERVAL_GET(bi),
			     sep->sdu_interval);
		sep->max_sdu = sys_cpu_to_le16(PDU_BIG_INFO_MAX_SDU_GET(bi));
		sep->phy = HCI_AUX_PHY_TO_HCI_PHY(bi->chm_phy[4] >> 5);
		sep->framing = (bi->payload_count_framing[4] >> 7) & 0x01;
		/* Encrypted BIGInfo carries extra GIV/GSKD fields; length
		 * distinguishes it from the cleartext form.
		 */
		if (bi_size == (PDU_BIG_INFO_ENCRYPTED_SIZE + 1)) {
			sep->encryption = 1U;
		} else {
			sep->encryption = 0U;
		}
	}
}
7828
7829 static void le_per_adv_sync_lost(struct pdu_data *pdu_data,
7830 struct node_rx_pdu *node_rx,
7831 struct net_buf *buf)
7832 {
7833 struct bt_hci_evt_le_per_adv_sync_lost *sep;
7834
7835 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7836 !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_LOST)) {
7837 return;
7838 }
7839
7840 sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, sizeof(*sep));
7841 sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7842 }
7843
7844 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
7845 static void le_big_sync_established(struct pdu_data *pdu,
7846 struct node_rx_pdu *node_rx,
7847 struct net_buf *buf)
7848 {
7849 struct bt_hci_evt_le_big_sync_established *sep;
7850 struct ll_sync_iso_set *sync_iso;
7851 uint32_t transport_latency_big;
7852 struct node_rx_sync_iso *se;
7853 struct lll_sync_iso *lll;
7854 uint32_t iso_interval_us;
7855 uint32_t big_sync_delay;
7856 size_t evt_size;
7857 void *node;
7858
7859 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7860 !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED)) {
7861 return;
7862 }
7863
7864 sync_iso = node_rx->rx_ftr.param;
7865 lll = &sync_iso->lll;
7866
7867 evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
7868
7869 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED, evt_size);
7870 sep->big_handle = (uint8_t)node_rx->hdr.handle;
7871
7872 /* Check for pdu field being aligned before accessing ISO sync
7873 * established event.
7874 */
7875 node = pdu;
7876 LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync_iso));
7877
7878 se = node;
7879 sep->status = se->status;
7880 if (sep->status) {
7881 return;
7882 }
7883
7884 /* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
7885 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
7886 *
7887 * BT Core v5.4 - Vol 6, Part G, Section 3.2.1: (Framed)
7888 * Transport_Latenct_BIG = BIG_Sync_Delay + PTO × (NSE / BN – IRC) * ISO_Interval +
7889 * ISO_Interval + SDU_Interval
7890 *
7891 * BT Core v5.4 - Vol 6, Part G, Section 3.2.2: (Unframed)
7892 * Transport_Latenct_BIG = BIG_Sync_Delay + (PTO × (NSE / BN – IRC) + 1) * ISO_Interval -
7893 * SDU_Interval
7894 */
7895 iso_interval_us = lll->iso_interval * ISO_INT_UNIT_US;
7896 big_sync_delay = ull_iso_big_sync_delay(lll->num_bis, lll->bis_spacing, lll->nse,
7897 lll->sub_interval, lll->phy, lll->max_pdu,
7898 lll->enc);
7899 if (lll->framing) {
7900 /* Framed */
7901 transport_latency_big = big_sync_delay +
7902 lll->pto * (lll->nse / lll->bn - lll->irc) *
7903 iso_interval_us + iso_interval_us + lll->sdu_interval;
7904 } else {
7905 /* Unframed */
7906 transport_latency_big = big_sync_delay +
7907 (lll->pto * (lll->nse / lll->bn - lll->irc) + 1) *
7908 iso_interval_us - lll->sdu_interval;
7909 }
7910
7911 sys_put_le24(transport_latency_big, sep->latency);
7912 sep->nse = lll->nse;
7913 sep->bn = lll->bn;
7914 sep->pto = lll->pto;
7915 sep->irc = lll->irc;
7916 sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
7917 sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
7918 sep->num_bis = lll->stream_count;
7919
7920 /* Connection handle list of all BISes synchronized in the BIG */
7921 for (uint8_t i = 0U; i < lll->stream_count; i++) {
7922 uint16_t handle;
7923
7924 handle = LL_BIS_SYNC_HANDLE_FROM_IDX(lll->stream_handle[i]);
7925 sep->handle[i] = sys_cpu_to_le16(handle);
7926 }
7927 }
7928
7929 static void le_big_sync_lost(struct pdu_data *pdu,
7930 struct node_rx_pdu *node_rx,
7931 struct net_buf *buf)
7932 {
7933 struct bt_hci_evt_le_big_sync_lost *sep;
7934
7935 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7936 !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_LOST)) {
7937 return;
7938 }
7939
7940 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_LOST, sizeof(*sep));
7941 sep->big_handle = (uint8_t)node_rx->hdr.handle;
7942 sep->reason = *((uint8_t *)pdu);
7943 }
7944 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
7945 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
7946 #endif /* CONFIG_BT_CTLR_ADV_EXT */
7947 #endif /* CONFIG_BT_OBSERVER */
7948
7949 #if defined(CONFIG_BT_BROADCASTER)
7950 #if defined(CONFIG_BT_CTLR_ADV_EXT)
7951 static void le_adv_ext_terminate(struct pdu_data *pdu_data,
7952 struct node_rx_pdu *node_rx,
7953 struct net_buf *buf)
7954 {
7955 struct bt_hci_evt_le_adv_set_terminated *sep;
7956
7957 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7958 !(le_event_mask & BT_EVT_MASK_LE_ADV_SET_TERMINATED)) {
7959 return;
7960 }
7961
7962 sep = meta_evt(buf, BT_HCI_EVT_LE_ADV_SET_TERMINATED, sizeof(*sep));
7963 sep->status = node_rx->rx_ftr.param_adv_term.status;
7964 sep->adv_handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
7965 sep->conn_handle =
7966 sys_cpu_to_le16(node_rx->rx_ftr.param_adv_term.conn_handle);
7967 sep->num_completed_ext_adv_evts =
7968 node_rx->rx_ftr.param_adv_term.num_events;
7969 }
7970
7971 #if defined(CONFIG_BT_CTLR_ADV_ISO)
7972 static void le_big_complete(struct pdu_data *pdu_data,
7973 struct node_rx_pdu *node_rx,
7974 struct net_buf *buf)
7975 {
7976 struct bt_hci_evt_le_big_complete *sep;
7977 struct ll_adv_iso_set *adv_iso;
7978 struct lll_adv_iso *lll;
7979 size_t evt_size;
7980
7981 adv_iso = node_rx->rx_ftr.param;
7982 lll = &adv_iso->lll;
7983
7984 evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
7985
7986 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_COMPLETE, evt_size);
7987
7988 sep->status = BT_HCI_ERR_SUCCESS;
7989 sep->big_handle = (uint8_t)node_rx->hdr.handle;
7990
7991 if (sep->status) {
7992 return;
7993 }
7994
7995 /* FIXME: Fill sync delay and latency */
7996 sys_put_le24(0, sep->sync_delay);
7997 sys_put_le24(0, sep->latency);
7998
7999 sep->phy = find_lsb_set(lll->phy);
8000 sep->nse = lll->nse;
8001 sep->bn = lll->bn;
8002 sep->pto = lll->pto;
8003 sep->irc = lll->irc;
8004 sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
8005 sep->num_bis = lll->num_bis;
8006
8007 /* Connection handle list of all BISes in the BIG */
8008 for (uint8_t i = 0U; i < lll->num_bis; i++) {
8009 uint16_t handle;
8010
8011 handle = LL_BIS_ADV_HANDLE_FROM_IDX(lll->stream_handle[i]);
8012 sep->handle[i] = sys_cpu_to_le16(handle);
8013 }
8014 }
8015
8016 static void le_big_terminate(struct pdu_data *pdu,
8017 struct node_rx_pdu *node_rx,
8018 struct net_buf *buf)
8019 {
8020 struct bt_hci_evt_le_big_terminate *sep;
8021
8022 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8023 !(le_event_mask & BT_EVT_MASK_LE_BIG_TERMINATED)) {
8024 return;
8025 }
8026
8027 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_TERMINATE, sizeof(*sep));
8028 sep->big_handle = (uint8_t)node_rx->hdr.handle;
8029 sep->reason = *((uint8_t *)pdu);
8030 }
8031 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8032 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8033 #endif /* CONFIG_BT_BROADCASTER */
8034
8035 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8036 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Encode an HCI LE Scan Request Received meta event from a SCAN_REQ PDU.
 * When the event is masked, the request is only logged, not reported.
 */
static void le_scan_req_received(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_scan_req_received *sep;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_REQ_RECEIVED)) {
		/* Event masked: decode locally for the debug log only */
		bt_addr_le_t addr;
		uint8_t handle;
		int8_t rssi;

		handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
		addr.type = adv->tx_addr;
		memcpy(&addr.a.val[0], &adv->scan_req.scan_addr[0],
		       sizeof(bt_addr_t));

		/* The Link Layer currently returns RSSI as an absolute value */
		rssi = -(node_rx->rx_ftr.rssi);

		LOG_DBG("handle: %d, addr: %s, rssi: %d dB.", handle, bt_addr_le_str(&addr), rssi);

		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, sizeof(*sep));
	sep->handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
	sep->addr.type = adv->tx_addr;
	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
	       sizeof(bt_addr_t));

	/* NOTE: the braces below are completed across the #if/#else so that
	 * exactly one of the two paths fills in the reported address.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
				  &sep->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		sep->addr.type += 2U;
	} else {
#else
	if (1) {
#endif
		sep->addr.type = adv->tx_addr;
		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}
}
8090 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8091
8092 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
/* Encode the vendor-specific Scan Request RX event from a SCAN_REQ PDU,
 * gated by the vendor event mask.
 */
static void le_vs_scan_req_received(struct pdu_data *pdu,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	struct pdu_adv *adv = (void *)pdu;
	struct bt_hci_evt_vs_scan_req_rx *sep;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif

	if (!(vs_events_mask & BT_EVT_MASK_VS_SCAN_REQ_RX)) {
		return;
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_SCAN_REQ_RX, sizeof(*sep));
	sep->addr.type = adv->tx_addr;
	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
	       sizeof(bt_addr_t));

	/* NOTE: the braces below are completed across the #if/#else so that
	 * exactly one of the two paths fills in the reported address.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
				  &sep->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		sep->addr.type += 2U;
	} else {
#else
	if (1) {
#endif
		sep->addr.type = adv->tx_addr;
		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	sep->rssi = -(node_rx->rx_ftr.rssi);
}
8133 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
8134 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8135
8136 #if defined(CONFIG_BT_CONN)
/* Encode either the LE Enhanced Connection Complete or the legacy LE
 * Connection Complete meta event from a connection-complete RX node,
 * preferring the enhanced form when the Host has it unmasked.
 * Also updates the current RPA (privacy) and the connection count.
 */
static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
			     struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_complete *lecc;
	struct node_rx_cc *cc;
	uint8_t status;
	void *node;

	/* Check for pdu field being aligned before accessing connection
	 * complete event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

	cc = node;
	status = cc->status;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!status) {
		/* Update current RPA */
		ll_rl_crpa_set(cc->peer_addr_type,
			       &cc->peer_addr[0], 0xff,
			       &cc->peer_rpa[0]);
	}
#endif

	/* Bail out when the meta event, or both complete-event variants,
	 * are masked. Without privacy/ext-adv the enhanced variant does not
	 * exist, hence the constant 1 in the #else arm.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_CONN_COMPLETE) &&
#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	     !(le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE))) {
#else
	     1)) {
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
		return;
	}

	/* Only a successful completion counts as a live connection */
	if (!status) {
		conn_count++;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	if (le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE) {
		struct bt_hci_evt_le_enh_conn_complete *leecc;

		leecc = meta_evt(buf, BT_HCI_EVT_LE_ENH_CONN_COMPLETE,
				 sizeof(*leecc));

		/* On failure, report a zeroed event with only the status */
		if (status) {
			(void)memset(leecc, 0x00, sizeof(*leecc));
			leecc->status = status;
			return;
		}

		leecc->status = 0x00;
		leecc->handle = sys_cpu_to_le16(handle);
		leecc->role = cc->role;

		leecc->peer_addr.type = cc->peer_addr_type;
		memcpy(&leecc->peer_addr.a.val[0], &cc->peer_addr[0],
		       BDADDR_SIZE);

#if defined(CONFIG_BT_CTLR_PRIVACY)
		memcpy(&leecc->local_rpa.val[0], &cc->local_rpa[0],
		       BDADDR_SIZE);
		memcpy(&leecc->peer_rpa.val[0], &cc->peer_rpa[0],
		       BDADDR_SIZE);
#else /* !CONFIG_BT_CTLR_PRIVACY */
		/* No privacy support: RPA fields are reported as all-zero */
		memset(&leecc->local_rpa.val[0], 0, BDADDR_SIZE);
		memset(&leecc->peer_rpa.val[0], 0, BDADDR_SIZE);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		leecc->interval = sys_cpu_to_le16(cc->interval);
		leecc->latency = sys_cpu_to_le16(cc->latency);
		leecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
		leecc->clock_accuracy = cc->sca;
		return;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */

	/* Legacy LE Connection Complete event */
	lecc = meta_evt(buf, BT_HCI_EVT_LE_CONN_COMPLETE, sizeof(*lecc));

	/* On failure, report a zeroed event with only the status */
	if (status) {
		(void)memset(lecc, 0x00, sizeof(*lecc));
		lecc->status = status;
		return;
	}

	lecc->status = 0x00;
	lecc->handle = sys_cpu_to_le16(handle);
	lecc->role = cc->role;
	/* Legacy event only distinguishes public/random (bit 0) */
	lecc->peer_addr.type = cc->peer_addr_type & 0x1;
	memcpy(&lecc->peer_addr.a.val[0], &cc->peer_addr[0], BDADDR_SIZE);
	lecc->interval = sys_cpu_to_le16(cc->interval);
	lecc->latency = sys_cpu_to_le16(cc->latency);
	lecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
	lecc->clock_accuracy = cc->sca;
}
8234
8235 void hci_disconn_complete_encode(struct pdu_data *pdu_data, uint16_t handle,
8236 struct net_buf *buf)
8237 {
8238 struct bt_hci_evt_disconn_complete *ep;
8239
8240 if (!(event_mask & BT_EVT_MASK_DISCONN_COMPLETE)) {
8241 return;
8242 }
8243
8244 hci_evt_create(buf, BT_HCI_EVT_DISCONN_COMPLETE, sizeof(*ep));
8245 ep = net_buf_add(buf, sizeof(*ep));
8246
8247 ep->status = 0x00;
8248 ep->handle = sys_cpu_to_le16(handle);
8249 ep->reason = *((uint8_t *)pdu_data);
8250 }
8251
8252 void hci_disconn_complete_process(uint16_t handle)
8253 {
8254 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
8255 /* Clear any pending packets upon disconnection */
8256 /* Note: This requires linear handle values starting from 0 */
8257 if (handle >= ARRAY_SIZE(hci_hbuf_pend)) {
8258 return;
8259 }
8260
8261 hci_hbuf_acked += hci_hbuf_pend[handle];
8262 hci_hbuf_pend[handle] = 0U;
8263 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
8264
8265 conn_count--;
8266 }
8267
8268 static void le_conn_update_complete(struct pdu_data *pdu_data, uint16_t handle,
8269 struct net_buf *buf)
8270 {
8271 struct bt_hci_evt_le_conn_update_complete *sep;
8272 struct node_rx_cu *cu;
8273 void *node;
8274
8275 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8276 !(le_event_mask & BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE)) {
8277 return;
8278 }
8279
8280 sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE, sizeof(*sep));
8281
8282 /* Check for pdu field being aligned before accessing connection
8283 * update complete event.
8284 */
8285 node = pdu_data;
8286 LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cu));
8287
8288 cu = node;
8289 sep->status = cu->status;
8290 sep->handle = sys_cpu_to_le16(handle);
8291 sep->interval = sys_cpu_to_le16(cu->interval);
8292 sep->latency = sys_cpu_to_le16(cu->latency);
8293 sep->supv_timeout = sys_cpu_to_le16(cu->timeout);
8294 }
8295
8296 #if defined(CONFIG_BT_CTLR_LE_ENC)
8297 static void enc_refresh_complete(struct pdu_data *pdu_data, uint16_t handle,
8298 struct net_buf *buf)
8299 {
8300 struct bt_hci_evt_encrypt_key_refresh_complete *ep;
8301
8302 if (!(event_mask & BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE)) {
8303 return;
8304 }
8305
8306 hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
8307 sizeof(*ep));
8308 ep = net_buf_add(buf, sizeof(*ep));
8309
8310 ep->status = 0x00;
8311 ep->handle = sys_cpu_to_le16(handle);
8312 }
8313 #endif /* CONFIG_BT_CTLR_LE_ENC */
8314
8315 #if defined(CONFIG_BT_CTLR_LE_PING)
8316 static void auth_payload_timeout_exp(struct pdu_data *pdu_data, uint16_t handle,
8317 struct net_buf *buf)
8318 {
8319 struct bt_hci_evt_auth_payload_timeout_exp *ep;
8320
8321 if (!(event_mask_page_2 & BT_EVT_MASK_AUTH_PAYLOAD_TIMEOUT_EXP)) {
8322 return;
8323 }
8324
8325 hci_evt_create(buf, BT_HCI_EVT_AUTH_PAYLOAD_TIMEOUT_EXP, sizeof(*ep));
8326 ep = net_buf_add(buf, sizeof(*ep));
8327
8328 ep->handle = sys_cpu_to_le16(handle);
8329 }
8330 #endif /* CONFIG_BT_CTLR_LE_PING */
8331
8332 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8333 static void le_chan_sel_algo(struct pdu_data *pdu_data, uint16_t handle,
8334 struct net_buf *buf)
8335 {
8336 struct bt_hci_evt_le_chan_sel_algo *sep;
8337 struct node_rx_cs *cs;
8338
8339 cs = (void *)pdu_data;
8340
8341 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8342 !(le_event_mask & BT_EVT_MASK_LE_CHAN_SEL_ALGO)) {
8343 LOG_DBG("handle: 0x%04x, CSA: %x.", handle, cs->csa);
8344 return;
8345 }
8346
8347 sep = meta_evt(buf, BT_HCI_EVT_LE_CHAN_SEL_ALGO, sizeof(*sep));
8348
8349 sep->handle = sys_cpu_to_le16(handle);
8350 sep->chan_sel_algo = cs->csa;
8351 }
8352 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8353
8354 #if defined(CONFIG_BT_CTLR_PHY)
8355 static void le_phy_upd_complete(struct pdu_data *pdu_data, uint16_t handle,
8356 struct net_buf *buf)
8357 {
8358 struct bt_hci_evt_le_phy_update_complete *sep;
8359 struct node_rx_pu *pu;
8360
8361 pu = (void *)pdu_data;
8362
8363 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8364 !(le_event_mask & BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE)) {
8365 LOG_WRN("handle: 0x%04x, status: %x, tx: %x, rx: %x.", handle, pu->status,
8366 find_lsb_set(pu->tx), find_lsb_set(pu->rx));
8367 return;
8368 }
8369
8370 sep = meta_evt(buf, BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE, sizeof(*sep));
8371
8372 sep->status = pu->status;
8373 sep->handle = sys_cpu_to_le16(handle);
8374 sep->tx_phy = find_lsb_set(pu->tx);
8375 sep->rx_phy = find_lsb_set(pu->rx);
8376 }
8377 #endif /* CONFIG_BT_CTLR_PHY */
8378
8379 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8380 static void le_req_peer_sca_complete(struct pdu_data *pdu, uint16_t handle,
8381 struct net_buf *buf)
8382 {
8383 struct bt_hci_evt_le_req_peer_sca_complete *sep;
8384 struct node_rx_sca *scau;
8385
8386 scau = (void *)pdu;
8387
8388 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8389 !(le_event_mask & BT_EVT_MASK_LE_REQ_PEER_SCA_COMPLETE)) {
8390 LOG_WRN("handle: 0x%04x, status: %x, sca: %x.", handle,
8391 scau->status,
8392 scau->sca);
8393 return;
8394 }
8395
8396 sep = meta_evt(buf, BT_HCI_EVT_LE_REQ_PEER_SCA_COMPLETE, sizeof(*sep));
8397
8398 sep->status = scau->status;
8399 sep->handle = sys_cpu_to_le16(handle);
8400 sep->sca = scau->sca;
8401 }
8402 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8403 #endif /* CONFIG_BT_CONN */
8404
8405 #if defined(CONFIG_BT_HCI_MESH_EXT)
8406 static void mesh_adv_cplt(struct pdu_data *pdu_data,
8407 struct node_rx_pdu *node_rx,
8408 struct net_buf *buf)
8409 {
8410 struct bt_hci_evt_mesh_adv_complete *mep;
8411
8412 mep = mesh_evt(buf, BT_HCI_EVT_MESH_ADV_COMPLETE, sizeof(*mep));
8413 mep->adv_slot = ((uint8_t *)pdu_data)[0];
8414 }
8415 #endif /* CONFIG_BT_HCI_MESH_EXT */
8416
8417 /**
8418 * @brief Encode a control-PDU into an HCI buffer
8419 * @details Execution context: Host thread
8420 *
8421 * @param node_rx_pdu[in] RX node containing header and PDU
8422 * @param pdu_data[in] PDU. Same as node_rx_pdu->pdu, but more convenient
8423 * @param net_buf[out] Upwards-going HCI buffer to fill
8424 */
static void encode_control(struct node_rx_pdu *node_rx,
			   struct pdu_data *pdu_data, struct net_buf *buf)
{
	uint16_t handle;

	handle = node_rx->hdr.handle;

	/* Dispatch on the RX node type; each case either encodes an HCI
	 * event into buf or only logs. Nothing follows the switch, so
	 * `break` and `return` are equivalent exits here.
	 */
	switch (node_rx->hdr.type) {
#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:
		le_advertising_report(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_1M_REPORT:
		le_adv_ext_1M_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_2M_REPORT:
		le_adv_ext_2M_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_CODED_REPORT:
		le_adv_ext_coded_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
		le_scan_timeout(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC:
		le_per_adv_sync_established(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		le_per_adv_sync_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_LOST:
		le_per_adv_sync_lost(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
		/* Vendor-specific 16-bit IQ format when enabled, else the
		 * standard connectionless IQ report.
		 */
#if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
		vs_le_df_connectionless_iq_report(pdu_data, node_rx, buf);
#else
		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
#endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
		break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	case NODE_RX_TYPE_SYNC_ISO:
		le_big_sync_established(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_ISO_LOST:
		le_big_sync_lost(pdu_data, node_rx, buf);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
		le_adv_ext_terminate(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case NODE_RX_TYPE_BIG_COMPLETE:
		le_big_complete(pdu_data, node_rx, buf);
		break;
	case NODE_RX_TYPE_BIG_TERMINATE:
		le_big_terminate(pdu_data, node_rx, buf);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
		/* Standard event with ext adv, else the vendor event; one of
		 * the two must be available when notifications are enabled.
		 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		le_scan_req_received(pdu_data, node_rx, buf);
#elif defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
		le_vs_scan_req_received(pdu_data, node_rx, buf);
#else
		LL_ASSERT(0);
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		break;
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
		le_conn_complete(pdu_data, handle, buf);
		break;

	case NODE_RX_TYPE_TERMINATE:
		hci_disconn_complete_encode(pdu_data, handle, buf);
		break;

	case NODE_RX_TYPE_CONN_UPDATE:
		le_conn_update_complete(pdu_data, handle, buf);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case NODE_RX_TYPE_ENC_REFRESH:
		enc_refresh_complete(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_LE_PING)
	case NODE_RX_TYPE_APTO:
		auth_payload_timeout_exp(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
	case NODE_RX_TYPE_CHAN_SEL_ALGO:
		le_chan_sel_algo(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */

#if defined(CONFIG_BT_CTLR_PHY)
	case NODE_RX_TYPE_PHY_UPDATE:
		le_phy_upd_complete(pdu_data, handle, buf);
		return;
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	case NODE_RX_TYPE_RSSI:
		/* Log-only node type; no HCI event is generated */
		LOG_INF("handle: 0x%04x, rssi: -%d dB.", handle, pdu_data->rssi);
		return;
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case NODE_RX_TYPE_CIS_REQUEST:
		le_cis_request(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	case NODE_RX_TYPE_CIS_ESTABLISHED:
		le_cis_established(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
		le_req_peer_sca_complete(pdu_data, handle, buf);
		return;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
		vs_le_df_connection_iq_report(node_rx, buf);
#else
		le_df_connection_iq_report(node_rx, buf);
#endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
		return;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
		/* Log-only node type; no HCI event is generated */
		LOG_INF("Advertised.");
		return;
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
		/* Log-only node type; no HCI event is generated */
		LOG_INF("Scanned.");
		return;
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
		/* Log-only ISR latency/time profiling sample */
		LOG_INF("l: %u, %u, %u; t: %u, %u, %u; cpu: %u, %u, %u, %u.",
			pdu_data->profile.lcur, pdu_data->profile.lmin, pdu_data->profile.lmax,
			pdu_data->profile.cur, pdu_data->profile.min, pdu_data->profile.max,
			pdu_data->profile.radio, pdu_data->profile.lll, pdu_data->profile.ull_high,
			pdu_data->profile.ull_low);
		return;
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case NODE_RX_TYPE_MESH_ADV_CPLT:
		mesh_adv_cplt(pdu_data, node_rx, buf);
		return;

	case NODE_RX_TYPE_MESH_REPORT:
		le_advertising_report(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
	/* Vendor/user-defined node types are delegated wholesale */
	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
		hci_user_ext_encode_control(node_rx, pdu_data, buf);
		return;
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

	default:
		/* Unknown node type indicates an internal inconsistency */
		LL_ASSERT(0);
		return;
	}
}
8641
8642 #if defined(CONFIG_BT_CTLR_LE_ENC)
8643 static void le_ltk_request(struct pdu_data *pdu_data, uint16_t handle,
8644 struct net_buf *buf)
8645 {
8646 struct bt_hci_evt_le_ltk_request *sep;
8647
8648 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8649 !(le_event_mask & BT_EVT_MASK_LE_LTK_REQUEST)) {
8650 return;
8651 }
8652
8653 sep = meta_evt(buf, BT_HCI_EVT_LE_LTK_REQUEST, sizeof(*sep));
8654
8655 sep->handle = sys_cpu_to_le16(handle);
8656 memcpy(&sep->rand, pdu_data->llctrl.enc_req.rand, sizeof(uint64_t));
8657 memcpy(&sep->ediv, pdu_data->llctrl.enc_req.ediv, sizeof(uint16_t));
8658 }
8659
8660 static void encrypt_change(uint8_t err, uint16_t handle,
8661 struct net_buf *buf, bool encryption_on)
8662 {
8663 struct bt_hci_evt_encrypt_change *ep;
8664
8665 if (!(event_mask & BT_EVT_MASK_ENCRYPT_CHANGE)) {
8666 return;
8667 }
8668
8669 hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_CHANGE, sizeof(*ep));
8670 ep = net_buf_add(buf, sizeof(*ep));
8671
8672 ep->status = err ? err : (encryption_on ? err : BT_HCI_ERR_UNSPECIFIED);
8673 ep->handle = sys_cpu_to_le16(handle);
8674 ep->encrypt = encryption_on ? 1 : 0;
8675 }
8676 #endif /* CONFIG_BT_CTLR_LE_ENC */
8677
8678 static void le_remote_feat_complete(uint8_t status, struct pdu_data *pdu_data,
8679 uint16_t handle, struct net_buf *buf)
8680 {
8681 struct bt_hci_evt_le_remote_feat_complete *sep;
8682
8683 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8684 !(le_event_mask & BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE)) {
8685 return;
8686 }
8687
8688 sep = meta_evt(buf, BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE, sizeof(*sep));
8689
8690 sep->status = status;
8691 sep->handle = sys_cpu_to_le16(handle);
8692 if (!status) {
8693 memcpy(&sep->features[0],
8694 &pdu_data->llctrl.feature_rsp.features[0],
8695 sizeof(sep->features));
8696 } else {
8697 (void)memset(&sep->features[0], 0x00, sizeof(sep->features));
8698 }
8699 }
8700
/* Translate an LL_UNKNOWN_RSP control PDU into the HCI failure event of
 * the procedure the peer did not recognise.
 */
static void le_unknown_rsp(struct pdu_data *pdu_data, uint16_t handle,
			   struct net_buf *buf)
{

	switch (pdu_data->llctrl.unknown_rsp.type) {
	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
		/* Peer lacks feature exchange: report unsupported remote
		 * feature; the NULL pdu yields an all-zero feature set.
		 */
		le_remote_feat_complete(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE,
					NULL, handle, buf);
		break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
		le_df_cte_req_failed(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
	default:
		/* No HCI mapping for this opcode; log and drop */
		LOG_WRN("type: 0x%02x", pdu_data->llctrl.unknown_rsp.type);
		break;
	}
}
8720
/* Map an LL_REJECT_EXT_IND control PDU onto the HCI failure event of the
 * rejected procedure, keyed by the rejected opcode.
 */
static void le_reject_ext_ind(struct pdu_data *pdu, uint16_t handle, struct net_buf *buf)
{
	switch (pdu->llctrl.reject_ext_ind.reject_opcode) {
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
		/* Forward the peer's error code to the CTE request failure */
		le_df_cte_req_failed(pdu->llctrl.reject_ext_ind.error_code, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
	default:
		/* No HCI mapping for this opcode; log and drop */
		LOG_WRN("reject opcode: 0x%02x", pdu->llctrl.reject_ext_ind.reject_opcode);
		break;
	}
}
8734 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
8735 static void le_conn_param_req(struct pdu_data *pdu_data, uint16_t handle,
8736 struct net_buf *buf)
8737 {
8738 struct bt_hci_evt_le_conn_param_req *sep;
8739
8740 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8741 !(le_event_mask & BT_EVT_MASK_LE_CONN_PARAM_REQ)) {
8742 /* event masked, reject the conn param req */
8743 ll_conn_update(handle, 2, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, 0,
8744 0, 0, 0, NULL);
8745
8746 return;
8747 }
8748
8749 sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_PARAM_REQ, sizeof(*sep));
8750
8751 sep->handle = sys_cpu_to_le16(handle);
8752 sep->interval_min = pdu_data->llctrl.conn_param_req.interval_min;
8753 sep->interval_max = pdu_data->llctrl.conn_param_req.interval_max;
8754 sep->latency = pdu_data->llctrl.conn_param_req.latency;
8755 sep->timeout = pdu_data->llctrl.conn_param_req.timeout;
8756 }
8757 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
8758
8759 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
8760 static void le_data_len_change(struct pdu_data *pdu_data, uint16_t handle,
8761 struct net_buf *buf)
8762 {
8763 struct bt_hci_evt_le_data_len_change *sep;
8764
8765 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8766 !(le_event_mask & BT_EVT_MASK_LE_DATA_LEN_CHANGE)) {
8767 return;
8768 }
8769
8770 sep = meta_evt(buf, BT_HCI_EVT_LE_DATA_LEN_CHANGE, sizeof(*sep));
8771
8772 sep->handle = sys_cpu_to_le16(handle);
8773 sep->max_tx_octets = pdu_data->llctrl.length_rsp.max_tx_octets;
8774 sep->max_tx_time = pdu_data->llctrl.length_rsp.max_tx_time;
8775 sep->max_rx_octets = pdu_data->llctrl.length_rsp.max_rx_octets;
8776 sep->max_rx_time = pdu_data->llctrl.length_rsp.max_rx_time;
8777 }
8778 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
8779
8780 #if defined(CONFIG_BT_REMOTE_VERSION)
8781 static void remote_version_info_encode(struct pdu_data *pdu_data,
8782 uint16_t handle, struct net_buf *buf)
8783 {
8784 struct pdu_data_llctrl_version_ind *ver_ind;
8785 struct bt_hci_evt_remote_version_info *ep;
8786
8787 if (!(event_mask & BT_EVT_MASK_REMOTE_VERSION_INFO)) {
8788 return;
8789 }
8790
8791 hci_evt_create(buf, BT_HCI_EVT_REMOTE_VERSION_INFO, sizeof(*ep));
8792 ep = net_buf_add(buf, sizeof(*ep));
8793
8794 ver_ind = &pdu_data->llctrl.version_ind;
8795 ep->status = 0x00;
8796 ep->handle = sys_cpu_to_le16(handle);
8797 ep->version = ver_ind->version_number;
8798 ep->manufacturer = ver_ind->company_id;
8799 ep->subversion = ver_ind->sub_version_number;
8800 }
8801 #endif /* CONFIG_BT_REMOTE_VERSION */
8802
/* Encode an LL control PDU received on a connection into the matching
 * HCI event, dispatching on the LL control opcode.
 */
static void encode_data_ctrl(struct node_rx_pdu *node_rx,
			     struct pdu_data *pdu_data, struct net_buf *buf)
{
	uint16_t handle = node_rx->hdr.handle;

	switch (pdu_data->llctrl.opcode) {

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
		le_ltk_request(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
		/* Encryption started successfully */
		encrypt_change(0x00, handle, buf, true);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_REMOTE_VERSION)
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		remote_version_info_encode(pdu_data, handle, buf);
		break;
#endif /* defined(CONFIG_BT_REMOTE_VERSION) */

	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
		le_remote_feat_complete(0x00, pdu_data, handle, buf);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
		/* Peer rejected encryption start; report with its reason */
		encrypt_change(pdu_data->llctrl.reject_ind.error_code, handle,
			       buf, false);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
		le_conn_param_req(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
		le_data_len_change(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
		/* CTE response arrived without a CTE attached */
		le_df_cte_req_failed(BT_HCI_CTE_REQ_STATUS_RSP_WITHOUT_CTE, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		le_unknown_rsp(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		le_reject_ext_ind(pdu_data, handle, buf);
		break;

	default:
		/* Unexpected LL control opcode on this path */
		LL_ASSERT(0);
		return;
	}
}
8869
8870 #if defined(CONFIG_BT_CONN)
/* Encode a received LL data PDU as an HCI ACL packet for the Host, and
 * account for host-buffer flow-control credits when enabled.
 */
void hci_acl_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
	struct bt_hci_acl_hdr *acl;
	uint16_t handle_flags;
	uint16_t handle;
	uint8_t *data;

	handle = node_rx->hdr.handle;

	switch (pdu_data->ll_id) {
	case PDU_DATA_LLID_DATA_CONTINUE:
	case PDU_DATA_LLID_DATA_START:
		acl = (void *)net_buf_add(buf, sizeof(*acl));
		/* LL start fragment maps to ACL_START, continuation to
		 * ACL_CONT packet-boundary flags.
		 */
		if (pdu_data->ll_id == PDU_DATA_LLID_DATA_START) {
			handle_flags = bt_acl_handle_pack(handle, BT_ACL_START);
		} else {
			handle_flags = bt_acl_handle_pack(handle, BT_ACL_CONT);
		}
		acl->handle = sys_cpu_to_le16(handle_flags);
		acl->len = sys_cpu_to_le16(pdu_data->len);
		data = (void *)net_buf_add(buf, pdu_data->len);
		memcpy(data, pdu_data->lldata, pdu_data->len);
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
		/* Consume one host-buffer credit and track it per handle */
		if (hci_hbuf_total > 0) {
			LL_ASSERT((hci_hbuf_sent - hci_hbuf_acked) <
				  hci_hbuf_total);
			hci_hbuf_sent++;
			/* Note: This requires linear handle values starting
			 * from 0
			 */
			LL_ASSERT(handle < ARRAY_SIZE(hci_hbuf_pend));
			hci_hbuf_pend[handle]++;
		}
#endif
		break;

	default:
		/* Only data LLIDs are valid here */
		LL_ASSERT(0);
		break;
	}
}
8913 #endif /* CONFIG_BT_CONN */
8914
8915 void hci_evt_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
8916 {
8917 struct pdu_data *pdu_data = (void *)node_rx->pdu;
8918
8919 if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
8920 encode_control(node_rx, pdu_data, buf);
8921 } else if (IS_ENABLED(CONFIG_BT_CONN)) {
8922 encode_data_ctrl(node_rx, pdu_data, buf);
8923 }
8924 }
8925
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)
/* Encode a Number Of Completed Packets HCI event for a single handle.
 *
 * The event carries one (handle, count) pair: `num` packets completed
 * on connection/stream `handle`.
 */
void hci_num_cmplt_encode(struct net_buf *buf, uint16_t handle, uint8_t num)
{
	const uint8_t num_handles = 1U;
	struct bt_hci_evt_num_completed_packets *ep;
	struct bt_hci_handle_count *hc;
	uint8_t len;

	/* Fixed header plus one handle-count entry */
	len = sizeof(*ep) + (num_handles * sizeof(*hc));
	hci_evt_create(buf, BT_HCI_EVT_NUM_COMPLETED_PACKETS, len);

	ep = net_buf_add(buf, len);
	ep->num_handles = num_handles;

	hc = &ep->h[0];
	hc->handle = sys_cpu_to_le16(handle);
	hc->count = sys_cpu_to_le16(num);
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
8947
/* Classify a received LL node for HCI Rx handling.
 *
 * Returns one of the HCI_CLASS_* values based on the node type:
 * non-data nodes map via the switch below; data-channel nodes
 * (NODE_RX_TYPE_DC_PDU) are split into LLCP control vs ACL data when
 * CONFIG_BT_CONN is enabled. Presumably the class drives drop/priority
 * decisions in the Rx path — e.g. DISCARDABLE events may be dropped
 * under buffer pressure while REQUIRED ones may not; confirm against
 * the consumers of this value.
 *
 * Note the case labels below deliberately fall through across the
 * preprocessor-conditional groups to a shared return statement.
 */
uint8_t hci_get_class(struct node_rx_pdu *node_rx)
{
#if defined(CONFIG_BT_CONN)
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
#endif

	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {

		switch (node_rx->hdr.type) {
		/* Scan/advertise reports and diagnostics: discardable. */
#if defined(CONFIG_BT_OBSERVER) || \
		defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
		defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
		defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
		defined(CONFIG_BT_CTLR_PROFILE_ISR)
#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_REPORT:
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
		case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
		case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
		case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
			return HCI_CLASS_EVT_DISCARDABLE;
#endif

		/* Mesh and extended-advertising lifecycle events: required
		 * (must reach the Host). All the case labels down to the
		 * return fall through to HCI_CLASS_EVT_REQUIRED.
		 */
#if defined(CONFIG_BT_HCI_MESH_EXT)
		case NODE_RX_TYPE_MESH_ADV_CPLT:
		case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
		case NODE_RX_TYPE_EXT_ADV_TERMINATE:

#if defined(CONFIG_BT_CTLR_ADV_ISO)
		case NODE_RX_TYPE_BIG_COMPLETE:
		case NODE_RX_TYPE_BIG_TERMINATE:
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_EXT_1M_REPORT:
		case NODE_RX_TYPE_EXT_2M_REPORT:
		case NODE_RX_TYPE_EXT_CODED_REPORT:
		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC:
		case NODE_RX_TYPE_SYNC_REPORT:
		case NODE_RX_TYPE_SYNC_LOST:

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		case NODE_RX_TYPE_SYNC_ISO:
		case NODE_RX_TYPE_SYNC_ISO_LOST:
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Connection setup/ISO establishment events: required. */
#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONNECTION:

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case NODE_RX_TYPE_CIS_REQUEST:
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_CIS_ESTABLISHED:
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */

			return HCI_CLASS_EVT_REQUIRED;

		/* Per-connection state-change events: connection class. */
		case NODE_RX_TYPE_TERMINATE:
		case NODE_RX_TYPE_CONN_UPDATE:

#if defined(CONFIG_BT_CTLR_LE_ENC)
		case NODE_RX_TYPE_ENC_REFRESH:
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
		case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

#if defined(CONFIG_BT_CTLR_LE_PING)
		case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
		case NODE_RX_TYPE_CHAN_SEL_ALGO:
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */

#if defined(CONFIG_BT_CTLR_PHY)
		case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

			return HCI_CLASS_EVT_CONNECTION;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_ISO_PDU:
			return HCI_CLASS_ISO_DATA;
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

		/* Vendor/user-defined node types delegate classification. */
#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
			return hci_user_ext_get_class(node_rx);
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

		default:
			return HCI_CLASS_NONE;
		}

#if defined(CONFIG_BT_CONN)
	} else if (pdu_data->ll_id == PDU_DATA_LLID_CTRL) {
		/* LL Control PDU on a data channel */
		return HCI_CLASS_EVT_LLCP;
	} else {
		/* Plain ACL payload destined for the Host */
		return HCI_CLASS_ACL_DATA;
	}
#else
	} else {
		/* Data-channel PDUs have no HCI destination without
		 * connection support.
		 */
		return HCI_CLASS_NONE;
	}
#endif
}
9102
/* Initialize the controller-side HCI layer.
 *
 * @param signal_host_buf Poll signal used for Host buffer flow control;
 *        stored only when CONFIG_BT_HCI_ACL_FLOW_CONTROL is enabled,
 *        otherwise ignored.
 *
 * Captures the signal before running a full reset (equivalent to an
 * HCI Reset with no command buffer / no event output).
 */
void hci_init(struct k_poll_signal *signal_host_buf)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	hbuf_signal = signal_host_buf;
#endif
	reset(NULL, NULL);
}
9110