1 /*
2 * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stddef.h>
9 #include <string.h>
10
11 #include <version.h>
12 #include <errno.h>
13
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/atomic.h>
17
18 #include <zephyr/drivers/bluetooth/hci_driver.h>
19
20 #include <zephyr/bluetooth/hci_types.h>
21 #include <zephyr/bluetooth/hci_vs.h>
22 #include <zephyr/bluetooth/buf.h>
23
24 #include "../host/hci_ecc.h"
25
26 #include "util/util.h"
27 #include "util/memq.h"
28 #include "util/mem.h"
29 #include "util/dbuf.h"
30
31 #include "hal/ecb.h"
32 #include "hal/ccm.h"
33 #include "hal/ticker.h"
34
35 #include "ticker/ticker.h"
36
37 #include "ll_sw/pdu_df.h"
38 #include "lll/pdu_vendor.h"
39 #include "ll_sw/pdu.h"
40
41 #include "ll_sw/lll.h"
42 #include "lll/lll_adv_types.h"
43 #include "ll_sw/lll_adv.h"
44 #include "lll/lll_adv_pdu.h"
45 #include "ll_sw/lll_scan.h"
46 #include "lll/lll_df_types.h"
47 #include "ll_sw/lll_sync.h"
48 #include "ll_sw/lll_sync_iso.h"
49 #include "ll_sw/lll_conn.h"
50 #include "ll_sw/lll_conn_iso.h"
51 #include "ll_sw/lll_iso_tx.h"
52
53 #include "ll_sw/isoal.h"
54
55 #include "ll_sw/ull_tx_queue.h"
56
57 #include "ll_sw/ull_adv_types.h"
58 #include "ll_sw/ull_scan_types.h"
59 #include "ll_sw/ull_sync_types.h"
60 #include "ll_sw/ull_conn_types.h"
61 #include "ll_sw/ull_iso_types.h"
62 #include "ll_sw/ull_conn_iso_types.h"
63 #include "ll_sw/ull_conn_iso_internal.h"
64 #include "ll_sw/ull_df_types.h"
65 #include "ll_sw/ull_internal.h"
66
67 #include "ll_sw/ull_adv_internal.h"
68 #include "ll_sw/ull_sync_internal.h"
69 #include "ll_sw/ull_conn_internal.h"
70 #include "ll_sw/ull_sync_iso_internal.h"
71 #include "ll_sw/ull_df_internal.h"
72
73 #include "ll.h"
74 #include "ll_feat.h"
75 #include "ll_settings.h"
76
77 #include "hci_internal.h"
78 #include "hci_vendor.h"
79
80 #if defined(CONFIG_BT_HCI_MESH_EXT)
81 #include "ll_sw/ll_mesh.h"
82 #endif /* CONFIG_BT_HCI_MESH_EXT */
83
84 #if defined(CONFIG_BT_CTLR_DTM_HCI)
85 #include "ll_sw/ll_test.h"
86 #endif /* CONFIG_BT_CTLR_DTM_HCI */
87
88 #if defined(CONFIG_BT_CTLR_USER_EXT)
89 #include "hci_user_ext.h"
90 #endif /* CONFIG_BT_CTLR_USER_EXT */
91
92 #include "common/bt_str.h"
93 #include "hal/debug.h"
94
95 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
96 #include <zephyr/logging/log.h>
97 LOG_MODULE_REGISTER(bt_ctlr_hci);
98
99 #define STR_NULL_TERMINATOR 0x00
100
101 /* opcode of the HCI command currently being processed. The opcode is stored
102 * by hci_cmd_handle() and then used during the creation of cmd complete and
103 * cmd status events to avoid passing it up the call chain.
104 */
105 static uint16_t _opcode;
106
107 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
108 /* NOTE: Duplicate filter uses two LS bits value of standard advertising modes:
109 * 0 - Non-Connectable Non-Scannable advertising report
110 * 1 - Connectable Non-Scannable advertising report
 * 2 - Non-Connectable Scannable advertising report
112 * 3 - Connectable Scannable advertising report
113 *
114 * FIXME: Duplicate filtering of Connectable Directed low and high duty
115 * cycle. If advertiser changes between Connectable Non-Scannable,
116 * Connectable Directed low, and high duty cycle without changing
117 * SID and DID, then such reports will be filtered out by the
118 * implementation. Needs enhancement to current implementation.
119 *
120 * Define a custom duplicate filter mode for periodic advertising:
121 * 4 - Periodic Advertising report
122 */
123
124 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
125 #define DUP_EXT_ADV_MODE_MAX 5
126 #define DUP_EXT_ADV_MODE_PERIODIC BIT(2)
127 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
128 #define DUP_EXT_ADV_MODE_MAX 4
129 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
130
131 #define DUP_EXT_ADV_MODE_COUNT 4
132
133 /* Duplicate filter entries, one per Bluetooth address */
134 static struct dup_entry {
135 bt_addr_le_t addr;
136
137 /* Mask to accumulate advertising PDU type as bitmask */
138 uint8_t mask;
139
140 #if defined(CONFIG_BT_CTLR_ADV_EXT)
141 struct dup_ext_adv_mode {
142 uint16_t set_count:5;
143 uint16_t set_curr:5;
144 struct dup_ext_adv_set {
145 uint8_t data_cmplt:1;
146 struct pdu_adv_adi adi;
147 } set[CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX];
148 } adv_mode[DUP_EXT_ADV_MODE_MAX];
149 #endif
150 } dup_filter[CONFIG_BT_CTLR_DUP_FILTER_LEN];
151
152 /* Duplicate filtering is disabled if count value is set to negative integer */
153 #define DUP_FILTER_DISABLED (-1)
154
155 /* Duplicate filtering array entry count, filtering disabled if negative */
156 static int32_t dup_count;
157 /* Duplicate filtering current free entry, overwrites entries after rollover */
158 static uint32_t dup_curr;
159
160 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
161 /* Helper function to reset non-periodic advertising entries in filter table */
162 static void dup_ext_adv_reset(void);
163 /* Flag for advertising reports be filtered for duplicates. */
164 static bool dup_scan;
165 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
166 /* Set constant true so that (dup_count >= 0) decides if advertising duplicate
167 * filter is enabled when Periodic Advertising ADI support is disabled.
168 */
169 static const bool dup_scan = true;
170 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
171 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
172
173 #if defined(CONFIG_BT_HCI_MESH_EXT)
174 struct scan_filter {
175 uint8_t count;
176 uint8_t lengths[CONFIG_BT_CTLR_MESH_SF_PATTERNS];
177 uint8_t patterns[CONFIG_BT_CTLR_MESH_SF_PATTERNS]
178 [BT_HCI_MESH_PATTERN_LEN_MAX];
179 };
180
181 static struct scan_filter scan_filters[CONFIG_BT_CTLR_MESH_SCAN_FILTERS];
182 static uint8_t sf_curr;
183 #endif
184
185 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
186 int32_t hci_hbuf_total;
187 uint32_t hci_hbuf_sent;
188 uint32_t hci_hbuf_acked;
189 uint16_t hci_hbuf_pend[CONFIG_BT_MAX_CONN];
190 atomic_t hci_state_mask;
191 static struct k_poll_signal *hbuf_signal;
192 #endif
193
194 #if defined(CONFIG_BT_CONN)
195 static uint32_t conn_count;
196 #endif
197
198 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
199 static uint32_t cis_pending_count;
200 #endif
201
202 /* In HCI event PHY indices start at 1 compare to 0 indexed in aux_ptr field in
203 * the Common Extended Payload Format in the PDUs.
204 */
205 #define HCI_AUX_PHY_TO_HCI_PHY(aux_phy) ((aux_phy) + 1)
206
207 #define DEFAULT_EVENT_MASK 0x1fffffffffff
208 #define DEFAULT_EVENT_MASK_PAGE_2 0x0
209 #define DEFAULT_LE_EVENT_MASK 0x1f
210
211 static uint64_t event_mask = DEFAULT_EVENT_MASK;
212 static uint64_t event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
213 static uint64_t le_event_mask = DEFAULT_LE_EVENT_MASK;
214 #if defined(CONFIG_BT_HCI_VS_EVT)
215 static uint64_t vs_events_mask = DEFAULT_VS_EVT_MASK;
216 #endif /* CONFIG_BT_HCI_VS_EVT */
217
218 static struct net_buf *cmd_complete_status(uint8_t status);
219
220 #if defined(CONFIG_BT_CTLR_ADV_EXT)
221 #define BUF_GET_TIMEOUT K_SECONDS(10)
222
223 #if defined(CONFIG_BT_HCI_RAW)
224 static uint8_t ll_adv_cmds;
225
ll_adv_cmds_set(uint8_t adv_cmds)226 __weak int ll_adv_cmds_set(uint8_t adv_cmds)
227 {
228 if (!ll_adv_cmds) {
229 ll_adv_cmds = adv_cmds;
230 }
231
232 if (ll_adv_cmds != adv_cmds) {
233 return -EINVAL;
234 }
235
236 return 0;
237 }
238
ll_adv_cmds_is_ext(void)239 __weak int ll_adv_cmds_is_ext(void)
240 {
241 return ll_adv_cmds == LL_ADV_CMDS_EXT;
242 }
243
244 #else /* !CONFIG_BT_HCI_RAW */
ll_adv_cmds_is_ext(void)245 __weak int ll_adv_cmds_is_ext(void)
246 {
247 return 1;
248 }
249 #endif /* !CONFIG_BT_HCI_RAW */
250
adv_cmds_legacy_check(struct net_buf ** cc_evt)251 static int adv_cmds_legacy_check(struct net_buf **cc_evt)
252 {
253 int err;
254
255 #if defined(CONFIG_BT_HCI_RAW)
256 err = ll_adv_cmds_set(LL_ADV_CMDS_LEGACY);
257 if (err && cc_evt) {
258 *cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
259 }
260 #else
261 if (cc_evt) {
262 *cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
263 }
264
265 err = -EINVAL;
266 #endif /* CONFIG_BT_HCI_RAW */
267
268 return err;
269 }
270
adv_cmds_ext_check(struct net_buf ** cc_evt)271 static int adv_cmds_ext_check(struct net_buf **cc_evt)
272 {
273 int err;
274
275 #if defined(CONFIG_BT_HCI_RAW)
276 err = ll_adv_cmds_set(LL_ADV_CMDS_EXT);
277 if (err && cc_evt) {
278 *cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
279 }
280 #else
281 err = 0;
282 #endif /* CONFIG_BT_HCI_RAW */
283
284 return err;
285 }
286 #else
adv_cmds_legacy_check(struct net_buf ** cc_evt)287 static inline int adv_cmds_legacy_check(struct net_buf **cc_evt)
288 {
289 return 0;
290 }
291 #endif /* CONFIG_BT_CTLR_ADV_EXT */
292
293 #if defined(CONFIG_BT_CONN)
294 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
295 struct net_buf *buf);
296 #endif /* CONFIG_BT_CONN */
297
hci_evt_create(struct net_buf * buf,uint8_t evt,uint8_t len)298 static void hci_evt_create(struct net_buf *buf, uint8_t evt, uint8_t len)
299 {
300 struct bt_hci_evt_hdr *hdr;
301
302 hdr = net_buf_add(buf, sizeof(*hdr));
303 hdr->evt = evt;
304 hdr->len = len;
305 }
306
hci_cmd_complete(struct net_buf ** buf,uint8_t plen)307 void *hci_cmd_complete(struct net_buf **buf, uint8_t plen)
308 {
309 *buf = bt_hci_cmd_complete_create(_opcode, plen);
310
311 return net_buf_add(*buf, plen);
312 }
313
cmd_status(uint8_t status)314 static struct net_buf *cmd_status(uint8_t status)
315 {
316 return bt_hci_cmd_status_create(_opcode, status);
317 }
318
cmd_complete_status(uint8_t status)319 static struct net_buf *cmd_complete_status(uint8_t status)
320 {
321 struct net_buf *buf;
322 struct bt_hci_evt_cc_status *ccst;
323
324 buf = bt_hci_cmd_complete_create(_opcode, sizeof(*ccst));
325 ccst = net_buf_add(buf, sizeof(*ccst));
326 ccst->status = status;
327
328 return buf;
329 }
330
meta_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)331 static void *meta_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
332 {
333 struct bt_hci_evt_le_meta_event *me;
334
335 hci_evt_create(buf, BT_HCI_EVT_LE_META_EVENT, sizeof(*me) + melen);
336 me = net_buf_add(buf, sizeof(*me));
337 me->subevent = subevt;
338
339 return net_buf_add(buf, melen);
340 }
341
342 #if defined(CONFIG_BT_HCI_VS_EVT)
vs_event(struct net_buf * buf,uint8_t subevt,uint8_t evt_len)343 static void *vs_event(struct net_buf *buf, uint8_t subevt, uint8_t evt_len)
344 {
345 struct bt_hci_evt_vs *evt;
346
347 hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*evt) + evt_len);
348 evt = net_buf_add(buf, sizeof(*evt));
349 evt->subevent = subevt;
350
351 return net_buf_add(buf, evt_len);
352 }
353 #endif /* CONFIG_BT_HCI_VS_EVT */
354
355 #if defined(CONFIG_BT_HCI_MESH_EXT)
mesh_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)356 static void *mesh_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
357 {
358 struct bt_hci_evt_mesh *me;
359
360 hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*me) + melen);
361 me = net_buf_add(buf, sizeof(*me));
362 me->prefix = BT_HCI_MESH_EVT_PREFIX;
363 me->subevent = subevt;
364
365 return net_buf_add(buf, melen);
366 }
367 #endif /* CONFIG_BT_HCI_MESH_EXT */
368
369 #if defined(CONFIG_BT_CONN)
disconnect(struct net_buf * buf,struct net_buf ** evt)370 static void disconnect(struct net_buf *buf, struct net_buf **evt)
371 {
372 struct bt_hci_cp_disconnect *cmd = (void *)buf->data;
373 uint16_t handle;
374 uint8_t status;
375
376 handle = sys_le16_to_cpu(cmd->handle);
377 status = ll_terminate_ind_send(handle, cmd->reason);
378
379 *evt = cmd_status(status);
380 }
381
read_remote_ver_info(struct net_buf * buf,struct net_buf ** evt)382 static void read_remote_ver_info(struct net_buf *buf, struct net_buf **evt)
383 {
384 struct bt_hci_cp_read_remote_version_info *cmd = (void *)buf->data;
385 uint16_t handle;
386 uint8_t status;
387
388 handle = sys_le16_to_cpu(cmd->handle);
389 status = ll_version_ind_send(handle);
390
391 *evt = cmd_status(status);
392 }
393 #endif /* CONFIG_BT_CONN */
394
link_control_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)395 static int link_control_cmd_handle(uint16_t ocf, struct net_buf *cmd,
396 struct net_buf **evt)
397 {
398 switch (ocf) {
399 #if defined(CONFIG_BT_CONN)
400 case BT_OCF(BT_HCI_OP_DISCONNECT):
401 disconnect(cmd, evt);
402 break;
403 case BT_OCF(BT_HCI_OP_READ_REMOTE_VERSION_INFO):
404 read_remote_ver_info(cmd, evt);
405 break;
406 #endif /* CONFIG_BT_CONN */
407 default:
408 return -EINVAL;
409 }
410
411 return 0;
412 }
413
set_event_mask(struct net_buf * buf,struct net_buf ** evt)414 static void set_event_mask(struct net_buf *buf, struct net_buf **evt)
415 {
416 struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
417
418 event_mask = sys_get_le64(cmd->events);
419
420 *evt = cmd_complete_status(0x00);
421 }
422
set_event_mask_page_2(struct net_buf * buf,struct net_buf ** evt)423 static void set_event_mask_page_2(struct net_buf *buf, struct net_buf **evt)
424 {
425 struct bt_hci_cp_set_event_mask_page_2 *cmd = (void *)buf->data;
426
427 event_mask_page_2 = sys_get_le64(cmd->events_page_2);
428
429 *evt = cmd_complete_status(0x00);
430 }
431
reset(struct net_buf * buf,struct net_buf ** evt)432 static void reset(struct net_buf *buf, struct net_buf **evt)
433 {
434 #if defined(CONFIG_BT_HCI_MESH_EXT)
435 int i;
436
437 for (i = 0; i < ARRAY_SIZE(scan_filters); i++) {
438 scan_filters[i].count = 0U;
439 }
440 sf_curr = 0xFF;
441 #endif
442
443 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
444 dup_count = DUP_FILTER_DISABLED;
445 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
446 dup_scan = false;
447 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
448 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
449
450 /* reset event masks */
451 event_mask = DEFAULT_EVENT_MASK;
452 event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
453 le_event_mask = DEFAULT_LE_EVENT_MASK;
454
455 if (buf) {
456 ll_reset();
457 *evt = cmd_complete_status(0x00);
458 }
459
460 #if defined(CONFIG_BT_CONN)
461 conn_count = 0U;
462 #endif
463
464 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
465 cis_pending_count = 0U;
466 #endif
467
468 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
469 hci_hbuf_total = 0;
470 hci_hbuf_sent = 0U;
471 hci_hbuf_acked = 0U;
472 (void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
473 if (buf) {
474 atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
475 k_poll_signal_raise(hbuf_signal, 0x0);
476 }
477 #endif
478
479 hci_recv_fifo_reset();
480 }
481
482 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_ctl_to_host_flow(struct net_buf * buf,struct net_buf ** evt)483 static void set_ctl_to_host_flow(struct net_buf *buf, struct net_buf **evt)
484 {
485 struct bt_hci_cp_set_ctl_to_host_flow *cmd = (void *)buf->data;
486 uint8_t flow_enable = cmd->flow_enable;
487 struct bt_hci_evt_cc_status *ccst;
488
489 ccst = hci_cmd_complete(evt, sizeof(*ccst));
490
491 /* require host buffer size before enabling flow control, and
492 * disallow if any connections are up
493 */
494 if (!hci_hbuf_total || conn_count) {
495 ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
496 return;
497 } else {
498 ccst->status = 0x00;
499 }
500
501 switch (flow_enable) {
502 case BT_HCI_CTL_TO_HOST_FLOW_DISABLE:
503 if (hci_hbuf_total < 0) {
504 /* already disabled */
505 return;
506 }
507 break;
508 case BT_HCI_CTL_TO_HOST_FLOW_ENABLE:
509 if (hci_hbuf_total > 0) {
510 /* already enabled */
511 return;
512 }
513 break;
514 default:
515 ccst->status = BT_HCI_ERR_INVALID_PARAM;
516 return;
517 }
518
519 hci_hbuf_sent = 0U;
520 hci_hbuf_acked = 0U;
521 (void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
522 hci_hbuf_total = -hci_hbuf_total;
523 }
524
host_buffer_size(struct net_buf * buf,struct net_buf ** evt)525 static void host_buffer_size(struct net_buf *buf, struct net_buf **evt)
526 {
527 struct bt_hci_cp_host_buffer_size *cmd = (void *)buf->data;
528 uint16_t acl_pkts = sys_le16_to_cpu(cmd->acl_pkts);
529 uint16_t acl_mtu = sys_le16_to_cpu(cmd->acl_mtu);
530 struct bt_hci_evt_cc_status *ccst;
531
532 ccst = hci_cmd_complete(evt, sizeof(*ccst));
533
534 if (hci_hbuf_total) {
535 ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
536 return;
537 }
538 /* fragmentation from controller to host not supported, require
539 * ACL MTU to be at least the LL MTU
540 */
541 if (acl_mtu < LL_LENGTH_OCTETS_RX_MAX) {
542 ccst->status = BT_HCI_ERR_INVALID_PARAM;
543 return;
544 }
545
546 LOG_DBG("FC: host buf size: %d", acl_pkts);
547 hci_hbuf_total = -acl_pkts;
548 }
549
host_num_completed_packets(struct net_buf * buf,struct net_buf ** evt)550 static void host_num_completed_packets(struct net_buf *buf,
551 struct net_buf **evt)
552 {
553 struct bt_hci_cp_host_num_completed_packets *cmd = (void *)buf->data;
554 struct bt_hci_evt_cc_status *ccst;
555 uint32_t count = 0U;
556
557 /* special case, no event returned except for error conditions */
558 if (hci_hbuf_total <= 0) {
559 ccst = hci_cmd_complete(evt, sizeof(*ccst));
560 ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
561 return;
562 } else if (!conn_count) {
563 ccst = hci_cmd_complete(evt, sizeof(*ccst));
564 ccst->status = BT_HCI_ERR_INVALID_PARAM;
565 return;
566 }
567
568 /* leave *evt == NULL so no event is generated */
569 for (uint8_t i = 0; i < cmd->num_handles; i++) {
570 uint16_t h = sys_le16_to_cpu(cmd->h[i].handle);
571 uint16_t c = sys_le16_to_cpu(cmd->h[i].count);
572
573 if ((h >= ARRAY_SIZE(hci_hbuf_pend)) ||
574 (c > hci_hbuf_pend[h])) {
575 ccst = hci_cmd_complete(evt, sizeof(*ccst));
576 ccst->status = BT_HCI_ERR_INVALID_PARAM;
577 return;
578 }
579
580 hci_hbuf_pend[h] -= c;
581 count += c;
582 }
583
584 LOG_DBG("FC: acked: %d", count);
585 hci_hbuf_acked += count;
586 k_poll_signal_raise(hbuf_signal, 0x0);
587 }
588 #endif
589
590 #if defined(CONFIG_BT_CTLR_LE_PING)
read_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)591 static void read_auth_payload_timeout(struct net_buf *buf, struct net_buf **evt)
592 {
593 struct bt_hci_cp_read_auth_payload_timeout *cmd = (void *)buf->data;
594 struct bt_hci_rp_read_auth_payload_timeout *rp;
595 uint16_t auth_payload_timeout;
596 uint16_t handle;
597 uint8_t status;
598
599 handle = sys_le16_to_cpu(cmd->handle);
600
601 status = ll_apto_get(handle, &auth_payload_timeout);
602
603 rp = hci_cmd_complete(evt, sizeof(*rp));
604 rp->status = status;
605 rp->handle = sys_cpu_to_le16(handle);
606 rp->auth_payload_timeout = sys_cpu_to_le16(auth_payload_timeout);
607 }
608
write_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)609 static void write_auth_payload_timeout(struct net_buf *buf,
610 struct net_buf **evt)
611 {
612 struct bt_hci_cp_write_auth_payload_timeout *cmd = (void *)buf->data;
613 struct bt_hci_rp_write_auth_payload_timeout *rp;
614 uint16_t auth_payload_timeout;
615 uint16_t handle;
616 uint8_t status;
617
618 handle = sys_le16_to_cpu(cmd->handle);
619 auth_payload_timeout = sys_le16_to_cpu(cmd->auth_payload_timeout);
620
621 status = ll_apto_set(handle, auth_payload_timeout);
622
623 rp = hci_cmd_complete(evt, sizeof(*rp));
624 rp->status = status;
625 rp->handle = sys_cpu_to_le16(handle);
626 }
627 #endif /* CONFIG_BT_CTLR_LE_PING */
628
629 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
configure_data_path(struct net_buf * buf,struct net_buf ** evt)630 static void configure_data_path(struct net_buf *buf,
631 struct net_buf **evt)
632 {
633 struct bt_hci_cp_configure_data_path *cmd = (void *)buf->data;
634 struct bt_hci_rp_configure_data_path *rp;
635
636 uint8_t *vs_config;
637 uint8_t status;
638
639 vs_config = &cmd->vs_config[0];
640
641 if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)) {
642 status = ll_configure_data_path(cmd->data_path_dir,
643 cmd->data_path_id,
644 cmd->vs_config_len,
645 vs_config);
646 } else {
647 status = BT_HCI_ERR_INVALID_PARAM;
648 }
649
650 rp = hci_cmd_complete(evt, sizeof(*rp));
651 rp->status = status;
652 }
653 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
654
655 #if defined(CONFIG_BT_CTLR_CONN_ISO)
read_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)656 static void read_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
657 {
658 struct bt_hci_rp_read_conn_accept_timeout *rp;
659 uint16_t timeout;
660
661 ARG_UNUSED(buf);
662
663 rp = hci_cmd_complete(evt, sizeof(*rp));
664
665 rp->status = ll_conn_iso_accept_timeout_get(&timeout);
666 rp->conn_accept_timeout = sys_cpu_to_le16(timeout);
667 }
668
write_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)669 static void write_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
670 {
671 struct bt_hci_cp_write_conn_accept_timeout *cmd = (void *)buf->data;
672 struct bt_hci_rp_write_conn_accept_timeout *rp;
673 uint16_t timeout;
674
675 timeout = sys_le16_to_cpu(cmd->conn_accept_timeout);
676
677 rp = hci_cmd_complete(evt, sizeof(*rp));
678
679 rp->status = ll_conn_iso_accept_timeout_set(timeout);
680 }
681 #endif /* CONFIG_BT_CTLR_CONN_ISO */
682
683 #if defined(CONFIG_BT_CONN)
read_tx_power_level(struct net_buf * buf,struct net_buf ** evt)684 static void read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
685 {
686 struct bt_hci_cp_read_tx_power_level *cmd = (void *)buf->data;
687 struct bt_hci_rp_read_tx_power_level *rp;
688 uint16_t handle;
689 uint8_t status;
690 uint8_t type;
691
692 handle = sys_le16_to_cpu(cmd->handle);
693 type = cmd->type;
694
695 rp = hci_cmd_complete(evt, sizeof(*rp));
696
697 status = ll_tx_pwr_lvl_get(BT_HCI_VS_LL_HANDLE_TYPE_CONN,
698 handle, type, &rp->tx_power_level);
699
700 rp->status = status;
701 rp->handle = sys_cpu_to_le16(handle);
702 }
703 #endif /* CONFIG_BT_CONN */
704
ctrl_bb_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)705 static int ctrl_bb_cmd_handle(uint16_t ocf, struct net_buf *cmd,
706 struct net_buf **evt)
707 {
708 switch (ocf) {
709 case BT_OCF(BT_HCI_OP_SET_EVENT_MASK):
710 set_event_mask(cmd, evt);
711 break;
712
713 case BT_OCF(BT_HCI_OP_RESET):
714 reset(cmd, evt);
715 break;
716
717 case BT_OCF(BT_HCI_OP_SET_EVENT_MASK_PAGE_2):
718 set_event_mask_page_2(cmd, evt);
719 break;
720
721 #if defined(CONFIG_BT_CTLR_CONN_ISO)
722 case BT_OCF(BT_HCI_OP_READ_CONN_ACCEPT_TIMEOUT):
723 read_conn_accept_timeout(cmd, evt);
724 break;
725
726 case BT_OCF(BT_HCI_OP_WRITE_CONN_ACCEPT_TIMEOUT):
727 write_conn_accept_timeout(cmd, evt);
728 break;
729 #endif /* CONFIG_BT_CTLR_CONN_ISO */
730
731 #if defined(CONFIG_BT_CONN)
732 case BT_OCF(BT_HCI_OP_READ_TX_POWER_LEVEL):
733 read_tx_power_level(cmd, evt);
734 break;
735 #endif /* CONFIG_BT_CONN */
736
737 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
738 case BT_OCF(BT_HCI_OP_SET_CTL_TO_HOST_FLOW):
739 set_ctl_to_host_flow(cmd, evt);
740 break;
741
742 case BT_OCF(BT_HCI_OP_HOST_BUFFER_SIZE):
743 host_buffer_size(cmd, evt);
744 break;
745
746 case BT_OCF(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS):
747 host_num_completed_packets(cmd, evt);
748 break;
749 #endif
750
751 #if defined(CONFIG_BT_CTLR_LE_PING)
752 case BT_OCF(BT_HCI_OP_READ_AUTH_PAYLOAD_TIMEOUT):
753 read_auth_payload_timeout(cmd, evt);
754 break;
755
756 case BT_OCF(BT_HCI_OP_WRITE_AUTH_PAYLOAD_TIMEOUT):
757 write_auth_payload_timeout(cmd, evt);
758 break;
759 #endif /* CONFIG_BT_CTLR_LE_PING */
760
761 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
762 case BT_OCF(BT_HCI_OP_CONFIGURE_DATA_PATH):
763 configure_data_path(cmd, evt);
764 break;
765 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
766
767 default:
768 return -EINVAL;
769 }
770
771 return 0;
772 }
773
read_local_version_info(struct net_buf * buf,struct net_buf ** evt)774 static void read_local_version_info(struct net_buf *buf, struct net_buf **evt)
775 {
776 struct bt_hci_rp_read_local_version_info *rp;
777
778 rp = hci_cmd_complete(evt, sizeof(*rp));
779
780 rp->status = 0x00;
781 rp->hci_version = LL_VERSION_NUMBER;
782 rp->hci_revision = sys_cpu_to_le16(0);
783 rp->lmp_version = LL_VERSION_NUMBER;
784 rp->manufacturer = sys_cpu_to_le16(ll_settings_company_id());
785 rp->lmp_subversion = sys_cpu_to_le16(ll_settings_subversion_number());
786 }
787
read_supported_commands(struct net_buf * buf,struct net_buf ** evt)788 static void read_supported_commands(struct net_buf *buf, struct net_buf **evt)
789 {
790 struct bt_hci_rp_read_supported_commands *rp;
791
792 rp = hci_cmd_complete(evt, sizeof(*rp));
793
794 rp->status = 0x00;
795 (void)memset(&rp->commands[0], 0, sizeof(rp->commands));
796
797 #if defined(CONFIG_BT_REMOTE_VERSION)
798 /* Read Remote Version Info. */
799 rp->commands[2] |= BIT(7);
800 #endif
801 /* Set Event Mask, and Reset. */
802 rp->commands[5] |= BIT(6) | BIT(7);
803
804 #if defined(CONFIG_BT_CTLR_CONN_ISO)
805 /* Read/Write Connection Accept Timeout */
806 rp->commands[7] |= BIT(2) | BIT(3);
807 #endif /* CONFIG_BT_CTLR_CONN_ISO */
808
809 /* Read TX Power Level. */
810 rp->commands[10] |= BIT(2);
811
812 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
813 /* Set FC, Host Buffer Size and Host Num Completed */
814 rp->commands[10] |= BIT(5) | BIT(6) | BIT(7);
815 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
816
817 /* Read Local Version Info, Read Local Supported Features. */
818 rp->commands[14] |= BIT(3) | BIT(5);
819 /* Read BD ADDR. */
820 rp->commands[15] |= BIT(1);
821
822 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
823 /* Read RSSI. */
824 rp->commands[15] |= BIT(5);
825 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
826
827 /* Set Event Mask Page 2 */
828 rp->commands[22] |= BIT(2);
829 /* LE Set Event Mask, LE Read Buffer Size, LE Read Local Supp Feats,
830 * Set Random Addr
831 */
832 rp->commands[25] |= BIT(0) | BIT(1) | BIT(2) | BIT(4);
833
834 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
835 /* LE Read FAL Size, LE Clear FAL */
836 rp->commands[26] |= BIT(6) | BIT(7);
837 /* LE Add Dev to FAL, LE Remove Dev from FAL */
838 rp->commands[27] |= BIT(0) | BIT(1);
839 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
840
841 /* LE Encrypt, LE Rand */
842 rp->commands[27] |= BIT(6) | BIT(7);
843 /* LE Read Supported States */
844 rp->commands[28] |= BIT(3);
845
846 #if defined(CONFIG_BT_BROADCASTER)
847 /* LE Set Adv Params, LE Read Adv Channel TX Power, LE Set Adv Data */
848 rp->commands[25] |= BIT(5) | BIT(6) | BIT(7);
849 /* LE Set Scan Response Data, LE Set Adv Enable */
850 rp->commands[26] |= BIT(0) | BIT(1);
851
852 #if defined(CONFIG_BT_CTLR_ADV_EXT)
853 /* LE Set Adv Set Random Addr, LE Set Ext Adv Params, LE Set Ext Adv
854 * Data, LE Set Ext Adv Scan Rsp Data, LE Set Ext Adv Enable, LE Read
855 * Max Adv Data Len, LE Read Num Supp Adv Sets
856 */
857 rp->commands[36] |= BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
858 BIT(6) | BIT(7);
859 /* LE Remove Adv Set, LE Clear Adv Sets */
860 rp->commands[37] |= BIT(0) | BIT(1);
861 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
862 /* LE Set PA Params, LE Set PA Data, LE Set PA Enable */
863 rp->commands[37] |= BIT(2) | BIT(3) | BIT(4);
864 #if defined(CONFIG_BT_CTLR_ADV_ISO)
865 /* LE Create BIG, LE Create BIG Test, LE Terminate BIG */
866 rp->commands[42] |= BIT(5) | BIT(6) | BIT(7);
867 #endif /* CONFIG_BT_CTLR_ADV_ISO */
868 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
869 #endif /* CONFIG_BT_CTLR_ADV_EXT */
870 #endif /* CONFIG_BT_BROADCASTER */
871
872 #if defined(CONFIG_BT_OBSERVER)
873 /* LE Set Scan Params, LE Set Scan Enable */
874 rp->commands[26] |= BIT(2) | BIT(3);
875
876 #if defined(CONFIG_BT_CTLR_ADV_EXT)
877 /* LE Set Extended Scan Params, LE Set Extended Scan Enable */
878 rp->commands[37] |= BIT(5) | BIT(6);
879 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
880 /* LE PA Create Sync, LE PA Create Sync Cancel, LE PA Terminate Sync */
881 rp->commands[38] |= BIT(0) | BIT(1) | BIT(2);
882 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
883 /* LE PA Add Device to Periodic Advertiser List,
884 * LE PA Remove Device from Periodic Advertiser List,
885 * LE Clear Periodic Advertiser List,
	 * LE Read Periodic Advertiser List Size
887 */
888 rp->commands[38] |= BIT(3) | BIT(4) | BIT(5) | BIT(6);
889 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
890 /* LE Set PA Receive Enable */
891 rp->commands[40] |= BIT(5);
892 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
893 /* LE BIG Create Sync, LE BIG Terminate Sync */
894 rp->commands[43] |= BIT(0) | BIT(1);
895 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
896 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
897 #endif /* CONFIG_BT_CTLR_ADV_EXT */
898
899 #endif /* CONFIG_BT_OBSERVER */
900
901 #if defined(CONFIG_BT_CONN)
902 #if defined(CONFIG_BT_CENTRAL)
903 /* LE Create Connection, LE Create Connection Cancel */
904 rp->commands[26] |= BIT(4) | BIT(5);
905 /* Set Host Channel Classification */
906 rp->commands[27] |= BIT(3);
907
908 #if defined(CONFIG_BT_CTLR_ADV_EXT)
909 /* LE Extended Create Connection */
910 rp->commands[37] |= BIT(7);
911 #endif /* CONFIG_BT_CTLR_ADV_EXT */
912
913 #if defined(CONFIG_BT_CTLR_LE_ENC)
914 /* LE Start Encryption */
915 rp->commands[28] |= BIT(0);
916 #endif /* CONFIG_BT_CTLR_LE_ENC */
917
918 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
919 /* LE Set CIG Parameters */
920 rp->commands[41] |= BIT(7);
921 /* LE Set CIG Parameters Test, LE Create CIS, LE Remove CIS */
922 rp->commands[42] |= BIT(0) | BIT(1) | BIT(2);
923 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
924 #endif /* CONFIG_BT_CENTRAL */
925
926 #if defined(CONFIG_BT_PERIPHERAL)
927 #if defined(CONFIG_BT_CTLR_LE_ENC)
928 /* LE LTK Request Reply, LE LTK Request Negative Reply */
929 rp->commands[28] |= BIT(1) | BIT(2);
930 #endif /* CONFIG_BT_CTLR_LE_ENC */
931 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
932 /* LE Accept CIS Request, LE Reject CIS Request */
933 rp->commands[42] |= BIT(3) | BIT(4);
934 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
935 #endif /* CONFIG_BT_PERIPHERAL */
936
937 /* Disconnect. */
938 rp->commands[0] |= BIT(5);
939 /* LE Connection Update, LE Read Channel Map, LE Read Remote Features */
940 rp->commands[27] |= BIT(2) | BIT(4) | BIT(5);
941
942 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
943 /* LE Remote Conn Param Req and Neg Reply */
944 rp->commands[33] |= BIT(4) | BIT(5);
945 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
946
947 #if defined(CONFIG_BT_CTLR_LE_PING)
948 /* Read and Write authenticated payload timeout */
949 rp->commands[32] |= BIT(4) | BIT(5);
950 #endif /* CONFIG_BT_CTLR_LE_PING */
951
952 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
953 /* LE Set Data Length, and LE Read Suggested Data Length. */
954 rp->commands[33] |= BIT(6) | BIT(7);
955 /* LE Write Suggested Data Length. */
956 rp->commands[34] |= BIT(0);
957 /* LE Read Maximum Data Length. */
958 rp->commands[35] |= BIT(3);
959 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
960
961 #if defined(CONFIG_BT_CTLR_PHY)
962 /* LE Read PHY Command. */
963 rp->commands[35] |= BIT(4);
964 /* LE Set Default PHY Command. */
965 rp->commands[35] |= BIT(5);
966 /* LE Set PHY Command. */
967 rp->commands[35] |= BIT(6);
968 #endif /* CONFIG_BT_CTLR_PHY */
969 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
970 /* LE Request Peer SCA */
971 rp->commands[43] |= BIT(2);
972 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
973 #endif /* CONFIG_BT_CONN */
974
975 #if defined(CONFIG_BT_CTLR_DTM_HCI)
976 /* LE RX Test, LE TX Test, LE Test End */
977 rp->commands[28] |= BIT(4) | BIT(5) | BIT(6);
978 /* LE Enhanced RX Test. */
979 rp->commands[35] |= BIT(7);
980 /* LE Enhanced TX Test. */
981 rp->commands[36] |= BIT(0);
982 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
983 rp->commands[39] |= BIT(3);
984 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
985
986 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
987 rp->commands[39] |= BIT(4);
988 #endif
989
990 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
991 rp->commands[45] |= BIT(0);
992 #endif
993 #endif /* CONFIG_BT_CTLR_DTM_HCI */
994
995 #if defined(CONFIG_BT_CTLR_PRIVACY)
996 /* LE resolving list commands, LE Read Peer RPA */
997 rp->commands[34] |= BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7);
998 /* LE Read Local RPA, LE Set AR Enable, Set RPA Timeout */
999 rp->commands[35] |= BIT(0) | BIT(1) | BIT(2);
1000 /* LE Set Privacy Mode */
1001 rp->commands[39] |= BIT(2);
1002 #endif /* CONFIG_BT_CTLR_PRIVACY */
1003
1004 #if defined(CONFIG_BT_CTLR_DF)
1005 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
1006 /* LE Set Connectionless CTE Transmit Parameters,
1007 * LE Set Connectionless CTE Transmit Enable
1008 */
1009 rp->commands[39] |= BIT(5) | BIT(6);
1010 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
1011 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1012 /* LE Set Connectionless IQ Sampling Enable */
1013 rp->commands[39] |= BIT(7);
1014 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1015 /* LE Read Antenna Information */
1016 rp->commands[40] |= BIT(4);
1017 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1018 /* LE Set Connection CTE Transmit Parameters */
1019 rp->commands[40] |= BIT(1);
1020 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1021 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1022 /* LE Set Connection CTE Receive Parameters */
1023 rp->commands[40] |= BIT(0);
1024 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1025 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1026 /* LE Connection CTE Request Enable */
1027 rp->commands[40] |= BIT(2);
1028 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1029 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1030 /* LE Connection CTE Response Enable */
1031 rp->commands[40] |= BIT(3);
1032 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1033
1034 #endif /* CONFIG_BT_CTLR_DF */
1035
1036 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_TINYCRYPT_ECC)
1037 bt_hci_ecc_supported_commands(rp->commands);
1038 #endif /* CONFIG_BT_HCI_RAW && CONFIG_BT_TINYCRYPT_ECC */
1039
1040 /* LE Read TX Power. */
1041 rp->commands[38] |= BIT(7);
1042
1043 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1044 /* LE Read Buffer Size v2, LE Read ISO TX Sync */
1045 rp->commands[41] |= BIT(5) | BIT(6);
1046 /* LE ISO Transmit Test */
1047 rp->commands[43] |= BIT(5);
1048 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1049
1050 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1051 /* LE ISO Receive Test, LE ISO Read Test Counters */
1052 rp->commands[43] |= BIT(6) | BIT(7);
1053
1054 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1055 /* LE Read ISO Link Quality */
1056 rp->commands[44] |= BIT(2);
1057 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1058 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1059
1060 #if defined(CONFIG_BT_CTLR_ISO)
1061 /* LE Setup ISO Data Path, LE Remove ISO Data Path */
1062 rp->commands[43] |= BIT(3) | BIT(4);
1063 /* LE ISO Test End */
1064 rp->commands[44] |= BIT(0);
1065 #endif /* CONFIG_BT_CTLR_ISO */
1066
1067 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
1068 /* LE Set Host Feature */
1069 rp->commands[44] |= BIT(1);
1070 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
1071
1072 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1073 /* Read Supported Codecs [v2], Codec Capabilities, Controller Delay, Configure Data Path */
1074 rp->commands[45] |= BIT(2) | BIT(3) | BIT(4) | BIT(5);
1075 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1076 }
1077
read_local_features(struct net_buf * buf,struct net_buf ** evt)1078 static void read_local_features(struct net_buf *buf, struct net_buf **evt)
1079 {
1080 struct bt_hci_rp_read_local_features *rp;
1081
1082 rp = hci_cmd_complete(evt, sizeof(*rp));
1083
1084 rp->status = 0x00;
1085 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1086 /* BR/EDR not supported and LE supported */
1087 rp->features[4] = (1 << 5) | (1 << 6);
1088 }
1089
read_bd_addr(struct net_buf * buf,struct net_buf ** evt)1090 static void read_bd_addr(struct net_buf *buf, struct net_buf **evt)
1091 {
1092 struct bt_hci_rp_read_bd_addr *rp;
1093
1094 rp = hci_cmd_complete(evt, sizeof(*rp));
1095
1096 rp->status = 0x00;
1097
1098 (void)ll_addr_read(0, &rp->bdaddr.val[0]);
1099 }
1100
1101 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
hci_vendor_read_std_codecs(const struct bt_hci_std_codec_info_v2 ** codecs)1102 uint8_t __weak hci_vendor_read_std_codecs(
1103 const struct bt_hci_std_codec_info_v2 **codecs)
1104 {
1105 ARG_UNUSED(codecs);
1106
1107 /* return number of supported codecs */
1108 return 0;
1109 }
1110
hci_vendor_read_vs_codecs(const struct bt_hci_vs_codec_info_v2 ** codecs)1111 uint8_t __weak hci_vendor_read_vs_codecs(
1112 const struct bt_hci_vs_codec_info_v2 **codecs)
1113 {
1114 ARG_UNUSED(codecs);
1115
1116 /* return number of supported codecs */
1117 return 0;
1118 }
1119
1120 /* NOTE: Not implementing the [v1] version.
1121 * Refer to BT Spec v5.3 Vol 4, Part E 7.4.8 Read Local Supported Codecs command
1122 * The [v1] version of this command shall only return codecs supported on the
1123 * BR/EDR physical transport, while the [v2] version shall return codecs
1124 * supported on all physical transports.
1125 */
/* Read Local Supported Codecs [v2] (BT Core Spec Vol 4 Part E 7.4.8).
 *
 * Builds a variable-length Command Complete response: a standard-codec
 * list immediately followed by a vendor-specific codec list, both sized
 * from the (weak, vendor-overridable) hci_vendor_read_*_codecs() hooks.
 * The two list structs are laid out back-to-back inside rp->codecs[].
 */
static void read_codecs_v2(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_read_codecs_v2 *rp;
	const struct bt_hci_std_codec_info_v2 *std_codec_info;
	const struct bt_hci_vs_codec_info_v2 *vs_codec_info;
	struct bt_hci_std_codecs_v2 *std_codecs;
	struct bt_hci_vs_codecs_v2 *vs_codecs;
	size_t std_codecs_bytes;
	size_t vs_codecs_bytes;
	uint8_t num_std_codecs;
	uint8_t num_vs_codecs;
	uint8_t i;

	/* read standard codec information */
	num_std_codecs = hci_vendor_read_std_codecs(&std_codec_info);
	/* size of the count byte plus one fixed-size record per codec */
	std_codecs_bytes = sizeof(struct bt_hci_std_codecs_v2) +
		num_std_codecs * sizeof(struct bt_hci_std_codec_info_v2);
	/* read vendor-specific codec information */
	num_vs_codecs = hci_vendor_read_vs_codecs(&vs_codec_info);
	vs_codecs_bytes = sizeof(struct bt_hci_vs_codecs_v2) +
		num_vs_codecs * sizeof(struct bt_hci_vs_codec_info_v2);

	/* allocate response packet large enough for both lists */
	rp = hci_cmd_complete(evt, sizeof(*rp) +
			      std_codecs_bytes +
			      vs_codecs_bytes);
	rp->status = 0x00;

	/* copy standard codec information; the std list starts at the
	 * beginning of the flexible payload
	 */
	std_codecs = (struct bt_hci_std_codecs_v2 *)&rp->codecs[0];
	std_codecs->num_codecs = num_std_codecs;
	for (i = 0; i < num_std_codecs; i++) {
		struct bt_hci_std_codec_info_v2 *codec;

		codec = &std_codecs->codec_info[i];
		codec->codec_id = std_codec_info[i].codec_id;
		codec->transports = std_codec_info[i].transports;
	}

	/* copy vendor specific codec information; the vendor list starts
	 * right after the std list (offset std_codecs_bytes)
	 */
	vs_codecs = (struct bt_hci_vs_codecs_v2 *)&rp->codecs[std_codecs_bytes];
	vs_codecs->num_codecs = num_vs_codecs;
	for (i = 0; i < num_vs_codecs; i++) {
		struct bt_hci_vs_codec_info_v2 *codec;

		codec = &vs_codecs->codec_info[i];
		/* multi-byte IDs go out little-endian per HCI convention */
		codec->company_id =
			sys_cpu_to_le16(vs_codec_info[i].company_id);
		codec->codec_id = sys_cpu_to_le16(vs_codec_info[i].codec_id);
		codec->transports = vs_codec_info[i].transports;
	}
}
1178
hci_vendor_read_codec_capabilities(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t * num_capabilities,size_t * capabilities_bytes,const uint8_t ** capabilities)1179 uint8_t __weak hci_vendor_read_codec_capabilities(uint8_t coding_format,
1180 uint16_t company_id,
1181 uint16_t vs_codec_id,
1182 uint8_t transport,
1183 uint8_t direction,
1184 uint8_t *num_capabilities,
1185 size_t *capabilities_bytes,
1186 const uint8_t **capabilities)
1187 {
1188 ARG_UNUSED(coding_format);
1189 ARG_UNUSED(company_id);
1190 ARG_UNUSED(vs_codec_id);
1191 ARG_UNUSED(transport);
1192 ARG_UNUSED(direction);
1193 ARG_UNUSED(capabilities);
1194
1195 *num_capabilities = 0;
1196 *capabilities_bytes = 0;
1197
1198 /* return status */
1199 return 0x00;
1200 }
1201
read_codec_capabilities(struct net_buf * buf,struct net_buf ** evt)1202 static void read_codec_capabilities(struct net_buf *buf, struct net_buf **evt)
1203 {
1204 struct bt_hci_cp_read_codec_capabilities *cmd = (void *)buf->data;
1205 struct bt_hci_rp_read_codec_capabilities *rp;
1206 const uint8_t *capabilities;
1207 size_t capabilities_bytes;
1208 uint8_t num_capabilities;
1209 uint16_t vs_codec_id;
1210 uint16_t company_id;
1211 uint8_t status;
1212
1213 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1214 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1215
1216 /* read codec capabilities */
1217 status = hci_vendor_read_codec_capabilities(cmd->codec_id.coding_format,
1218 company_id,
1219 vs_codec_id,
1220 cmd->transport,
1221 cmd->direction,
1222 &num_capabilities,
1223 &capabilities_bytes,
1224 &capabilities);
1225
1226 /* allocate response packet */
1227 rp = hci_cmd_complete(evt, sizeof(*rp) + capabilities_bytes);
1228 rp->status = status;
1229
1230 /* copy codec capabilities information */
1231 rp->num_capabilities = num_capabilities;
1232 memcpy(&rp->capabilities, capabilities, capabilities_bytes);
1233 }
1234
hci_vendor_read_ctlr_delay(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t codec_config_len,const uint8_t * codec_config,uint32_t * min_delay,uint32_t * max_delay)1235 uint8_t __weak hci_vendor_read_ctlr_delay(uint8_t coding_format,
1236 uint16_t company_id,
1237 uint16_t vs_codec_id,
1238 uint8_t transport,
1239 uint8_t direction,
1240 uint8_t codec_config_len,
1241 const uint8_t *codec_config,
1242 uint32_t *min_delay,
1243 uint32_t *max_delay)
1244 {
1245 ARG_UNUSED(coding_format);
1246 ARG_UNUSED(company_id);
1247 ARG_UNUSED(vs_codec_id);
1248 ARG_UNUSED(transport);
1249 ARG_UNUSED(direction);
1250 ARG_UNUSED(codec_config_len);
1251 ARG_UNUSED(codec_config);
1252
1253 *min_delay = 0;
1254 *max_delay = 0x3D0900; /* 4 seconds, maximum value allowed by spec */
1255
1256 /* return status */
1257 return 0x00;
1258 }
1259
read_ctlr_delay(struct net_buf * buf,struct net_buf ** evt)1260 static void read_ctlr_delay(struct net_buf *buf, struct net_buf **evt)
1261 {
1262 struct bt_hci_cp_read_ctlr_delay *cmd = (void *)buf->data;
1263 struct bt_hci_rp_read_ctlr_delay *rp;
1264 uint16_t vs_codec_id;
1265 uint16_t company_id;
1266 uint32_t min_delay;
1267 uint32_t max_delay;
1268 uint8_t status;
1269
1270 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1271 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1272
1273 status = hci_vendor_read_ctlr_delay(cmd->codec_id.coding_format,
1274 company_id,
1275 vs_codec_id,
1276 cmd->transport,
1277 cmd->direction,
1278 cmd->codec_config_len,
1279 cmd->codec_config,
1280 &min_delay,
1281 &max_delay);
1282
1283 rp = hci_cmd_complete(evt, sizeof(*rp));
1284 rp->status = status;
1285 sys_put_le24(min_delay, rp->min_ctlr_delay);
1286 sys_put_le24(max_delay, rp->max_ctlr_delay);
1287 }
1288 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1289
/* Dispatch an Informational Parameters command (OGF 0x04) by OCF.
 *
 * @param ocf Opcode Command Field of the received command.
 * @param cmd Command parameter buffer.
 * @param evt Output: Command Complete event allocated by the handler.
 * @return 0 on success, -EINVAL for an unsupported OCF.
 */
static int info_cmd_handle(uint16_t ocf, struct net_buf *cmd,
			   struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_READ_LOCAL_VERSION_INFO):
		read_local_version_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_SUPPORTED_COMMANDS):
		read_supported_commands(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_LOCAL_FEATURES):
		read_local_features(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_BD_ADDR):
		read_bd_addr(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
	/* Codec discovery commands are only compiled in when codec and
	 * delay reporting is enabled in Kconfig.
	 */
	case BT_OCF(BT_HCI_OP_READ_CODECS_V2):
		read_codecs_v2(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_CODEC_CAPABILITIES):
		read_codec_capabilities(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_CTLR_DELAY):
		read_ctlr_delay(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */

	default:
		return -EINVAL;
	}

	return 0;
}
1330
1331 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
read_rssi(struct net_buf * buf,struct net_buf ** evt)1332 static void read_rssi(struct net_buf *buf, struct net_buf **evt)
1333 {
1334 struct bt_hci_cp_read_rssi *cmd = (void *)buf->data;
1335 struct bt_hci_rp_read_rssi *rp;
1336 uint16_t handle;
1337
1338 handle = sys_le16_to_cpu(cmd->handle);
1339
1340 rp = hci_cmd_complete(evt, sizeof(*rp));
1341
1342 rp->status = ll_rssi_get(handle, &rp->rssi);
1343
1344 rp->handle = sys_cpu_to_le16(handle);
1345 /* The Link Layer currently returns RSSI as an absolute value */
1346 rp->rssi = (!rp->status) ? -rp->rssi : 127;
1347 }
1348 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1349
/* Dispatch a Status Parameters command (OGF 0x05) by OCF.
 *
 * @param ocf Opcode Command Field of the received command.
 * @param cmd Command parameter buffer.
 * @param evt Output: Command Complete event allocated by the handler.
 * @return 0 on success, -EINVAL for an unsupported OCF.
 */
static int status_cmd_handle(uint16_t ocf, struct net_buf *cmd,
			     struct net_buf **evt)
{
	switch (ocf) {
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
	case BT_OCF(BT_HCI_OP_READ_RSSI):
		read_rssi(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

	default:
		return -EINVAL;
	}

	return 0;
}
1366
le_set_event_mask(struct net_buf * buf,struct net_buf ** evt)1367 static void le_set_event_mask(struct net_buf *buf, struct net_buf **evt)
1368 {
1369 struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
1370
1371 le_event_mask = sys_get_le64(cmd->events);
1372
1373 *evt = cmd_complete_status(0x00);
1374 }
1375
le_read_buffer_size(struct net_buf * buf,struct net_buf ** evt)1376 static void le_read_buffer_size(struct net_buf *buf, struct net_buf **evt)
1377 {
1378 struct bt_hci_rp_le_read_buffer_size *rp;
1379
1380 rp = hci_cmd_complete(evt, sizeof(*rp));
1381
1382 rp->status = 0x00;
1383
1384 rp->le_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1385 rp->le_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1386 }
1387
1388 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_read_buffer_size_v2(struct net_buf * buf,struct net_buf ** evt)1389 static void le_read_buffer_size_v2(struct net_buf *buf, struct net_buf **evt)
1390 {
1391 struct bt_hci_rp_le_read_buffer_size_v2 *rp;
1392
1393 rp = hci_cmd_complete(evt, sizeof(*rp));
1394
1395 rp->status = 0x00;
1396
1397 rp->acl_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1398 rp->acl_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1399 rp->iso_max_len = sys_cpu_to_le16(CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE);
1400 rp->iso_max_num = CONFIG_BT_CTLR_ISO_TX_BUFFERS;
1401 }
1402 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1403
le_read_local_features(struct net_buf * buf,struct net_buf ** evt)1404 static void le_read_local_features(struct net_buf *buf, struct net_buf **evt)
1405 {
1406 struct bt_hci_rp_le_read_local_features *rp;
1407
1408 rp = hci_cmd_complete(evt, sizeof(*rp));
1409
1410 rp->status = 0x00;
1411
1412 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1413 sys_put_le64(ll_feat_get(), rp->features);
1414 }
1415
le_set_random_address(struct net_buf * buf,struct net_buf ** evt)1416 static void le_set_random_address(struct net_buf *buf, struct net_buf **evt)
1417 {
1418 struct bt_hci_cp_le_set_random_address *cmd = (void *)buf->data;
1419 uint8_t status;
1420
1421 status = ll_addr_set(1, &cmd->bdaddr.val[0]);
1422
1423 *evt = cmd_complete_status(status);
1424 }
1425
1426 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
le_read_fal_size(struct net_buf * buf,struct net_buf ** evt)1427 static void le_read_fal_size(struct net_buf *buf, struct net_buf **evt)
1428 {
1429 struct bt_hci_rp_le_read_fal_size *rp;
1430
1431 rp = hci_cmd_complete(evt, sizeof(*rp));
1432 rp->status = 0x00;
1433
1434 rp->fal_size = ll_fal_size_get();
1435 }
1436
/* LE Clear Filter Accept List: remove every FAL entry. */
static void le_clear_fal(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_fal_clear());
}
1445
le_add_dev_to_fal(struct net_buf * buf,struct net_buf ** evt)1446 static void le_add_dev_to_fal(struct net_buf *buf, struct net_buf **evt)
1447 {
1448 struct bt_hci_cp_le_add_dev_to_fal *cmd = (void *)buf->data;
1449 uint8_t status;
1450
1451 status = ll_fal_add(&cmd->addr);
1452
1453 *evt = cmd_complete_status(status);
1454 }
1455
le_rem_dev_from_fal(struct net_buf * buf,struct net_buf ** evt)1456 static void le_rem_dev_from_fal(struct net_buf *buf, struct net_buf **evt)
1457 {
1458 struct bt_hci_cp_le_rem_dev_from_fal *cmd = (void *)buf->data;
1459 uint8_t status;
1460
1461 status = ll_fal_remove(&cmd->addr);
1462
1463 *evt = cmd_complete_status(status);
1464 }
1465 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1466
le_encrypt(struct net_buf * buf,struct net_buf ** evt)1467 static void le_encrypt(struct net_buf *buf, struct net_buf **evt)
1468 {
1469 struct bt_hci_cp_le_encrypt *cmd = (void *)buf->data;
1470 struct bt_hci_rp_le_encrypt *rp;
1471 uint8_t enc_data[16];
1472
1473 ecb_encrypt(cmd->key, cmd->plaintext, enc_data, NULL);
1474
1475 rp = hci_cmd_complete(evt, sizeof(*rp));
1476
1477 rp->status = 0x00;
1478 memcpy(rp->enc_data, enc_data, 16);
1479 }
1480
le_rand(struct net_buf * buf,struct net_buf ** evt)1481 static void le_rand(struct net_buf *buf, struct net_buf **evt)
1482 {
1483 struct bt_hci_rp_le_rand *rp;
1484 uint8_t count = sizeof(rp->rand);
1485
1486 rp = hci_cmd_complete(evt, sizeof(*rp));
1487 rp->status = 0x00;
1488
1489 lll_csrand_get(rp->rand, count);
1490 }
1491
/* LE Read Supported States (BT Core Spec Vol 4 Part E 7.8.27).
 *
 * Builds the 64-bit LE states bitmap from the roles enabled in Kconfig.
 * Each ST_* mask below groups the state-combination bits that become
 * possible when the corresponding role (advertiser, scanner, peripheral,
 * central) is supported; bit numbers follow the spec's LE_States table.
 */
static void le_read_supp_states(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_le_read_supp_states *rp;
	uint64_t states = 0U;

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = 0x00;

	/* State bits reachable with the Broadcaster (advertising) role. */
#define ST_ADV (BIT64(0)  | BIT64(1)  | BIT64(8)  | BIT64(9)  | BIT64(12) | \
		BIT64(13) | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(20) | BIT64(21))

	/* State bits reachable with the Observer (scanning) role. */
#define ST_SCA (BIT64(4)  | BIT64(5)  | BIT64(8)  | BIT64(9)  | BIT64(10) | \
		BIT64(11) | BIT64(12) | BIT64(13) | BIT64(14) | BIT64(15) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(26) | \
		BIT64(27) | BIT64(30) | BIT64(31))

	/* State bits reachable with the Peripheral role. */
#define ST_PER (BIT64(2)  | BIT64(3)  | BIT64(7)  | BIT64(10) | BIT64(11) | \
		BIT64(14) | BIT64(15) | BIT64(20) | BIT64(21) | BIT64(26) | \
		BIT64(27) | BIT64(29) | BIT64(30) | BIT64(31) | BIT64(32) | \
		BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | BIT64(37) | \
		BIT64(38) | BIT64(39) | BIT64(40) | BIT64(41))

	/* State bits reachable with the Central role. */
#define ST_CEN (BIT64(6)  | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(28) | \
		BIT64(32) | BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | \
		BIT64(37) | BIT64(41))

	/* NOTE: the masks overlap, so a bit set by one enabled role may be
	 * cleared again by a later disabled role's &= ~mask — this ordering
	 * is intentional: a combined state needs both roles to be usable.
	 */
#if defined(CONFIG_BT_BROADCASTER)
	states |= ST_ADV;
#else
	states &= ~ST_ADV;
#endif
#if defined(CONFIG_BT_OBSERVER)
	states |= ST_SCA;
#else
	states &= ~ST_SCA;
#endif
#if defined(CONFIG_BT_PERIPHERAL)
	states |= ST_PER;
#else
	states &= ~ST_PER;
#endif
#if defined(CONFIG_BT_CENTRAL)
	states |= ST_CEN;
#else
	states &= ~ST_CEN;
#endif
	/* All states and combinations supported except:
	 * Initiating State + Passive Scanning
	 * Initiating State + Active Scanning
	 */
	states &= ~(BIT64(22) | BIT64(23));
	LOG_DBG("states: 0x%08x%08x", (uint32_t)(states >> 32), (uint32_t)(states & 0xffffffff));
	sys_put_le64(states, rp->le_states);
}
1548
1549 #if defined(CONFIG_BT_BROADCASTER)
/* LE Set Advertising Parameters (legacy, BT Core Spec Vol 4 Part E 7.8.5).
 *
 * Validates the interval range when parameter checking is enabled, then
 * forwards to the Link Layer. The ll_adv_params_set() signature differs
 * between the extended-advertising and legacy builds, hence the #if.
 */
static void le_set_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_adv_param *cmd = (void *)buf->data;
	uint16_t min_interval;
	uint8_t status;

	/* Legacy advertising commands are disallowed once extended
	 * advertising commands have been used (spec: no mixing).
	 */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

	min_interval = sys_le16_to_cpu(cmd->min_interval);

	/* Interval checks do not apply to high-duty-cycle directed
	 * advertising (ADV_DIRECT_IND), which ignores the interval fields.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (cmd->type != BT_HCI_ADV_DIRECT_IND)) {
		uint16_t max_interval = sys_le16_to_cpu(cmd->max_interval);

		/* Spec range: 0x0020..0x4000, and min must not exceed max. */
		if ((min_interval > max_interval) ||
		    (min_interval < 0x0020) ||
		    (max_interval > 0x4000)) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Extended-advertising build: handle 0, legacy PDU defaults. */
	status = ll_adv_params_set(0, 0, min_interval, cmd->type,
				   cmd->own_addr_type, cmd->direct_addr.type,
				   &cmd->direct_addr.a.val[0], cmd->channel_map,
				   cmd->filter_policy, 0, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_adv_params_set(min_interval, cmd->type,
				   cmd->own_addr_type, cmd->direct_addr.type,
				   &cmd->direct_addr.a.val[0], cmd->channel_map,
				   cmd->filter_policy);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_complete_status(status);
}
1588
le_read_adv_chan_tx_power(struct net_buf * buf,struct net_buf ** evt)1589 static void le_read_adv_chan_tx_power(struct net_buf *buf, struct net_buf **evt)
1590 {
1591 struct bt_hci_rp_le_read_chan_tx_power *rp;
1592
1593 if (adv_cmds_legacy_check(evt)) {
1594 return;
1595 }
1596
1597 rp = hci_cmd_complete(evt, sizeof(*rp));
1598
1599 rp->status = 0x00;
1600
1601 rp->tx_power_level = 0;
1602 }
1603
/* LE Set Advertising Data (legacy): hand the AD payload to the LL.
 * The ll_adv_data_set() signature gains a leading advertising-set handle
 * (0 = legacy set) in extended-advertising builds.
 */
static void le_set_adv_data(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_adv_data *cmd = (void *)buf->data;
	uint8_t status;

	/* Legacy advertising command; rejected if extended commands used. */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_adv_data_set(0, cmd->len, &cmd->data[0]);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_adv_data_set(cmd->len, &cmd->data[0]);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_complete_status(status);
}
1621
/* LE Set Scan Response Data (legacy): hand the SRD payload to the LL.
 * The ll_adv_scan_rsp_set() signature gains a leading advertising-set
 * handle (0 = legacy set) in extended-advertising builds.
 */
static void le_set_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_scan_rsp_data *cmd = (void *)buf->data;
	uint8_t status;

	/* Legacy advertising command; rejected if extended commands used. */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_adv_scan_rsp_set(0, cmd->len, &cmd->data[0]);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_adv_scan_rsp_set(cmd->len, &cmd->data[0]);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_complete_status(status);
}
1639
/* LE Set Advertising Enable (legacy): start/stop legacy advertising.
 * ll_adv_enable() takes extra arguments (handle, duration/events, mesh
 * parameters) depending on the ADV_EXT / MESH_EXT build configuration;
 * the legacy path passes zeros for all of them.
 */
static void le_set_adv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_adv_enable *cmd = (void *)buf->data;
	uint8_t status;

	/* Legacy advertising command; rejected if extended commands used. */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
#if defined(CONFIG_BT_HCI_MESH_EXT)
	status = ll_adv_enable(0, cmd->enable, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_HCI_MESH_EXT */
	status = ll_adv_enable(0, cmd->enable, 0, 0);
#endif /* !CONFIG_BT_HCI_MESH_EXT */
#else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
	status = ll_adv_enable(cmd->enable);
#endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */

	*evt = cmd_complete_status(status);
}
1661
1662 #if defined(CONFIG_BT_CTLR_ADV_ISO)
le_create_big(struct net_buf * buf,struct net_buf ** evt)1663 static void le_create_big(struct net_buf *buf, struct net_buf **evt)
1664 {
1665 struct bt_hci_cp_le_create_big *cmd = (void *)buf->data;
1666 uint32_t sdu_interval;
1667 uint16_t max_latency;
1668 uint8_t big_handle;
1669 uint8_t adv_handle;
1670 uint16_t max_sdu;
1671 uint8_t status;
1672
1673 status = ll_adv_iso_by_hci_handle_new(cmd->big_handle, &big_handle);
1674 if (status) {
1675 *evt = cmd_status(status);
1676 return;
1677 }
1678
1679 status = ll_adv_set_by_hci_handle_get(cmd->adv_handle, &adv_handle);
1680 if (status) {
1681 *evt = cmd_status(status);
1682 return;
1683 }
1684
1685 sdu_interval = sys_get_le24(cmd->sdu_interval);
1686 max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1687 max_latency = sys_le16_to_cpu(cmd->max_latency);
1688
1689 status = ll_big_create(big_handle, adv_handle, cmd->num_bis,
1690 sdu_interval, max_sdu, max_latency, cmd->rtn,
1691 cmd->phy, cmd->packing, cmd->framing,
1692 cmd->encryption, cmd->bcode);
1693
1694 *evt = cmd_status(status);
1695 }
1696
le_create_big_test(struct net_buf * buf,struct net_buf ** evt)1697 static void le_create_big_test(struct net_buf *buf, struct net_buf **evt)
1698 {
1699 struct bt_hci_cp_le_create_big_test *cmd = (void *)buf->data;
1700 uint32_t sdu_interval;
1701 uint16_t iso_interval;
1702 uint16_t max_sdu;
1703 uint16_t max_pdu;
1704 uint8_t status;
1705
1706 sdu_interval = sys_get_le24(cmd->sdu_interval);
1707 iso_interval = sys_le16_to_cpu(cmd->iso_interval);
1708 max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1709 max_pdu = sys_le16_to_cpu(cmd->max_pdu);
1710
1711 status = ll_big_test_create(cmd->big_handle, cmd->adv_handle,
1712 cmd->num_bis, sdu_interval, iso_interval,
1713 cmd->nse, max_sdu, max_pdu, cmd->phy,
1714 cmd->packing, cmd->framing, cmd->bn,
1715 cmd->irc, cmd->pto, cmd->encryption,
1716 cmd->bcode);
1717
1718 *evt = cmd_status(status);
1719 }
1720
le_terminate_big(struct net_buf * buf,struct net_buf ** evt)1721 static void le_terminate_big(struct net_buf *buf, struct net_buf **evt)
1722 {
1723 struct bt_hci_cp_le_terminate_big *cmd = (void *)buf->data;
1724 uint8_t status;
1725
1726 status = ll_big_terminate(cmd->big_handle, cmd->reason);
1727
1728 *evt = cmd_status(status);
1729 }
1730 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1731 #endif /* CONFIG_BT_BROADCASTER */
1732
1733 #if defined(CONFIG_BT_OBSERVER)
le_set_scan_param(struct net_buf * buf,struct net_buf ** evt)1734 static void le_set_scan_param(struct net_buf *buf, struct net_buf **evt)
1735 {
1736 struct bt_hci_cp_le_set_scan_param *cmd = (void *)buf->data;
1737 uint16_t interval;
1738 uint16_t window;
1739 uint8_t status;
1740
1741 if (adv_cmds_legacy_check(evt)) {
1742 return;
1743 }
1744
1745 interval = sys_le16_to_cpu(cmd->interval);
1746 window = sys_le16_to_cpu(cmd->window);
1747
1748 status = ll_scan_params_set(cmd->scan_type, interval, window,
1749 cmd->addr_type, cmd->filter_policy);
1750
1751 *evt = cmd_complete_status(status);
1752 }
1753
/* LE Set Scan Enable (legacy, BT Core Spec Vol 4 Part E 7.8.11).
 *
 * Duplicate filtering is implemented here in HCI (not in the LL), so the
 * duplicate-filter bookkeeping is updated before the LL enable call.
 * dup_count == DUP_FILTER_DISABLED means filtering is currently off.
 */
static void le_set_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	/* Legacy advertising command; rejected if extended commands used. */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->enable && cmd->filter_dup) {
		/* `if (0)` anchors the #ifdef'd else-if chain so the code
		 * compiles whether or not ADI support is enabled.
		 */
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		} else if (dup_count == DUP_FILTER_DISABLED) {
			/* Filtering was off: enable and reset entries. */
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		} else if (!dup_scan) {
			/* Entries in use by periodic sync; keep them but
			 * reset the per-extended-advertising state.
			 */
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_scan_enable(cmd->enable, 0, 0);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_scan_enable(cmd->enable);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 * enabling of already enabled scanning shall succeed after
	 * updates to filter duplicates is handled in the above
	 * statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
1812
1813 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
le_big_create_sync(struct net_buf * buf,struct net_buf ** evt)1814 static void le_big_create_sync(struct net_buf *buf, struct net_buf **evt)
1815 {
1816 struct bt_hci_cp_le_big_create_sync *cmd = (void *)buf->data;
1817 uint8_t status;
1818 uint16_t sync_handle;
1819 uint16_t sync_timeout;
1820
1821 sync_handle = sys_le16_to_cpu(cmd->sync_handle);
1822 sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
1823
1824 status = ll_big_sync_create(cmd->big_handle, sync_handle,
1825 cmd->encryption, cmd->bcode, cmd->mse,
1826 sync_timeout, cmd->num_bis, cmd->bis);
1827
1828 *evt = cmd_status(status);
1829 }
1830
1831
le_big_terminate_sync(struct net_buf * buf,struct net_buf ** evt,void ** node_rx)1832 static void le_big_terminate_sync(struct net_buf *buf, struct net_buf **evt,
1833 void **node_rx)
1834 {
1835 struct bt_hci_cp_le_big_terminate_sync *cmd = (void *)buf->data;
1836 struct bt_hci_rp_le_big_terminate_sync *rp;
1837 uint8_t big_handle;
1838 uint8_t status;
1839
1840 big_handle = cmd->big_handle;
1841 status = ll_big_sync_terminate(big_handle, node_rx);
1842
1843 rp = hci_cmd_complete(evt, sizeof(*rp));
1844 rp->status = status;
1845 rp->big_handle = big_handle;
1846 }
1847 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1848 #endif /* CONFIG_BT_OBSERVER */
1849
1850 #if defined(CONFIG_BT_CENTRAL)
check_cconn_params(bool ext,uint16_t scan_interval,uint16_t scan_window,uint16_t conn_interval_max,uint16_t conn_latency,uint16_t supervision_timeout)1851 static uint8_t check_cconn_params(bool ext, uint16_t scan_interval,
1852 uint16_t scan_window,
1853 uint16_t conn_interval_max,
1854 uint16_t conn_latency,
1855 uint16_t supervision_timeout)
1856 {
1857 if (scan_interval < 0x0004 || scan_window < 0x0004 ||
1858 (!ext && (scan_interval > 0x4000 || scan_window > 0x4000))) {
1859 return BT_HCI_ERR_INVALID_PARAM;
1860 }
1861
1862 if (conn_interval_max < 0x0006 || conn_interval_max > 0x0C80) {
1863 return BT_HCI_ERR_INVALID_PARAM;
1864 }
1865
1866 if (conn_latency > 0x01F3) {
1867 return BT_HCI_ERR_INVALID_PARAM;
1868 }
1869
1870 if (supervision_timeout < 0x000A || supervision_timeout > 0x0C80) {
1871 return BT_HCI_ERR_INVALID_PARAM;
1872 }
1873
1874 /* sto * 10ms > (1 + lat) * ci * 1.25ms * 2
1875 * sto * 10 > (1 + lat) * ci * 2.5
1876 * sto * 2 > (1 + lat) * ci * 0.5
1877 * sto * 4 > (1 + lat) * ci
1878 */
1879 if ((supervision_timeout << 2) <= ((1 + conn_latency) *
1880 conn_interval_max)) {
1881 return BT_HCI_ERR_INVALID_PARAM;
1882 }
1883
1884 return 0;
1885 }
1886
/* LE Create Connection (legacy, BT Core Spec Vol 4 Part E 7.8.12).
 *
 * Optionally validates parameters, then asks the LL to initiate a
 * connection. The extended-advertising build splits initiation into
 * ll_create_connection() + ll_connect_enable(); the legacy build does it
 * in one call. Responds with Command Status; the connection itself is
 * reported later via LE Connection Complete.
 */
static void le_create_connection(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_create_conn *cmd = (void *)buf->data;
	uint16_t supervision_timeout;
	uint16_t conn_interval_max;
	uint16_t scan_interval;
	uint16_t conn_latency;
	uint16_t scan_window;
	uint8_t status;

	/* Legacy command: disallowed once extended commands are in use.
	 * NULL arg: this path emits Command Status, not Command Complete.
	 */
	if (adv_cmds_legacy_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	scan_interval = sys_le16_to_cpu(cmd->scan_interval);
	scan_window = sys_le16_to_cpu(cmd->scan_window);
	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		/* ext=false: legacy bounds apply to scan interval/window */
		status = check_cconn_params(false, scan_interval,
					    scan_window,
					    conn_interval_max,
					    conn_latency,
					    supervision_timeout);
		if (status) {
			*evt = cmd_status(status);
			return;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_create_connection(scan_interval, scan_window,
				      cmd->filter_policy,
				      cmd->peer_addr.type,
				      &cmd->peer_addr.a.val[0],
				      cmd->own_addr_type, conn_interval_max,
				      conn_latency, supervision_timeout,
				      PHY_LEGACY);
	if (status) {
		*evt = cmd_status(status);
		return;
	}

	/* Arm the initiator (0U = first/only initiator instance). */
	status = ll_connect_enable(0U);

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_create_connection(scan_interval, scan_window,
				      cmd->filter_policy,
				      cmd->peer_addr.type,
				      &cmd->peer_addr.a.val[0],
				      cmd->own_addr_type, conn_interval_max,
				      conn_latency, supervision_timeout);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_status(status);
}
1946
/* HCI LE Create Connection Cancel command handler.
 *
 * Aborts an ongoing connection-establishment attempt; the LL returns any
 * pending connection node through node_rx.
 */
static void le_create_conn_cancel(struct net_buf *buf, struct net_buf **evt,
				  void **node_rx)
{
	*evt = cmd_complete_status(ll_connect_disable(node_rx));
}
1956
le_set_host_chan_classif(struct net_buf * buf,struct net_buf ** evt)1957 static void le_set_host_chan_classif(struct net_buf *buf, struct net_buf **evt)
1958 {
1959 struct bt_hci_cp_le_set_host_chan_classif *cmd = (void *)buf->data;
1960 uint8_t status;
1961
1962 status = ll_chm_update(&cmd->ch_map[0]);
1963
1964 *evt = cmd_complete_status(status);
1965 }
1966
1967 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_start_encryption(struct net_buf * buf,struct net_buf ** evt)1968 static void le_start_encryption(struct net_buf *buf, struct net_buf **evt)
1969 {
1970 struct bt_hci_cp_le_start_encryption *cmd = (void *)buf->data;
1971 uint16_t handle;
1972 uint8_t status;
1973
1974 handle = sys_le16_to_cpu(cmd->handle);
1975 status = ll_enc_req_send(handle,
1976 (uint8_t *)&cmd->rand,
1977 (uint8_t *)&cmd->ediv,
1978 &cmd->ltk[0]);
1979
1980 *evt = cmd_status(status);
1981 }
1982 #endif /* CONFIG_BT_CTLR_LE_ENC */
1983
1984 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
/* HCI LE Set CIG Parameters command handler.
 *
 * Opens a new CIG (or starts modifying an existing one), configures each
 * CIS listed in the command and, if everything succeeded, commits the
 * configuration and returns the allocated CIS handles in the response.
 * On failure the response carries the error status and zero handles.
 */
static void le_set_cig_parameters(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_cig_params *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_cig_params *rp;
	uint32_t c_interval;
	uint32_t p_interval;
	uint16_t c_latency;
	uint16_t p_latency;
	uint8_t cis_count;
	uint8_t cig_id;
	uint8_t status;
	uint8_t i;

	/* SDU intervals are 24-bit little-endian values on the wire */
	c_interval = sys_get_le24(cmd->c_interval);
	p_interval = sys_get_le24(cmd->p_interval);
	c_latency = sys_le16_to_cpu(cmd->c_latency);
	p_latency = sys_le16_to_cpu(cmd->p_latency);

	cig_id = cmd->cig_id;
	cis_count = cmd->num_cis;

	/* Create CIG or start modifying existing CIG */
	status = ll_cig_parameters_open(cig_id, c_interval, p_interval,
					cmd->sca, cmd->packing, cmd->framing,
					c_latency, p_latency, cis_count);

	/* Configure individual CISes; stop at the first error */
	for (i = 0; !status && i < cis_count; i++) {
		struct bt_hci_cis_params *params = &cmd->cis[i];
		uint16_t c_sdu;
		uint16_t p_sdu;

		c_sdu = sys_le16_to_cpu(params->c_sdu);
		p_sdu = sys_le16_to_cpu(params->p_sdu);

		status = ll_cis_parameters_set(params->cis_id, c_sdu, p_sdu,
					       params->c_phy, params->p_phy,
					       params->c_rtn, params->p_rtn);
	}

	/* Response always reserves room for one handle per requested CIS */
	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
	rp->cig_id = cig_id;

	/* Only apply parameters if all went well */
	if (!status) {
		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];

		status = ll_cig_parameters_commit(cig_id, handles);

		if (status == BT_HCI_ERR_SUCCESS) {
			for (uint8_t j = 0; j < cis_count; j++) {
				rp->handle[j] = sys_cpu_to_le16(handles[j]);
			}
		}
	}

	/* No handles are reported back on failure */
	rp->num_handles = status ? 0U : cis_count;
	rp->status = status;
}
2044
/* HCI LE Set CIG Parameters Test command handler (ISO test mode).
 *
 * Same open/configure/commit flow as le_set_cig_parameters(), but using
 * the test-mode parameter set: explicit PDU sizes, flush timeouts, burst
 * numbers and ISO interval instead of host-level SDU latency targets.
 */
static void le_set_cig_params_test(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_cig_params_test *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_cig_params_test *rp;

	uint32_t c_interval;
	uint32_t p_interval;
	uint16_t iso_interval;
	uint8_t cis_count;
	uint8_t cig_id;
	uint8_t status;
	uint8_t i;

	/* SDU intervals are 24-bit little-endian values on the wire */
	c_interval = sys_get_le24(cmd->c_interval);
	p_interval = sys_get_le24(cmd->p_interval);
	iso_interval = sys_le16_to_cpu(cmd->iso_interval);

	cig_id = cmd->cig_id;
	cis_count = cmd->num_cis;

	/* Create CIG or start modifying existing CIG */
	status = ll_cig_parameters_test_open(cig_id, c_interval,
					     p_interval, cmd->c_ft,
					     cmd->p_ft, iso_interval,
					     cmd->sca, cmd->packing,
					     cmd->framing,
					     cis_count);

	/* Configure individual CISes; stop at the first error */
	for (i = 0; !status && i < cis_count; i++) {
		struct bt_hci_cis_params_test *params = &cmd->cis[i];
		uint16_t c_sdu;
		uint16_t p_sdu;
		uint16_t c_pdu;
		uint16_t p_pdu;
		uint8_t nse;

		nse = params->nse;
		c_sdu = sys_le16_to_cpu(params->c_sdu);
		p_sdu = sys_le16_to_cpu(params->p_sdu);
		c_pdu = sys_le16_to_cpu(params->c_pdu);
		p_pdu = sys_le16_to_cpu(params->p_pdu);

		status = ll_cis_parameters_test_set(params->cis_id, nse,
						    c_sdu, p_sdu,
						    c_pdu, p_pdu,
						    params->c_phy,
						    params->p_phy,
						    params->c_bn,
						    params->p_bn);
	}

	/* Response always reserves room for one handle per requested CIS */
	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
	rp->cig_id = cig_id;

	/* Only apply parameters if all went well */
	if (!status) {
		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];

		status = ll_cig_parameters_commit(cig_id, handles);

		if (status == BT_HCI_ERR_SUCCESS) {
			for (uint8_t j = 0; j < cis_count; j++) {
				rp->handle[j] = sys_cpu_to_le16(handles[j]);
			}
		}
	}

	/* No handles are reported back on failure */
	rp->num_handles = status ? 0U : cis_count;
	rp->status = status;
}
2116
le_create_cis(struct net_buf * buf,struct net_buf ** evt)2117 static void le_create_cis(struct net_buf *buf, struct net_buf **evt)
2118 {
2119 uint16_t handle_used[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP] = {0};
2120 struct bt_hci_cp_le_create_cis *cmd = (void *)buf->data;
2121 uint8_t status;
2122 uint8_t i;
2123
2124 /*
2125 * Only create a CIS if the Isochronous Channels (Host Support) feature bit
2126 * is set. Refer to BT Spec v5.4 Vol 6 Part B Section 4.6.33.1.
2127 */
2128 if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS))) {
2129 *evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2130 return;
2131 }
2132
2133 /*
2134 * Creating new CISes is disallowed until all previous CIS
2135 * established events have been generated
2136 */
2137 if (cis_pending_count) {
2138 *evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2139 return;
2140 }
2141
2142 /* Check all handles before actually starting to create CISes */
2143 status = 0x00;
2144 for (i = 0; !status && i < cmd->num_cis; i++) {
2145 uint16_t cis_handle;
2146 uint16_t acl_handle;
2147 uint8_t cis_idx;
2148
2149 cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2150 acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2151
2152 cis_idx = LL_CIS_IDX_FROM_HANDLE(cis_handle);
2153 if (handle_used[cis_idx]) {
2154 /* Handle must be unique in request */
2155 status = BT_HCI_ERR_INVALID_PARAM;
2156 break;
2157 }
2158
2159 handle_used[cis_idx]++;
2160 status = ll_cis_create_check(cis_handle, acl_handle);
2161 }
2162
2163 if (status) {
2164 *evt = cmd_status(status);
2165 return;
2166 }
2167
2168 /*
2169 * Actually create CISes, any errors are to be reported
2170 * through CIS established events
2171 */
2172 cis_pending_count = cmd->num_cis;
2173 for (i = 0; i < cmd->num_cis; i++) {
2174 uint16_t cis_handle;
2175 uint16_t acl_handle;
2176
2177 cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2178 acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2179 ll_cis_create(cis_handle, acl_handle);
2180 }
2181
2182 *evt = cmd_status(status);
2183 }
2184
le_remove_cig(struct net_buf * buf,struct net_buf ** evt)2185 static void le_remove_cig(struct net_buf *buf, struct net_buf **evt)
2186 {
2187 struct bt_hci_cp_le_remove_cig *cmd = (void *)buf->data;
2188 struct bt_hci_rp_le_remove_cig *rp;
2189 uint8_t status;
2190
2191 status = ll_cig_remove(cmd->cig_id);
2192
2193 rp = hci_cmd_complete(evt, sizeof(*rp));
2194 rp->status = status;
2195 rp->cig_id = cmd->cig_id;
2196 }
2197 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
2198
2199 #endif /* CONFIG_BT_CENTRAL */
2200
2201 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_transmit_test(struct net_buf * buf,struct net_buf ** evt)2202 static void le_iso_transmit_test(struct net_buf *buf, struct net_buf **evt)
2203 {
2204 struct bt_hci_cp_le_iso_transmit_test *cmd = (void *)buf->data;
2205 struct bt_hci_rp_le_iso_transmit_test *rp;
2206 uint16_t handle;
2207 uint8_t status;
2208
2209 handle = sys_le16_to_cpu(cmd->handle);
2210
2211 status = ll_iso_transmit_test(handle, cmd->payload_type);
2212
2213 rp = hci_cmd_complete(evt, sizeof(*rp));
2214 rp->status = status;
2215 rp->handle = sys_cpu_to_le16(handle);
2216 }
2217
le_read_iso_tx_sync(struct net_buf * buf,struct net_buf ** evt)2218 static void le_read_iso_tx_sync(struct net_buf *buf, struct net_buf **evt)
2219 {
2220 struct bt_hci_cp_le_read_iso_tx_sync *cmd = (void *)buf->data;
2221 struct bt_hci_rp_le_read_iso_tx_sync *rp;
2222 uint16_t handle_le16;
2223 uint32_t timestamp;
2224 uint32_t offset;
2225 uint16_t handle;
2226 uint8_t status;
2227 uint16_t seq;
2228
2229 handle_le16 = cmd->handle;
2230 handle = sys_le16_to_cpu(handle_le16);
2231
2232 status = ll_read_iso_tx_sync(handle, &seq, ×tamp, &offset);
2233
2234 rp = hci_cmd_complete(evt, sizeof(*rp));
2235 rp->status = status;
2236 rp->handle = handle_le16;
2237 rp->seq = sys_cpu_to_le16(seq);
2238 rp->timestamp = sys_cpu_to_le32(timestamp);
2239 sys_put_le24(offset, rp->offset);
2240 }
2241 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2242
2243 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_receive_test(struct net_buf * buf,struct net_buf ** evt)2244 static void le_iso_receive_test(struct net_buf *buf, struct net_buf **evt)
2245 {
2246 struct bt_hci_cp_le_iso_receive_test *cmd = (void *)buf->data;
2247 struct bt_hci_rp_le_iso_receive_test *rp;
2248 uint16_t handle;
2249 uint8_t status;
2250
2251 handle = sys_le16_to_cpu(cmd->handle);
2252
2253 status = ll_iso_receive_test(handle, cmd->payload_type);
2254
2255 rp = hci_cmd_complete(evt, sizeof(*rp));
2256 rp->status = status;
2257 rp->handle = sys_cpu_to_le16(handle);
2258 }
2259
le_iso_read_test_counters(struct net_buf * buf,struct net_buf ** evt)2260 static void le_iso_read_test_counters(struct net_buf *buf, struct net_buf **evt)
2261 {
2262 struct bt_hci_cp_le_read_test_counters *cmd = (void *)buf->data;
2263 struct bt_hci_rp_le_read_test_counters *rp;
2264 uint32_t received_cnt;
2265 uint32_t missed_cnt;
2266 uint32_t failed_cnt;
2267 uint16_t handle;
2268 uint8_t status;
2269
2270 handle = sys_le16_to_cpu(cmd->handle);
2271 status = ll_iso_read_test_counters(handle, &received_cnt,
2272 &missed_cnt, &failed_cnt);
2273
2274 rp = hci_cmd_complete(evt, sizeof(*rp));
2275 rp->status = status;
2276 rp->handle = sys_cpu_to_le16(handle);
2277 rp->received_cnt = sys_cpu_to_le32(received_cnt);
2278 rp->missed_cnt = sys_cpu_to_le32(missed_cnt);
2279 rp->failed_cnt = sys_cpu_to_le32(failed_cnt);
2280 }
2281
2282 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
/* HCI LE Read ISO Link Quality command handler.
 *
 * Reads the ISO link quality counters from the LL and returns them in
 * the Command Complete response. The handle field is echoed back in its
 * original little-endian wire form.
 *
 * NOTE(review): the counters are encoded unconditionally; presumably the
 * LL only fills them on success - confirm error-path contents.
 */
static void le_read_iso_link_quality(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_read_iso_link_quality *cmd = (void *)buf->data;
	struct bt_hci_rp_le_read_iso_link_quality *rp;
	uint32_t tx_last_subevent_packets;
	uint32_t retransmitted_packets;
	uint32_t rx_unreceived_packets;
	uint32_t tx_unacked_packets;
	uint32_t tx_flushed_packets;
	uint32_t crc_error_packets;
	uint32_t duplicate_packets;
	uint16_t handle_le16;
	uint16_t handle;
	uint8_t status;

	/* Keep the raw little-endian handle to echo back unchanged */
	handle_le16 = cmd->handle;
	handle = sys_le16_to_cpu(handle_le16);
	status = ll_read_iso_link_quality(handle, &tx_unacked_packets,
					  &tx_flushed_packets,
					  &tx_last_subevent_packets,
					  &retransmitted_packets,
					  &crc_error_packets,
					  &rx_unreceived_packets,
					  &duplicate_packets);

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = status;
	rp->handle = handle_le16;
	rp->tx_unacked_packets = sys_cpu_to_le32(tx_unacked_packets);
	rp->tx_flushed_packets = sys_cpu_to_le32(tx_flushed_packets);
	rp->tx_last_subevent_packets =
		sys_cpu_to_le32(tx_last_subevent_packets);
	rp->retransmitted_packets = sys_cpu_to_le32(retransmitted_packets);
	rp->crc_error_packets = sys_cpu_to_le32(crc_error_packets);
	rp->rx_unreceived_packets = sys_cpu_to_le32(rx_unreceived_packets);
	rp->duplicate_packets = sys_cpu_to_le32(duplicate_packets);
}
2320 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
2321
2322 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
2323
2324 #if defined(CONFIG_BT_CTLR_ISO)
le_setup_iso_path(struct net_buf * buf,struct net_buf ** evt)2325 static void le_setup_iso_path(struct net_buf *buf, struct net_buf **evt)
2326 {
2327 struct bt_hci_cp_le_setup_iso_path *cmd = (void *)buf->data;
2328 struct bt_hci_rp_le_setup_iso_path *rp;
2329 uint32_t controller_delay;
2330 uint8_t *codec_config;
2331 uint8_t coding_format;
2332 uint16_t vs_codec_id;
2333 uint16_t company_id;
2334 uint16_t handle;
2335 uint8_t status;
2336
2337 handle = sys_le16_to_cpu(cmd->handle);
2338 coding_format = cmd->codec_id.coding_format;
2339 company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
2340 vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
2341 controller_delay = sys_get_le24(cmd->controller_delay);
2342 codec_config = &cmd->codec_config[0];
2343
2344 status = ll_setup_iso_path(handle, cmd->path_dir, cmd->path_id,
2345 coding_format, company_id, vs_codec_id,
2346 controller_delay, cmd->codec_config_len,
2347 codec_config);
2348
2349 rp = hci_cmd_complete(evt, sizeof(*rp));
2350 rp->status = status;
2351 rp->handle = sys_cpu_to_le16(handle);
2352 }
2353
le_remove_iso_path(struct net_buf * buf,struct net_buf ** evt)2354 static void le_remove_iso_path(struct net_buf *buf, struct net_buf **evt)
2355 {
2356 struct bt_hci_cp_le_remove_iso_path *cmd = (void *)buf->data;
2357 struct bt_hci_rp_le_remove_iso_path *rp;
2358 uint16_t handle;
2359 uint8_t status;
2360
2361 handle = sys_le16_to_cpu(cmd->handle);
2362
2363 status = ll_remove_iso_path(handle, cmd->path_dir);
2364
2365 rp = hci_cmd_complete(evt, sizeof(*rp));
2366 rp->status = status;
2367 rp->handle = sys_cpu_to_le16(handle);
2368 }
2369
le_iso_test_end(struct net_buf * buf,struct net_buf ** evt)2370 static void le_iso_test_end(struct net_buf *buf, struct net_buf **evt)
2371 {
2372 struct bt_hci_cp_le_iso_test_end *cmd = (void *)buf->data;
2373 struct bt_hci_rp_le_iso_test_end *rp;
2374 uint32_t received_cnt;
2375 uint32_t missed_cnt;
2376 uint32_t failed_cnt;
2377 uint16_t handle;
2378 uint8_t status;
2379
2380 handle = sys_le16_to_cpu(cmd->handle);
2381 status = ll_iso_test_end(handle, &received_cnt, &missed_cnt,
2382 &failed_cnt);
2383
2384 rp = hci_cmd_complete(evt, sizeof(*rp));
2385 rp->status = status;
2386 rp->handle = sys_cpu_to_le16(handle);
2387 rp->received_cnt = sys_cpu_to_le32(received_cnt);
2388 rp->missed_cnt = sys_cpu_to_le32(missed_cnt);
2389 rp->failed_cnt = sys_cpu_to_le32(failed_cnt);
2390 }
2391 #endif /* CONFIG_BT_CTLR_ISO */
2392
2393 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
le_set_host_feature(struct net_buf * buf,struct net_buf ** evt)2394 static void le_set_host_feature(struct net_buf *buf, struct net_buf **evt)
2395 {
2396 struct bt_hci_cp_le_set_host_feature *cmd = (void *)buf->data;
2397 struct bt_hci_rp_le_set_host_feature *rp;
2398 uint8_t status;
2399
2400 status = ll_set_host_feature(cmd->bit_number, cmd->bit_value);
2401
2402 rp = hci_cmd_complete(evt, sizeof(*rp));
2403 rp->status = status;
2404 }
2405 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
2406
2407 #if defined(CONFIG_BT_PERIPHERAL)
2408 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_ltk_req_reply(struct net_buf * buf,struct net_buf ** evt)2409 static void le_ltk_req_reply(struct net_buf *buf, struct net_buf **evt)
2410 {
2411 struct bt_hci_cp_le_ltk_req_reply *cmd = (void *)buf->data;
2412 struct bt_hci_rp_le_ltk_req_reply *rp;
2413 uint16_t handle;
2414 uint8_t status;
2415
2416 handle = sys_le16_to_cpu(cmd->handle);
2417 status = ll_start_enc_req_send(handle, 0x00, &cmd->ltk[0]);
2418
2419 rp = hci_cmd_complete(evt, sizeof(*rp));
2420 rp->status = status;
2421 rp->handle = sys_cpu_to_le16(handle);
2422 }
2423
le_ltk_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2424 static void le_ltk_req_neg_reply(struct net_buf *buf, struct net_buf **evt)
2425 {
2426 struct bt_hci_cp_le_ltk_req_neg_reply *cmd = (void *)buf->data;
2427 struct bt_hci_rp_le_ltk_req_neg_reply *rp;
2428 uint16_t handle;
2429 uint8_t status;
2430
2431 handle = sys_le16_to_cpu(cmd->handle);
2432 status = ll_start_enc_req_send(handle, BT_HCI_ERR_PIN_OR_KEY_MISSING,
2433 NULL);
2434
2435 rp = hci_cmd_complete(evt, sizeof(*rp));
2436 rp->status = status;
2437 rp->handle = sys_le16_to_cpu(handle);
2438 }
2439 #endif /* CONFIG_BT_CTLR_LE_ENC */
2440
2441 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
le_accept_cis(struct net_buf * buf,struct net_buf ** evt)2442 static void le_accept_cis(struct net_buf *buf, struct net_buf **evt)
2443 {
2444 struct bt_hci_cp_le_accept_cis *cmd = (void *)buf->data;
2445 uint16_t handle;
2446 uint8_t status;
2447
2448 handle = sys_le16_to_cpu(cmd->handle);
2449 status = ll_cis_accept(handle);
2450 *evt = cmd_status(status);
2451 }
2452
le_reject_cis(struct net_buf * buf,struct net_buf ** evt)2453 static void le_reject_cis(struct net_buf *buf, struct net_buf **evt)
2454 {
2455 struct bt_hci_cp_le_reject_cis *cmd = (void *)buf->data;
2456 struct bt_hci_rp_le_reject_cis *rp;
2457 uint16_t handle;
2458 uint8_t status;
2459
2460 handle = sys_le16_to_cpu(cmd->handle);
2461 status = ll_cis_reject(handle, cmd->reason);
2462
2463 rp = hci_cmd_complete(evt, sizeof(*rp));
2464 rp->status = status;
2465 rp->handle = sys_cpu_to_le16(handle);
2466 }
2467 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
2468
2469 #endif /* CONFIG_BT_PERIPHERAL */
2470
2471 #if defined(CONFIG_BT_CONN)
2472 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
le_req_peer_sca(struct net_buf * buf,struct net_buf ** evt)2473 static void le_req_peer_sca(struct net_buf *buf, struct net_buf **evt)
2474 {
2475 struct bt_hci_cp_le_req_peer_sca *cmd = (void *)buf->data;
2476 uint16_t handle;
2477 uint8_t status;
2478
2479 handle = sys_le16_to_cpu(cmd->handle);
2480 status = ll_req_peer_sca(handle);
2481
2482 *evt = cmd_status(status);
2483 }
2484 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
2485
2486 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
le_read_remote_features(struct net_buf * buf,struct net_buf ** evt)2487 static void le_read_remote_features(struct net_buf *buf, struct net_buf **evt)
2488 {
2489 struct bt_hci_cp_le_read_remote_features *cmd = (void *)buf->data;
2490 uint16_t handle;
2491 uint8_t status;
2492
2493 handle = sys_le16_to_cpu(cmd->handle);
2494 status = ll_feature_req_send(handle);
2495
2496 *evt = cmd_status(status);
2497 }
2498 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
2499
le_read_chan_map(struct net_buf * buf,struct net_buf ** evt)2500 static void le_read_chan_map(struct net_buf *buf, struct net_buf **evt)
2501 {
2502 struct bt_hci_cp_le_read_chan_map *cmd = (void *)buf->data;
2503 struct bt_hci_rp_le_read_chan_map *rp;
2504 uint16_t handle;
2505 uint8_t status;
2506
2507 handle = sys_le16_to_cpu(cmd->handle);
2508
2509 rp = hci_cmd_complete(evt, sizeof(*rp));
2510
2511 status = ll_chm_get(handle, rp->ch_map);
2512
2513 rp->status = status;
2514 rp->handle = sys_le16_to_cpu(handle);
2515 }
2516
le_conn_update(struct net_buf * buf,struct net_buf ** evt)2517 static void le_conn_update(struct net_buf *buf, struct net_buf **evt)
2518 {
2519 struct hci_cp_le_conn_update *cmd = (void *)buf->data;
2520 uint16_t supervision_timeout;
2521 uint16_t conn_interval_min;
2522 uint16_t conn_interval_max;
2523 uint16_t conn_latency;
2524 uint16_t handle;
2525 uint8_t status;
2526
2527 handle = sys_le16_to_cpu(cmd->handle);
2528 conn_interval_min = sys_le16_to_cpu(cmd->conn_interval_min);
2529 conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
2530 conn_latency = sys_le16_to_cpu(cmd->conn_latency);
2531 supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
2532
2533 status = ll_conn_update(handle, 0, 0, conn_interval_min,
2534 conn_interval_max, conn_latency,
2535 supervision_timeout, NULL);
2536
2537 *evt = cmd_status(status);
2538 }
2539
2540 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
le_conn_param_req_reply(struct net_buf * buf,struct net_buf ** evt)2541 static void le_conn_param_req_reply(struct net_buf *buf, struct net_buf **evt)
2542 {
2543 struct bt_hci_cp_le_conn_param_req_reply *cmd = (void *)buf->data;
2544 struct bt_hci_rp_le_conn_param_req_reply *rp;
2545 uint16_t interval_min;
2546 uint16_t interval_max;
2547 uint16_t latency;
2548 uint16_t timeout;
2549 uint16_t handle;
2550 uint8_t status;
2551
2552 handle = sys_le16_to_cpu(cmd->handle);
2553 interval_min = sys_le16_to_cpu(cmd->interval_min);
2554 interval_max = sys_le16_to_cpu(cmd->interval_max);
2555 latency = sys_le16_to_cpu(cmd->latency);
2556 timeout = sys_le16_to_cpu(cmd->timeout);
2557
2558 status = ll_conn_update(handle, 2, 0, interval_min, interval_max,
2559 latency, timeout, NULL);
2560
2561 rp = hci_cmd_complete(evt, sizeof(*rp));
2562 rp->status = status;
2563 rp->handle = sys_cpu_to_le16(handle);
2564 }
2565
le_conn_param_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2566 static void le_conn_param_req_neg_reply(struct net_buf *buf,
2567 struct net_buf **evt)
2568 {
2569 struct bt_hci_cp_le_conn_param_req_neg_reply *cmd = (void *)buf->data;
2570 struct bt_hci_rp_le_conn_param_req_neg_reply *rp;
2571 uint16_t handle;
2572 uint8_t status;
2573
2574 handle = sys_le16_to_cpu(cmd->handle);
2575 status = ll_conn_update(handle, 2, cmd->reason, 0, 0, 0, 0, NULL);
2576
2577 rp = hci_cmd_complete(evt, sizeof(*rp));
2578 rp->status = status;
2579 rp->handle = sys_cpu_to_le16(handle);
2580 }
2581 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2582
2583 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
le_set_data_len(struct net_buf * buf,struct net_buf ** evt)2584 static void le_set_data_len(struct net_buf *buf, struct net_buf **evt)
2585 {
2586 struct bt_hci_cp_le_set_data_len *cmd = (void *)buf->data;
2587 struct bt_hci_rp_le_set_data_len *rp;
2588 uint16_t tx_octets;
2589 uint16_t tx_time;
2590 uint16_t handle;
2591 uint8_t status;
2592
2593 handle = sys_le16_to_cpu(cmd->handle);
2594 tx_octets = sys_le16_to_cpu(cmd->tx_octets);
2595 tx_time = sys_le16_to_cpu(cmd->tx_time);
2596 status = ll_length_req_send(handle, tx_octets, tx_time);
2597
2598 rp = hci_cmd_complete(evt, sizeof(*rp));
2599 rp->status = status;
2600 rp->handle = sys_cpu_to_le16(handle);
2601 }
2602
le_read_default_data_len(struct net_buf * buf,struct net_buf ** evt)2603 static void le_read_default_data_len(struct net_buf *buf, struct net_buf **evt)
2604 {
2605 struct bt_hci_rp_le_read_default_data_len *rp;
2606 uint16_t max_tx_octets;
2607 uint16_t max_tx_time;
2608
2609 rp = hci_cmd_complete(evt, sizeof(*rp));
2610
2611 ll_length_default_get(&max_tx_octets, &max_tx_time);
2612
2613 rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2614 rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2615 rp->status = 0x00;
2616 }
2617
le_write_default_data_len(struct net_buf * buf,struct net_buf ** evt)2618 static void le_write_default_data_len(struct net_buf *buf,
2619 struct net_buf **evt)
2620 {
2621 struct bt_hci_cp_le_write_default_data_len *cmd = (void *)buf->data;
2622 uint16_t max_tx_octets;
2623 uint16_t max_tx_time;
2624 uint8_t status;
2625
2626 max_tx_octets = sys_le16_to_cpu(cmd->max_tx_octets);
2627 max_tx_time = sys_le16_to_cpu(cmd->max_tx_time);
2628 status = ll_length_default_set(max_tx_octets, max_tx_time);
2629
2630 *evt = cmd_complete_status(status);
2631 }
2632
le_read_max_data_len(struct net_buf * buf,struct net_buf ** evt)2633 static void le_read_max_data_len(struct net_buf *buf, struct net_buf **evt)
2634 {
2635 struct bt_hci_rp_le_read_max_data_len *rp;
2636 uint16_t max_tx_octets;
2637 uint16_t max_tx_time;
2638 uint16_t max_rx_octets;
2639 uint16_t max_rx_time;
2640
2641 rp = hci_cmd_complete(evt, sizeof(*rp));
2642
2643 ll_length_max_get(&max_tx_octets, &max_tx_time,
2644 &max_rx_octets, &max_rx_time);
2645
2646 rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2647 rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2648 rp->max_rx_octets = sys_cpu_to_le16(max_rx_octets);
2649 rp->max_rx_time = sys_cpu_to_le16(max_rx_time);
2650 rp->status = 0x00;
2651 }
2652 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2653
2654 #if defined(CONFIG_BT_CTLR_PHY)
le_read_phy(struct net_buf * buf,struct net_buf ** evt)2655 static void le_read_phy(struct net_buf *buf, struct net_buf **evt)
2656 {
2657 struct bt_hci_cp_le_read_phy *cmd = (void *)buf->data;
2658 struct bt_hci_rp_le_read_phy *rp;
2659 uint16_t handle;
2660 uint8_t status;
2661
2662 handle = sys_le16_to_cpu(cmd->handle);
2663
2664 rp = hci_cmd_complete(evt, sizeof(*rp));
2665
2666 status = ll_phy_get(handle, &rp->tx_phy, &rp->rx_phy);
2667
2668 rp->status = status;
2669 rp->handle = sys_cpu_to_le16(handle);
2670 rp->tx_phy = find_lsb_set(rp->tx_phy);
2671 rp->rx_phy = find_lsb_set(rp->rx_phy);
2672 }
2673
le_set_default_phy(struct net_buf * buf,struct net_buf ** evt)2674 static void le_set_default_phy(struct net_buf *buf, struct net_buf **evt)
2675 {
2676 struct bt_hci_cp_le_set_default_phy *cmd = (void *)buf->data;
2677 uint8_t status;
2678
2679 if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2680 cmd->tx_phys = 0x07;
2681 }
2682 if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2683 cmd->rx_phys = 0x07;
2684 }
2685
2686 status = ll_phy_default_set(cmd->tx_phys, cmd->rx_phys);
2687
2688 *evt = cmd_complete_status(status);
2689 }
2690
/* HCI LE Set PHY command handler.
 *
 * Validates the requested TX/RX PHY preferences against the PHYs this
 * controller build supports and, if acceptable, initiates the PHY update
 * procedure on the connection. Result via Command Status event.
 */
static void le_set_phy(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_phy *cmd = (void *)buf->data;
	uint16_t phy_opts;
	uint8_t mask_phys;
	uint16_t handle;
	uint8_t status;

	handle = sys_le16_to_cpu(cmd->handle);
	phy_opts = sys_le16_to_cpu(cmd->phy_opts);

	/* Build the mask of PHYs supported by this build configuration */
	mask_phys = BT_HCI_LE_PHY_PREFER_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
		mask_phys |= BT_HCI_LE_PHY_PREFER_2M;
	}
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		mask_phys |= BT_HCI_LE_PHY_PREFER_CODED;
	}

	/* "No preference" in all_phys means every supported PHY */
	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
		cmd->tx_phys |= mask_phys;
	}
	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
		cmd->rx_phys |= mask_phys;
	}

	/* Requesting a PHY this build does not support */
	if ((cmd->tx_phys | cmd->rx_phys) & ~mask_phys) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	/* At least one PHY must be requested in each direction */
	if (!(cmd->tx_phys & 0x07) ||
	    !(cmd->rx_phys & 0x07)) {
		*evt = cmd_status(BT_HCI_ERR_INVALID_PARAM);

		return;
	}

	/* Map HCI coded-PHY options to the LL preference value:
	 * 0 (no preference) -> 0, 1 (prefer S=2) -> 0, 2 (prefer S=8) -> 1.
	 */
	if (phy_opts & 0x03) {
		phy_opts -= 1U;
		phy_opts &= 1;
	} else {
		phy_opts = 0U;
	}

	status = ll_phy_req_send(handle, cmd->tx_phys, phy_opts,
				 cmd->rx_phys);

	*evt = cmd_status(status);
}
2742 #endif /* CONFIG_BT_CTLR_PHY */
2743 #endif /* CONFIG_BT_CONN */
2744
2745 #if defined(CONFIG_BT_CTLR_PRIVACY)
le_add_dev_to_rl(struct net_buf * buf,struct net_buf ** evt)2746 static void le_add_dev_to_rl(struct net_buf *buf, struct net_buf **evt)
2747 {
2748 struct bt_hci_cp_le_add_dev_to_rl *cmd = (void *)buf->data;
2749 uint8_t status;
2750
2751 status = ll_rl_add(&cmd->peer_id_addr, cmd->peer_irk, cmd->local_irk);
2752
2753 *evt = cmd_complete_status(status);
2754 }
2755
le_rem_dev_from_rl(struct net_buf * buf,struct net_buf ** evt)2756 static void le_rem_dev_from_rl(struct net_buf *buf, struct net_buf **evt)
2757 {
2758 struct bt_hci_cp_le_rem_dev_from_rl *cmd = (void *)buf->data;
2759 uint8_t status;
2760
2761 status = ll_rl_remove(&cmd->peer_id_addr);
2762
2763 *evt = cmd_complete_status(status);
2764 }
2765
/* HCI LE Clear Resolving List command handler. */
static void le_clear_rl(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_rl_clear());
}
2774
le_read_rl_size(struct net_buf * buf,struct net_buf ** evt)2775 static void le_read_rl_size(struct net_buf *buf, struct net_buf **evt)
2776 {
2777 struct bt_hci_rp_le_read_rl_size *rp;
2778
2779 rp = hci_cmd_complete(evt, sizeof(*rp));
2780
2781 rp->rl_size = ll_rl_size_get();
2782 rp->status = 0x00;
2783 }
2784
le_read_peer_rpa(struct net_buf * buf,struct net_buf ** evt)2785 static void le_read_peer_rpa(struct net_buf *buf, struct net_buf **evt)
2786 {
2787 struct bt_hci_cp_le_read_peer_rpa *cmd = (void *)buf->data;
2788 struct bt_hci_rp_le_read_peer_rpa *rp;
2789 bt_addr_le_t peer_id_addr;
2790
2791 bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2792 rp = hci_cmd_complete(evt, sizeof(*rp));
2793
2794 rp->status = ll_rl_crpa_get(&peer_id_addr, &rp->peer_rpa);
2795 }
2796
le_read_local_rpa(struct net_buf * buf,struct net_buf ** evt)2797 static void le_read_local_rpa(struct net_buf *buf, struct net_buf **evt)
2798 {
2799 struct bt_hci_cp_le_read_local_rpa *cmd = (void *)buf->data;
2800 struct bt_hci_rp_le_read_local_rpa *rp;
2801 bt_addr_le_t peer_id_addr;
2802
2803 bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2804 rp = hci_cmd_complete(evt, sizeof(*rp));
2805
2806 rp->status = ll_rl_lrpa_get(&peer_id_addr, &rp->local_rpa);
2807 }
2808
le_set_addr_res_enable(struct net_buf * buf,struct net_buf ** evt)2809 static void le_set_addr_res_enable(struct net_buf *buf, struct net_buf **evt)
2810 {
2811 struct bt_hci_cp_le_set_addr_res_enable *cmd = (void *)buf->data;
2812 uint8_t status;
2813
2814 status = ll_rl_enable(cmd->enable);
2815
2816 *evt = cmd_complete_status(status);
2817 }
2818
le_set_rpa_timeout(struct net_buf * buf,struct net_buf ** evt)2819 static void le_set_rpa_timeout(struct net_buf *buf, struct net_buf **evt)
2820 {
2821 struct bt_hci_cp_le_set_rpa_timeout *cmd = (void *)buf->data;
2822 uint16_t timeout = sys_le16_to_cpu(cmd->rpa_timeout);
2823
2824 ll_rl_timeout_set(timeout);
2825
2826 *evt = cmd_complete_status(0x00);
2827 }
2828
le_set_privacy_mode(struct net_buf * buf,struct net_buf ** evt)2829 static void le_set_privacy_mode(struct net_buf *buf, struct net_buf **evt)
2830 {
2831 struct bt_hci_cp_le_set_privacy_mode *cmd = (void *)buf->data;
2832 uint8_t status;
2833
2834 status = ll_priv_mode_set(&cmd->id_addr, cmd->mode);
2835
2836 *evt = cmd_complete_status(status);
2837 }
2838 #endif /* CONFIG_BT_CTLR_PRIVACY */
2839
le_read_tx_power(struct net_buf * buf,struct net_buf ** evt)2840 static void le_read_tx_power(struct net_buf *buf, struct net_buf **evt)
2841 {
2842 struct bt_hci_rp_le_read_tx_power *rp;
2843
2844 rp = hci_cmd_complete(evt, sizeof(*rp));
2845 rp->status = 0x00;
2846 ll_tx_pwr_get(&rp->min_tx_power, &rp->max_tx_power);
2847 }
2848
2849 #if defined(CONFIG_BT_CTLR_DF)
2850 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
le_df_set_cl_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)2851 static void le_df_set_cl_cte_tx_params(struct net_buf *buf,
2852 struct net_buf **evt)
2853 {
2854 struct bt_hci_cp_le_set_cl_cte_tx_params *cmd = (void *)buf->data;
2855 uint8_t adv_handle;
2856 uint8_t status;
2857
2858 if (adv_cmds_ext_check(evt)) {
2859 return;
2860 }
2861
2862 status = ll_adv_set_by_hci_handle_get(cmd->handle, &adv_handle);
2863 if (status) {
2864 *evt = cmd_complete_status(status);
2865 return;
2866 }
2867
2868 status = ll_df_set_cl_cte_tx_params(adv_handle, cmd->cte_len,
2869 cmd->cte_type, cmd->cte_count,
2870 cmd->switch_pattern_len,
2871 cmd->ant_ids);
2872
2873 *evt = cmd_complete_status(status);
2874 }
2875
le_df_set_cl_cte_enable(struct net_buf * buf,struct net_buf ** evt)2876 static void le_df_set_cl_cte_enable(struct net_buf *buf, struct net_buf **evt)
2877 {
2878 struct bt_hci_cp_le_set_cl_cte_tx_enable *cmd = (void *)buf->data;
2879 uint8_t status;
2880 uint8_t handle;
2881
2882 if (adv_cmds_ext_check(evt)) {
2883 return;
2884 }
2885
2886 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
2887 if (status) {
2888 *evt = cmd_complete_status(status);
2889 return;
2890 }
2891
2892 status = ll_df_set_cl_cte_tx_enable(handle, cmd->cte_enable);
2893
2894 *evt = cmd_complete_status(status);
2895 }
2896 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2897
2898 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
le_df_set_cl_iq_sampling_enable(struct net_buf * buf,struct net_buf ** evt)2899 static void le_df_set_cl_iq_sampling_enable(struct net_buf *buf, struct net_buf **evt)
2900 {
2901 struct bt_hci_cp_le_set_cl_cte_sampling_enable *cmd = (void *)buf->data;
2902 struct bt_hci_rp_le_set_cl_cte_sampling_enable *rp;
2903 uint16_t sync_handle;
2904 uint8_t status;
2905
2906 sync_handle = sys_le16_to_cpu(cmd->sync_handle);
2907
2908 status = ll_df_set_cl_iq_sampling_enable(sync_handle,
2909 cmd->sampling_enable,
2910 cmd->slot_durations,
2911 cmd->max_sampled_cte,
2912 cmd->switch_pattern_len,
2913 cmd->ant_ids);
2914
2915 rp = hci_cmd_complete(evt, sizeof(*rp));
2916
2917 rp->status = status;
2918 rp->sync_handle = sys_cpu_to_le16(sync_handle);
2919 }
2920 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2921
2922 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) || \
2923 defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
iq_convert_12_to_8_bits(int16_t data)2924 static int8_t iq_convert_12_to_8_bits(int16_t data)
2925 {
2926 if (data == IQ_SAMPLE_SATURATED_16_BIT) {
2927 return IQ_SAMPLE_SATURATED_8_BIT;
2928 }
2929
2930 #if defined(CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB)
2931 return (data > INT8_MAX || data < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2932 : IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2933 #else /* !CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2934 int16_t data_conv = IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2935
2936 return (data_conv > INT8_MAX || data_conv < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2937 : (int8_t)data_conv;
2938 #endif /* CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2939 }
2940 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
2941 * || CONFIG_BT_CTLR_DF_CONN_CTE_RX
2942 */
2943
2944 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
/* Generate the HCI LE Connectionless IQ Report meta event from a controller
 * IQ-report node, converting the controller's 12-bit IQ samples to the 8-bit
 * values mandated by HCI.
 *
 * Sources of the report:
 * - DTM (CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT): no LLL sync context exists;
 *   the sync handle is fixed to 0x0FFF per spec.
 * - Periodic sync (CONFIG_BT_CTLR_DF_SCAN_CTE_RX): the sync handle and event
 *   counter are taken from the sync context carried in the rx footer.
 *
 * The event is suppressed when the Host has masked LE meta events or the
 * Connectionless IQ Report event.
 */
static void le_df_connectionless_iq_report(struct pdu_data *pdu_rx,
					   struct node_rx_pdu *node_rx,
					   struct net_buf *buf)
{
	struct bt_hci_evt_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct ll_sync_set *sync = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Nothing to do when the Host has this event masked out. */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->hdr.rx_ftr.param;

	/* If there is no LLL context and CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
	 * is enabled, the controller is in Direct Test Mode and may generate
	 * the Connectionless IQ Report.
	 */
	if (!lll && IS_ENABLED(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)) {
		/* Set sync_handle to 0x0FFF according to the BT Core 5.3 specification
		 * Vol 4 7.7.65.21
		 */
		sync_handle = 0x0FFF;
		/* Set periodic event counter to 0 since there is no periodic advertising train. */
		per_evt_counter = 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	else {
		sync = HDR_LLL2ULL(lll);

		/* TX LL thread has higher priority than RX thread. It may happen that
		 * host successfully disables CTE sampling in the meantime.
		 * It should be verified here, to avoid reporting IQ samples after
		 * the functionality was disabled or if sync was lost.
		 */
		if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) ||
		    !sync->timeout_reload) {
			/* Drop further processing of the event. */
			return;
		}

		/* Get the sync handle corresponding to the LLL context passed in the
		 * node rx footer field.
		 */
		sync_handle = ull_sync_handle_get(sync);
		per_evt_counter = iq_report->event_counter;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes the variable-length trailing sample array. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) +
			(samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);


	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* Slot durations: AoA uses the locally configured value; AoD is fixed
	 * by the CTE type (1 us or 2 us switching slots).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit a single marker sample. */
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3056 #endif /* defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) */
3057
3058 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
le_df_set_conn_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)3059 static void le_df_set_conn_cte_tx_params(struct net_buf *buf,
3060 struct net_buf **evt)
3061 {
3062 struct bt_hci_cp_le_set_conn_cte_tx_params *cmd = (void *)buf->data;
3063 struct bt_hci_rp_le_set_conn_cte_tx_params *rp;
3064 uint16_t handle, handle_le16;
3065 uint8_t status;
3066
3067 handle_le16 = cmd->handle;
3068 handle = sys_le16_to_cpu(handle_le16);
3069
3070 status = ll_df_set_conn_cte_tx_params(handle, cmd->cte_types,
3071 cmd->switch_pattern_len,
3072 cmd->ant_ids);
3073
3074 rp = hci_cmd_complete(evt, sizeof(*rp));
3075
3076 rp->status = status;
3077 rp->handle = handle_le16;
3078 }
3079 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
3080
3081 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
le_df_set_conn_cte_rx_params(struct net_buf * buf,struct net_buf ** evt)3082 static void le_df_set_conn_cte_rx_params(struct net_buf *buf, struct net_buf **evt)
3083 {
3084 struct bt_hci_cp_le_set_conn_cte_rx_params *cmd = (void *)buf->data;
3085 struct bt_hci_rp_le_set_conn_cte_rx_params *rp;
3086 uint16_t handle, handle_le16;
3087 uint8_t status;
3088
3089 handle_le16 = cmd->handle;
3090 handle = sys_le16_to_cpu(handle_le16);
3091
3092 status = ll_df_set_conn_cte_rx_params(handle, cmd->sampling_enable, cmd->slot_durations,
3093 cmd->switch_pattern_len, cmd->ant_ids);
3094
3095 rp = hci_cmd_complete(evt, sizeof(*rp));
3096
3097 rp->status = status;
3098 rp->handle = handle_le16;
3099 }
3100
/* Generate the HCI LE Connection IQ Report meta event from a controller
 * IQ-report node received on a connection, converting 12-bit IQ samples to
 * the 8-bit HCI representation.
 *
 * The event is suppressed when the Host has it masked, or when CTE sampling
 * was disabled (or the configuration dropped) before this report was
 * processed.
 */
static void le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Nothing to do when the Host has this event masked out. */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->hdr.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	/* Without PHY update support the connection is always on 1M. */
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that
	 * host successfully disables CTE sampling in the meantime. It should be
	 * verified here, to avoid reporting IQ samples after the functionality
	 * was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes the variable-length trailing sample array. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTION_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* Slot durations: AoA uses the locally configured value; AoD is fixed
	 * by the CTE type (1 us or 2 us switching slots).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit a single marker sample. */
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3187 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
3188
3189 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
le_df_set_conn_cte_req_enable(struct net_buf * buf,struct net_buf ** evt)3190 static void le_df_set_conn_cte_req_enable(struct net_buf *buf, struct net_buf **evt)
3191 {
3192 struct bt_hci_cp_le_conn_cte_req_enable *cmd = (void *)buf->data;
3193 struct bt_hci_rp_le_conn_cte_req_enable *rp;
3194 uint16_t handle, handle_le16;
3195 uint8_t status;
3196
3197 handle_le16 = cmd->handle;
3198 handle = sys_le16_to_cpu(handle_le16);
3199
3200 status = ll_df_set_conn_cte_req_enable(handle, cmd->enable,
3201 sys_le16_to_cpu(cmd->cte_request_interval),
3202 cmd->requested_cte_length, cmd->requested_cte_type);
3203 rp = hci_cmd_complete(evt, sizeof(*rp));
3204
3205 rp->status = status;
3206 rp->handle = handle_le16;
3207 }
3208
/* Generate the HCI LE CTE Request Failed meta event for a connection,
 * unless the Host has masked it out.
 */
static void le_df_cte_req_failed(uint8_t error_code, uint16_t handle, struct net_buf *buf)
{
	struct bt_hci_evt_le_cte_req_failed *sep;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CTE_REQUEST_FAILED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_CTE_REQUEST_FAILED, sizeof(*sep));
	sep->conn_handle = sys_cpu_to_le16(handle);
	sep->status = error_code;
}
3223 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
3224
3225 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
le_df_set_conn_cte_rsp_enable(struct net_buf * buf,struct net_buf ** evt)3226 static void le_df_set_conn_cte_rsp_enable(struct net_buf *buf, struct net_buf **evt)
3227 {
3228 struct bt_hci_cp_le_conn_cte_rsp_enable *cmd = (void *)buf->data;
3229 struct bt_hci_rp_le_conn_cte_rsp_enable *rp;
3230 uint16_t handle, handle_le16;
3231 uint8_t status;
3232
3233 handle_le16 = cmd->handle;
3234 handle = sys_le16_to_cpu(handle_le16);
3235
3236 status = ll_df_set_conn_cte_rsp_enable(handle, cmd->enable);
3237 rp = hci_cmd_complete(evt, sizeof(*rp));
3238
3239 rp->status = status;
3240 rp->handle = handle_le16;
3241 }
3242 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
3243
le_df_read_ant_inf(struct net_buf * buf,struct net_buf ** evt)3244 static void le_df_read_ant_inf(struct net_buf *buf, struct net_buf **evt)
3245 {
3246 struct bt_hci_rp_le_read_ant_info *rp;
3247 uint8_t max_switch_pattern_len;
3248 uint8_t switch_sample_rates;
3249 uint8_t max_cte_len;
3250 uint8_t num_ant;
3251
3252 ll_df_read_ant_inf(&switch_sample_rates, &num_ant,
3253 &max_switch_pattern_len, &max_cte_len);
3254
3255 rp = hci_cmd_complete(evt, sizeof(*rp));
3256
3257 rp->max_switch_pattern_len = max_switch_pattern_len;
3258 rp->switch_sample_rates = switch_sample_rates;
3259 rp->max_cte_len = max_cte_len;
3260 rp->num_ant = num_ant;
3261 rp->status = 0x00;
3262 }
3263 #endif /* CONFIG_BT_CTLR_DF */
3264
3265 #if defined(CONFIG_BT_CTLR_DTM_HCI)
le_rx_test(struct net_buf * buf,struct net_buf ** evt)3266 static void le_rx_test(struct net_buf *buf, struct net_buf **evt)
3267 {
3268 struct bt_hci_cp_le_rx_test *cmd = (void *)buf->data;
3269 uint8_t status;
3270
3271 status = ll_test_rx(cmd->rx_ch, BT_HCI_LE_RX_PHY_1M, BT_HCI_LE_MOD_INDEX_STANDARD,
3272 BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3273 BT_HCI_LE_TEST_SLOT_DURATION_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3274 NULL);
3275
3276 *evt = cmd_complete_status(status);
3277 }
3278
le_tx_test(struct net_buf * buf,struct net_buf ** evt)3279 static void le_tx_test(struct net_buf *buf, struct net_buf **evt)
3280 {
3281 struct bt_hci_cp_le_tx_test *cmd = (void *)buf->data;
3282 uint8_t status;
3283
3284 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload,
3285 BT_HCI_LE_TX_PHY_1M, BT_HCI_LE_TEST_CTE_DISABLED,
3286 BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3287 NULL, BT_HCI_TX_TEST_POWER_MAX_SET);
3288
3289 *evt = cmd_complete_status(status);
3290 }
3291
le_test_end(struct net_buf * buf,struct net_buf ** evt)3292 static void le_test_end(struct net_buf *buf, struct net_buf **evt)
3293 {
3294 struct bt_hci_rp_le_test_end *rp;
3295 uint16_t rx_pkt_count;
3296 uint8_t status;
3297
3298 status = ll_test_end(&rx_pkt_count);
3299
3300 rp = hci_cmd_complete(evt, sizeof(*rp));
3301 rp->status = status;
3302 rp->rx_pkt_count = sys_cpu_to_le16(rx_pkt_count);
3303 }
3304
le_enh_rx_test(struct net_buf * buf,struct net_buf ** evt)3305 static void le_enh_rx_test(struct net_buf *buf, struct net_buf **evt)
3306 {
3307 struct bt_hci_cp_le_enh_rx_test *cmd = (void *)buf->data;
3308 uint8_t status;
3309
3310 status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, BT_HCI_LE_TEST_CTE_DISABLED,
3311 BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SLOT_DURATION_ANY,
3312 BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL);
3313
3314 *evt = cmd_complete_status(status);
3315 }
3316
3317 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
le_rx_test_v3(struct net_buf * buf,struct net_buf ** evt)3318 static void le_rx_test_v3(struct net_buf *buf, struct net_buf **evt)
3319 {
3320 struct bt_hci_cp_le_rx_test_v3 *cmd = (void *)buf->data;
3321 uint8_t status;
3322
3323 status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, cmd->expected_cte_len,
3324 cmd->expected_cte_type, cmd->slot_durations, cmd->switch_pattern_len,
3325 cmd->ant_ids);
3326
3327 *evt = cmd_complete_status(status);
3328 }
3329 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
3330
le_enh_tx_test(struct net_buf * buf,struct net_buf ** evt)3331 static void le_enh_tx_test(struct net_buf *buf, struct net_buf **evt)
3332 {
3333 struct bt_hci_cp_le_enh_tx_test *cmd = (void *)buf->data;
3334 uint8_t status;
3335
3336 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3337 BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3338 BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL,
3339 BT_HCI_TX_TEST_POWER_MAX_SET);
3340
3341 *evt = cmd_complete_status(status);
3342 }
3343
3344 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
le_tx_test_v3(struct net_buf * buf,struct net_buf ** evt)3345 static void le_tx_test_v3(struct net_buf *buf, struct net_buf **evt)
3346 {
3347 struct bt_hci_cp_le_tx_test_v3 *cmd = (void *)buf->data;
3348 uint8_t status;
3349
3350 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3351 cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3352 BT_HCI_TX_TEST_POWER_MAX_SET);
3353
3354 *evt = cmd_complete_status(status);
3355 }
3356 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
3357
3358 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
le_tx_test_v4(struct net_buf * buf,struct net_buf ** evt)3359 static void le_tx_test_v4(struct net_buf *buf, struct net_buf **evt)
3360 {
3361 struct bt_hci_cp_le_tx_test_v4 *cmd = (void *)buf->data;
3362 struct bt_hci_cp_le_tx_test_v4_tx_power *tx_power = (void *)(buf->data +
3363 sizeof(struct bt_hci_cp_le_tx_test_v4) + cmd->switch_pattern_len);
3364 uint8_t status;
3365
3366 status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3367 cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3368 tx_power->tx_power);
3369
3370 *evt = cmd_complete_status(status);
3371 }
3372 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
3373 #endif /* CONFIG_BT_CTLR_DTM_HCI */
3374
3375 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3376 #if defined(CONFIG_BT_BROADCASTER)
3377
le_set_adv_set_random_addr(struct net_buf * buf,struct net_buf ** evt)3378 static void le_set_adv_set_random_addr(struct net_buf *buf,
3379 struct net_buf **evt)
3380 {
3381 struct bt_hci_cp_le_set_adv_set_random_addr *cmd = (void *)buf->data;
3382 uint8_t status;
3383 uint8_t handle;
3384
3385 if (adv_cmds_ext_check(evt)) {
3386 return;
3387 }
3388
3389 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3390 if (status) {
3391 *evt = cmd_complete_status(status);
3392 return;
3393 }
3394
3395 status = ll_adv_aux_random_addr_set(handle, &cmd->bdaddr.val[0]);
3396
3397 *evt = cmd_complete_status(status);
3398 }
3399
/* Handle HCI LE Set Extended Advertising Parameters.
 *
 * Validates the handle and (with CONFIG_BT_CTLR_PARAM_CHECK) the primary
 * advertising interval range, allocates or looks up the advertising set,
 * then applies the parameters. The response carries the TX power actually
 * selected by the Link Layer (written back through &tx_pwr).
 */
static void le_set_ext_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_param *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_ext_adv_param *rp;
	uint32_t min_interval;
	uint16_t evt_prop;
	uint8_t tx_pwr;
	uint8_t status;
	uint8_t phy_p;
	uint8_t phy_s;
	uint8_t handle;

	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	if (cmd->handle > BT_HCI_LE_ADV_HANDLE_MAX) {
		*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
		return;
	}

	/* Primary advertising intervals are 24-bit little-endian fields. */
	min_interval = sys_get_le24(cmd->prim_min_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t max_interval =
			sys_get_le24(cmd->prim_max_interval);

		/* Compare advertising interval maximum with implementation
		 * supported advertising interval maximum value defined in the
		 * Kconfig CONFIG_BT_CTLR_ADV_INTERVAL_MAX.
		 */
		if ((min_interval > max_interval) ||
		    (min_interval < BT_HCI_LE_PRIM_ADV_INTERVAL_MIN) ||
		    (max_interval > CONFIG_BT_CTLR_ADV_INTERVAL_MAX)) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}
	}

	/* Allocate a new controller advertising set if one is not already
	 * associated with this HCI handle.
	 */
	status = ll_adv_set_by_hci_handle_get_or_new(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	evt_prop = sys_le16_to_cpu(cmd->props);
	tx_pwr = cmd->tx_power;
	/* Convert the 1-based HCI PHY values to single-bit PHY masks.
	 * NOTE(review): assumes prim/sec_adv_phy >= 1 here — presumably
	 * guaranteed by earlier HCI-level validation; confirm.
	 */
	phy_p = BIT(cmd->prim_adv_phy - 1);
	phy_s = BIT(cmd->sec_adv_phy - 1);

	status = ll_adv_params_set(handle, evt_prop, min_interval,
				   PDU_ADV_TYPE_EXT_IND, cmd->own_addr_type,
				   cmd->peer_addr.type, cmd->peer_addr.a.val,
				   cmd->prim_channel_map, cmd->filter_policy,
				   &tx_pwr, phy_p, cmd->sec_adv_max_skip, phy_s,
				   cmd->sid, cmd->scan_req_notify_enable);

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = status;
	/* Selected TX power, possibly adjusted by the Link Layer. */
	rp->tx_power = tx_pwr;
}
3461
le_set_ext_adv_data(struct net_buf * buf,struct net_buf ** evt)3462 static void le_set_ext_adv_data(struct net_buf *buf, struct net_buf **evt)
3463 {
3464 struct bt_hci_cp_le_set_ext_adv_data *cmd = (void *)buf->data;
3465 uint8_t status;
3466 uint8_t handle;
3467
3468 if (adv_cmds_ext_check(evt)) {
3469 return;
3470 }
3471
3472 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3473 if (status) {
3474 *evt = cmd_complete_status(status);
3475 return;
3476 }
3477
3478 status = ll_adv_aux_ad_data_set(handle, cmd->op, cmd->frag_pref,
3479 cmd->len, cmd->data);
3480
3481 *evt = cmd_complete_status(status);
3482 }
3483
le_set_ext_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)3484 static void le_set_ext_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
3485 {
3486 struct bt_hci_cp_le_set_ext_scan_rsp_data *cmd = (void *)buf->data;
3487 uint8_t status;
3488 uint8_t handle;
3489
3490 if (adv_cmds_ext_check(evt)) {
3491 return;
3492 }
3493
3494 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3495 if (status) {
3496 *evt = cmd_complete_status(status);
3497 return;
3498 }
3499
3500 status = ll_adv_aux_sr_data_set(handle, cmd->op, cmd->frag_pref,
3501 cmd->len, cmd->data);
3502
3503 *evt = cmd_complete_status(status);
3504 }
3505
/* Handle HCI LE Set Extended Advertising Enable.
 *
 * A set count of zero with enable=0 disables all advertising sets; zero
 * with enable=1 is an invalid parameter. Otherwise each listed set is
 * enabled/disabled in turn; processing stops at the first failure and the
 * final status is reported in the Command Complete.
 */
static void le_set_ext_adv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_enable *cmd = (void *)buf->data;
	struct bt_hci_ext_adv_set *s;
	uint8_t set_num;
	uint8_t status;
	uint8_t handle;

	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	set_num = cmd->set_num;
	if (!set_num) {
		if (cmd->enable) {
			/* Enabling with an empty set list is invalid. */
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}

		status = ll_adv_disable_all();

		*evt = cmd_complete_status(status);

		return;
	}

	/* Check for duplicate handles */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		for (uint8_t i = 0U; i < set_num - 1; i++) {
			for (uint8_t j = i + 1U; j < set_num; j++) {
				if (cmd->s[i].handle == cmd->s[j].handle) {
					*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
					return;
				}
			}
		}
	}

	s = (void *) cmd->s;
	do {
		status = ll_adv_set_by_hci_handle_get(s->handle, &handle);
		if (status) {
			break;
		}

		/* TODO: duration and events parameter use. */
#if defined(CONFIG_BT_HCI_MESH_EXT)
		/* Mesh-extended controllers use a different enable signature;
		 * duration/max-events are not passed in that build.
		 */
		status = ll_adv_enable(handle, cmd->enable, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_HCI_MESH_EXT */
		status = ll_adv_enable(handle, cmd->enable,
				       sys_le16_to_cpu(s->duration), s->max_ext_adv_evts);
#endif /* !CONFIG_BT_HCI_MESH_EXT */
		if (status) {
			/* TODO: how to handle succeeded ones before this
			 * error.
			 */
			break;
		}

		s++;
	} while (--set_num);

	*evt = cmd_complete_status(status);
}
3570
le_read_max_adv_data_len(struct net_buf * buf,struct net_buf ** evt)3571 static void le_read_max_adv_data_len(struct net_buf *buf, struct net_buf **evt)
3572 {
3573 struct bt_hci_rp_le_read_max_adv_data_len *rp;
3574 uint16_t max_adv_data_len;
3575
3576 if (adv_cmds_ext_check(evt)) {
3577 return;
3578 }
3579
3580 rp = hci_cmd_complete(evt, sizeof(*rp));
3581
3582 max_adv_data_len = ll_adv_aux_max_data_length_get();
3583
3584 rp->max_adv_data_len = sys_cpu_to_le16(max_adv_data_len);
3585 rp->status = 0x00;
3586 }
3587
le_read_num_adv_sets(struct net_buf * buf,struct net_buf ** evt)3588 static void le_read_num_adv_sets(struct net_buf *buf, struct net_buf **evt)
3589 {
3590 struct bt_hci_rp_le_read_num_adv_sets *rp;
3591
3592 if (adv_cmds_ext_check(evt)) {
3593 return;
3594 }
3595
3596 rp = hci_cmd_complete(evt, sizeof(*rp));
3597
3598 rp->num_sets = ll_adv_aux_set_count_get();
3599 rp->status = 0x00;
3600 }
3601
le_remove_adv_set(struct net_buf * buf,struct net_buf ** evt)3602 static void le_remove_adv_set(struct net_buf *buf, struct net_buf **evt)
3603 {
3604 struct bt_hci_cp_le_remove_adv_set *cmd = (void *)buf->data;
3605 uint8_t status;
3606 uint8_t handle;
3607
3608 if (adv_cmds_ext_check(evt)) {
3609 return;
3610 }
3611
3612 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3613 if (status) {
3614 *evt = cmd_complete_status(status);
3615 return;
3616 }
3617
3618 status = ll_adv_aux_set_remove(handle);
3619
3620 *evt = cmd_complete_status(status);
3621 }
3622
/* Handle HCI LE Clear Advertising Sets: remove all advertising sets. */
static void le_clear_adv_sets(struct net_buf *buf, struct net_buf **evt)
{
	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_adv_aux_set_clear());
}
3635
3636 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
/* Handle HCI LE Set Periodic Advertising Parameters.
 *
 * With CONFIG_BT_CTLR_PARAM_CHECK, the interval range is validated against
 * the spec minimum and the implementation maximum before the parameters
 * are applied to the advertising set's sync (periodic) context.
 */
static void le_set_per_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_param *cmd = (void *)buf->data;
	uint16_t max_interval;
	uint16_t flags;
	uint8_t status;
	uint8_t handle;

	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	max_interval = sys_le16_to_cpu(cmd->max_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t min_interval =
			sys_le16_to_cpu(cmd->min_interval);

		/* Compare periodic advertising interval maximum with
		 * implementation supported periodic advertising interval
		 * maximum value defined in the Kconfig
		 * CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX.
		 */
		if ((min_interval > max_interval) ||
		    (min_interval < BT_HCI_LE_PER_ADV_INTERVAL_MIN) ||
		    (max_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX)) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}
	}

	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	/* Properties bitfield (e.g. include TX power) in host byte order. */
	flags = sys_le16_to_cpu(cmd->props);

	status = ll_adv_sync_param_set(handle, max_interval, flags);

	*evt = cmd_complete_status(status);
}
3680
le_set_per_adv_data(struct net_buf * buf,struct net_buf ** evt)3681 static void le_set_per_adv_data(struct net_buf *buf, struct net_buf **evt)
3682 {
3683 struct bt_hci_cp_le_set_per_adv_data *cmd = (void *)buf->data;
3684 uint8_t status;
3685 uint8_t handle;
3686
3687 if (adv_cmds_ext_check(evt)) {
3688 return;
3689 }
3690
3691 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3692 if (status) {
3693 *evt = cmd_complete_status(status);
3694 return;
3695 }
3696
3697 status = ll_adv_sync_ad_data_set(handle, cmd->op, cmd->len,
3698 cmd->data);
3699
3700 *evt = cmd_complete_status(status);
3701 }
3702
le_set_per_adv_enable(struct net_buf * buf,struct net_buf ** evt)3703 static void le_set_per_adv_enable(struct net_buf *buf, struct net_buf **evt)
3704 {
3705 struct bt_hci_cp_le_set_per_adv_enable *cmd = (void *)buf->data;
3706 uint8_t status;
3707 uint8_t handle;
3708
3709 if (adv_cmds_ext_check(evt)) {
3710 return;
3711 }
3712
3713 status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3714 if (status) {
3715 *evt = cmd_complete_status(status);
3716 return;
3717 }
3718
3719 status = ll_adv_sync_enable(handle, cmd->enable);
3720
3721 *evt = cmd_complete_status(status);
3722 }
3723 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
3724 #endif /* CONFIG_BT_BROADCASTER */
3725
3726 #if defined(CONFIG_BT_OBSERVER)
/* Handle HCI LE Set Extended Scan Parameters.
 *
 * The controller supports scanning on 1M and (optionally) Coded PHY. Every
 * supported scan set is configured via ll_scan_params_set(): sets whose PHY
 * is listed in the command get the supplied type/interval/window, all other
 * supported sets get interval/window of 0 which disables them.
 */
static void le_set_ext_scan_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_param *cmd = (void *)buf->data;
	struct bt_hci_ext_scan_phy *p;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t status;
	uint8_t phys;

	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_scan_params_set function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	/* Reject PHY selections outside the supported set (param check). */
	phys = cmd->phys;
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	own_addr_type = cmd->own_addr_type;
	filter_policy = cmd->filter_policy;
	/* p walks the variable-length per-PHY parameter array; it is only
	 * advanced for PHYs the Host actually listed.
	 */
	p = cmd->p;

	/* Irrespective of enabled PHYs to scan for, ll_scan_params_set needs
	 * to be called to initialise the scan sets.
	 * Passing interval and window as 0, disable the particular scan set
	 * from being enabled.
	 */
	do {
		uint16_t interval;
		uint16_t window;
		uint8_t type;
		uint8_t phy;

		/* Get single PHY bit from the loop bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		/* Pass the PHY (1M or Coded) of scan set in MSbits of type
		 * parameter
		 */
		type = (phy << 1);

		/* If current PHY is one of the PHY in the Scanning_PHYs,
		 * pick the supplied scan type, interval and window.
		 */
		if (phys & phy) {
			type |= (p->type & 0x01);
			interval = sys_le16_to_cpu(p->interval);
			window = sys_le16_to_cpu(p->window);
			p++;
		} else {
			interval = 0U;
			window = 0U;
		}

		status = ll_scan_params_set(type, interval, window,
					    own_addr_type, filter_policy);
		if (status) {
			break;
		}

		/* Clear the lowest set bit; loop over remaining PHYs. */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	*evt = cmd_complete_status(status);
}
3804
/* Handle HCI LE Set Extended Scan Enable.
 *
 * Duplicate filtering is implemented here in the HCI layer using the
 * file-scope dup_count/dup_curr (and, with periodic-sync ADI support,
 * dup_scan) state: enabling scanning with filtering resets the duplicate
 * cache, disabling scanning turns filtering off. The scan itself is
 * started/stopped via ll_scan_enable() with the Host-supplied duration
 * and period.
 */
static void le_set_ext_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	/* Rejected when legacy advertising commands were used earlier. */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->enable && cmd->filter_dup) {
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		/* Filtering was fully disabled: re-enable and clear cache. */
		} else if (dup_count == DUP_FILTER_DISABLED) {
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		/* Scan filtering re-enabled: keep entries but reset the
		 * per-set extended advertising state.
		 */
		} else if (!dup_scan) {
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	status = ll_scan_enable(cmd->enable, sys_le16_to_cpu(cmd->duration),
				sys_le16_to_cpu(cmd->period));

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 *       enabling of already enabled scanning shall succeed after
	 *       updates to filter duplicates is handled in the above
	 *       statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
3860
3861 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* Handle HCI LE Periodic Advertising Create Sync command: validate options
 * against the compiled-in feature set, initialize report duplicate filtering,
 * and start synchronization to the periodic advertising train in the LL.
 * Completion is signalled via Command Status (the sync itself is reported by
 * a later LE Periodic Advertising Sync Established event).
 */
static void le_per_adv_create_sync(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_per_adv_create_sync *cmd = (void *)buf->data;
	uint16_t sync_timeout;
	uint8_t status;
	uint16_t skip;

	/* Legacy and extended advertising commands must not be mixed */
	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Using the Periodic Advertiser List requires it to be built in */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST)) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* Duplicate filtering with reporting enabled needs ADI support;
	 * (reports-disabled + filter-duplicate) together is still allowed.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
	    (cmd->options &
	     (BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED |
	      BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE)) ==
	    BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* FIXME: Check for HCI LE Set Periodic Advertising Receive Enable
	 * command support and if reporting is initially disabled then
	 * return error code Connection Failed to be Established /
	 * Synchronization Timeout (0x3E).
	 */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
			dup_count = 0;
			dup_curr = 0U;
		} else {
			/* NOTE: Invalidate dup_ext_adv_mode array entries is
			 *       done when sync is established.
			 */
		}
	} else if (!dup_scan) {
		dup_count = DUP_FILTER_DISABLED;
	}
#endif

	skip = sys_le16_to_cpu(cmd->skip);
	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);

	/* NOTE: Only one of the two `if` heads below is compiled in; both
	 *       share the single `} else {` that performs the sync creation.
	 */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	if ((cmd->cte_type & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_INVALID_VALUE) != 0) {
		status = BT_HCI_ERR_CMD_DISALLOWED;
#else
	if (cmd->cte_type != BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
		status = BT_HCI_ERR_INVALID_PARAM;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	} else {
		status = ll_sync_create(cmd->options, cmd->sid, cmd->addr.type, cmd->addr.a.val,
					skip, sync_timeout, cmd->cte_type);
	}
	*evt = cmd_status(status);
}
3927
3928 static void le_per_adv_create_sync_cancel(struct net_buf *buf,
3929 struct net_buf **evt, void **node_rx)
3930 {
3931 struct bt_hci_evt_cc_status *ccst;
3932 uint8_t status;
3933
3934 if (adv_cmds_ext_check(evt)) {
3935 return;
3936 }
3937
3938 status = ll_sync_create_cancel(node_rx);
3939
3940 ccst = hci_cmd_complete(evt, sizeof(*ccst));
3941 ccst->status = status;
3942 }
3943
3944 static void le_per_adv_terminate_sync(struct net_buf *buf, struct net_buf **evt)
3945 {
3946 struct bt_hci_cp_le_per_adv_terminate_sync *cmd = (void *)buf->data;
3947 struct bt_hci_evt_cc_status *ccst;
3948 uint16_t handle;
3949 uint8_t status;
3950
3951 if (adv_cmds_ext_check(evt)) {
3952 return;
3953 }
3954
3955 handle = sys_le16_to_cpu(cmd->handle);
3956
3957 status = ll_sync_terminate(handle);
3958
3959 ccst = hci_cmd_complete(evt, sizeof(*ccst));
3960 ccst->status = status;
3961 }
3962
/* Handle HCI LE Set Periodic Advertising Receive Enable command: toggle
 * reporting for an established sync in the LL and, on success, update the
 * duplicate-filter bookkeeping kept here in the HCI layer.
 */
static void le_per_adv_recv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_recv_enable *cmd = (void *)buf->data;
	struct bt_hci_evt_cc_status *ccst;
	uint16_t handle;
	uint8_t status;

	/* Legacy and extended advertising commands must not be mixed */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	handle = sys_le16_to_cpu(cmd->handle);

	status = ll_sync_recv_enable(handle, cmd->enable);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Mirror the dup-filter state transitions done at sync creation */
	if (!status) {
		if (cmd->enable &
		    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) {
			if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
				dup_count = 0;
				dup_curr = 0U;
			} else {
				/* NOTE: Invalidate dup_ext_adv_mode array
				 *       entries is done when sync is
				 *       established.
				 */
			}
		} else if (!dup_scan) {
			dup_count = DUP_FILTER_DISABLED;
		}
	}
#endif

	ccst = hci_cmd_complete(evt, sizeof(*ccst));
	ccst->status = status;
}
4000
4001 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4002 static void le_add_dev_to_pal(struct net_buf *buf, struct net_buf **evt)
4003 {
4004 struct bt_hci_cp_le_add_dev_to_per_adv_list *cmd = (void *)buf->data;
4005 uint8_t status;
4006
4007 if (adv_cmds_ext_check(evt)) {
4008 return;
4009 }
4010
4011 status = ll_pal_add(&cmd->addr, cmd->sid);
4012
4013 *evt = cmd_complete_status(status);
4014 }
4015
4016 static void le_rem_dev_from_pal(struct net_buf *buf, struct net_buf **evt)
4017 {
4018 struct bt_hci_cp_le_rem_dev_from_per_adv_list *cmd = (void *)buf->data;
4019 uint8_t status;
4020
4021 if (adv_cmds_ext_check(evt)) {
4022 return;
4023 }
4024
4025 status = ll_pal_remove(&cmd->addr, cmd->sid);
4026
4027 *evt = cmd_complete_status(status);
4028 }
4029
/* Handle HCI LE Clear Periodic Advertiser List command: remove every entry
 * from the LL periodic advertiser list.
 */
static void le_clear_pal(struct net_buf *buf, struct net_buf **evt)
{
	/* Legacy and extended advertising commands must not be mixed */
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_pal_clear());
}
4042
4043 static void le_read_pal_size(struct net_buf *buf, struct net_buf **evt)
4044 {
4045 struct bt_hci_rp_le_read_per_adv_list_size *rp;
4046
4047 if (adv_cmds_ext_check(evt)) {
4048 return;
4049 }
4050
4051 rp = hci_cmd_complete(evt, sizeof(*rp));
4052 rp->status = 0x00;
4053
4054 rp->list_size = ll_pal_size_get();
4055 }
4056 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4057 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4058 #endif /* CONFIG_BT_OBSERVER */
4059
4060 #if defined(CONFIG_BT_CENTRAL)
/* Handle HCI LE Extended Create Connection command: walk every PHY the
 * controller supports for initiating (1M, and Coded when built in); for each
 * PHY selected in Initiating_PHYs, validate and apply the host-supplied scan
 * and connection parameters, and for each unselected PHY reset the scan set
 * so no scanning starts on it. Finally arm the initiator in the LL.
 * Completion is signalled via Command Status.
 */
static void le_ext_create_connection(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_ext_create_conn *cmd = (void *)buf->data;
	struct bt_hci_ext_conn_phy *p;
	uint8_t peer_addr_type;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t *peer_addr;
	uint8_t status;
	uint8_t phys;

	/* Legacy and extended advertising commands must not be mixed */
	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_create_connection function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	/* Reject PHY selections outside the supported bitmask, and the
	 * empty selection.
	 */
	phys = cmd->phys;
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	filter_policy = cmd->filter_policy;
	own_addr_type = cmd->own_addr_type;
	peer_addr_type = cmd->peer_addr.type;
	peer_addr = cmd->peer_addr.a.val;
	/* p walks the variable-length array of per-PHY parameter sets; it is
	 * only advanced for PHYs actually selected in Initiating_PHYs.
	 */
	p = cmd->p;

	do {
		uint16_t supervision_timeout;
		uint16_t conn_interval_max;
		uint16_t scan_interval;
		uint16_t conn_latency;
		uint16_t scan_window;
		uint8_t phy;

		/* Get single PHY bit from the loop bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		if (phys & phy) {
			scan_interval = sys_le16_to_cpu(p->scan_interval);
			scan_window = sys_le16_to_cpu(p->scan_window);
			conn_interval_max =
				sys_le16_to_cpu(p->conn_interval_max);
			conn_latency = sys_le16_to_cpu(p->conn_latency);
			supervision_timeout =
				sys_le16_to_cpu(p->supervision_timeout);

			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
				status = check_cconn_params(true, scan_interval,
							    scan_window,
							    conn_interval_max,
							    conn_latency,
							    supervision_timeout);
				if (status) {
					*evt = cmd_status(status);
					return;
				}
			}

			status = ll_create_connection(scan_interval,
						      scan_window,
						      filter_policy,
						      peer_addr_type,
						      peer_addr,
						      own_addr_type,
						      conn_interval_max,
						      conn_latency,
						      supervision_timeout,
						      phy);
			p++;
		} else {
			uint8_t type;

			type = (phy << 1);
			/* NOTE: Pass invalid interval value to reset the PHY
			 *       value in the scan instance so not to start
			 *       scanning on the unselected PHY.
			 */
			status = ll_scan_params_set(type, 0, 0, 0, 0);
		}

		if (status) {
			*evt = cmd_status(status);
			return;
		}

		/* Clear the lowest set bit to move to the next PHY */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	status = ll_connect_enable(phys & BT_HCI_LE_EXT_SCAN_PHY_CODED);

	*evt = cmd_status(status);
}
4165 #endif /* CONFIG_BT_CENTRAL */
4166 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4167
4168 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4169 static void le_cis_request(struct pdu_data *pdu_data,
4170 struct node_rx_pdu *node_rx,
4171 struct net_buf *buf)
4172 {
4173 struct bt_hci_evt_le_cis_req *sep;
4174 struct node_rx_conn_iso_req *req;
4175 void *node;
4176
4177 /* Check for pdu field being aligned before accessing CIS established
4178 * event.
4179 */
4180 node = pdu_data;
4181 LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4182
4183 req = node;
4184 if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS)) ||
4185 !(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4186 !(le_event_mask & BT_EVT_MASK_LE_CIS_REQ)) {
4187 ll_cis_reject(req->cis_handle, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE);
4188 return;
4189 }
4190
4191 sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_REQ, sizeof(*sep));
4192 sep->acl_handle = sys_cpu_to_le16(node_rx->hdr.handle);
4193 sep->cis_handle = sys_cpu_to_le16(req->cis_handle);
4194 sep->cig_id = req->cig_id;
4195 sep->cis_id = req->cis_id;
4196 }
4197 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4198
4199 #if defined(CONFIG_BT_CTLR_CONN_ISO)
/* Generate the HCI LE CIS Established event for a CIS setup outcome.
 * Emits nothing when the LE meta event or LE CIS Established event is
 * masked. On failure where the CIS instance was already released (no
 * group), only status and handle are reported; otherwise the full set of
 * negotiated CIS parameters is filled in.
 */
static void le_cis_established(struct pdu_data *pdu_data,
			       struct node_rx_pdu *node_rx,
			       struct net_buf *buf)
{
	struct lll_conn_iso_stream_rxtx *lll_cis_c;
	struct lll_conn_iso_stream_rxtx *lll_cis_p;
	struct bt_hci_evt_le_cis_established *sep;
	struct lll_conn_iso_stream *lll_cis;
	struct node_rx_conn_iso_estab *est;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	bool is_central;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CIS_ESTABLISHED)) {
		return;
	}

	cis = node_rx->hdr.rx_ftr.param;
	cig = cis->group;

	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_ESTABLISHED, sizeof(*sep));

	/* Check for pdu field being aligned before accessing CIS established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));

	est = node;
	sep->status = est->status;
	sep->conn_handle = sys_cpu_to_le16(est->cis_handle);

	if (!cig) {
		/* CIS was not established and instance was released */
		return;
	}

	/* Map the stream's tx/rx halves to the spec's central-to-peripheral
	 * (c_*) and peripheral-to-central (p_*) directions based on our role.
	 */
	lll_cis = &cis->lll;
	is_central = cig->lll.role == BT_CONN_ROLE_CENTRAL;
	lll_cis_c = is_central ? &lll_cis->tx : &lll_cis->rx;
	lll_cis_p = is_central ? &lll_cis->rx : &lll_cis->tx;

	/* Delays/latencies are 24-bit little-endian fields in the event */
	sys_put_le24(cig->sync_delay, sep->cig_sync_delay);
	sys_put_le24(cis->sync_delay, sep->cis_sync_delay);
	sys_put_le24(cig->c_latency, sep->c_latency);
	sys_put_le24(cig->p_latency, sep->p_latency);
	sep->c_phy = find_lsb_set(lll_cis_c->phy);
	sep->p_phy = find_lsb_set(lll_cis_p->phy);
	sep->nse = lll_cis->nse;
	sep->c_bn = lll_cis_c->bn;
	sep->p_bn = lll_cis_p->bn;
	sep->c_ft = lll_cis_c->ft;
	sep->p_ft = lll_cis_p->ft;
	sep->c_max_pdu = sys_cpu_to_le16(lll_cis_c->max_pdu);
	sep->p_max_pdu = sys_cpu_to_le16(lll_cis_p->max_pdu);
	sep->interval = sys_cpu_to_le16(cig->iso_interval);

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	/* One fewer locally-initiated CIS awaiting establishment */
	if (is_central) {
		cis_pending_count--;
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
4265 #endif /* CONFIG_BT_CTLR_CONN_ISO */
4266
/* Dispatch an LE (OGF 0x08) HCI command to its handler based on the OCF.
 *
 * @param cmd     Command buffer, positioned at the command parameters.
 * @param evt     Output: Command Complete/Status event built by the handler.
 * @param node_rx Output: optional rx node returned by handlers that produce
 *                deferred events (e.g. create-sync cancel, BIG terminate).
 *
 * @return 0 when the OCF was recognized and handled, -EINVAL for an
 *         unknown/unsupported OCF (the caller reports Unknown HCI Command).
 *
 * The set of reachable cases is selected at build time by the controller's
 * Kconfig feature options.
 */
static int controller_cmd_handle(uint16_t ocf, struct net_buf *cmd,
				 struct net_buf **evt, void **node_rx)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_LE_SET_EVENT_MASK):
		le_set_event_mask(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE):
		le_read_buffer_size(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2):
		le_read_buffer_size_v2(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_FEATURES):
		le_read_local_features(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_RANDOM_ADDRESS):
		le_set_random_address(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	case BT_OCF(BT_HCI_OP_LE_READ_FAL_SIZE):
		le_read_fal_size(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CLEAR_FAL):
		le_clear_fal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_FAL):
		le_add_dev_to_fal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_FAL):
		le_rem_dev_from_fal(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

	case BT_OCF(BT_HCI_OP_LE_ENCRYPT):
		le_encrypt(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_RAND):
		le_rand(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_SUPP_STATES):
		le_read_supp_states(cmd, evt);
		break;

#if defined(CONFIG_BT_BROADCASTER)
	case BT_OCF(BT_HCI_OP_LE_SET_ADV_PARAM):
		le_set_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_ADV_CHAN_TX_POWER):
		le_read_adv_chan_tx_power(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_ADV_DATA):
		le_set_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_RSP_DATA):
		le_set_scan_rsp_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_ADV_ENABLE):
		le_set_adv_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG):
		le_create_big(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG_TEST):
		le_create_big_test(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_TERMINATE_BIG):
		le_terminate_big(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_PARAM):
		le_set_scan_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_ENABLE):
		le_set_scan_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	case BT_OCF(BT_HCI_OP_LE_BIG_CREATE_SYNC):
		le_big_create_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_BIG_TERMINATE_SYNC):
		le_big_terminate_sync(cmd, evt, node_rx);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CENTRAL)
	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN):
		le_create_connection(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN_CANCEL):
		le_create_conn_cancel(cmd, evt, node_rx);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF):
		le_set_host_chan_classif(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case BT_OCF(BT_HCI_OP_LE_START_ENCRYPTION):
		le_start_encryption(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS):
		le_set_cig_parameters(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS_TEST):
		le_set_cig_params_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_CREATE_CIS):
		le_create_cis(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REMOVE_CIG):
		le_remove_cig(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
#endif /* CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_PERIPHERAL)
#if defined(CONFIG_BT_CTLR_LE_ENC)
	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_REPLY):
		le_ltk_req_reply(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY):
		le_ltk_req_neg_reply(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case BT_OCF(BT_HCI_OP_LE_ACCEPT_CIS):
		le_accept_cis(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REJECT_CIS):
		le_reject_cis(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case BT_OCF(BT_HCI_OP_LE_REQ_PEER_SC):
		le_req_peer_sca(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_ISO)
	case BT_OCF(BT_HCI_OP_LE_SETUP_ISO_PATH):
		le_setup_iso_path(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REMOVE_ISO_PATH):
		le_remove_iso_path(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ISO_TEST_END):
		le_iso_test_end(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_ISO_TRANSMIT_TEST):
		le_iso_transmit_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_ISO_TX_SYNC):
		le_read_iso_tx_sync(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
	case BT_OCF(BT_HCI_OP_LE_ISO_RECEIVE_TEST):
		le_iso_receive_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ISO_READ_TEST_COUNTERS):
		le_iso_read_test_counters(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
	case BT_OCF(BT_HCI_OP_LE_READ_ISO_LINK_QUALITY):
		le_read_iso_link_quality(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
	case BT_OCF(BT_HCI_OP_LE_SET_HOST_FEATURE):
		le_set_host_feature(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */

#if defined(CONFIG_BT_CONN)
	case BT_OCF(BT_HCI_OP_LE_READ_CHAN_MAP):
		le_read_chan_map(cmd, evt);
		break;

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
	case BT_OCF(BT_HCI_OP_LE_READ_REMOTE_FEATURES):
		le_read_remote_features(cmd, evt);
		break;
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */

	case BT_OCF(BT_HCI_OP_LE_CONN_UPDATE):
		le_conn_update(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY):
		le_conn_param_req_reply(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY):
		le_conn_param_req_neg_reply(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case BT_OCF(BT_HCI_OP_LE_SET_DATA_LEN):
		le_set_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_DEFAULT_DATA_LEN):
		le_read_default_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN):
		le_write_default_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_MAX_DATA_LEN):
		le_read_max_data_len(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	case BT_OCF(BT_HCI_OP_LE_READ_PHY):
		le_read_phy(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_DEFAULT_PHY):
		le_set_default_phy(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PHY):
		le_set_phy(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
	case BT_OCF(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR):
		le_set_adv_set_random_addr(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_PARAM):
		le_set_ext_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_DATA):
		le_set_ext_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_RSP_DATA):
		le_set_ext_scan_rsp_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE):
		le_set_ext_adv_enable(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN):
		le_read_max_adv_data_len(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_NUM_ADV_SETS):
		le_read_num_adv_sets(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REMOVE_ADV_SET):
		le_remove_adv_set(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_CLEAR_ADV_SETS):
		le_clear_adv_sets(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_PARAM):
		le_set_per_adv_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_DATA):
		le_set_per_adv_data(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_ENABLE):
		le_set_per_adv_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM):
		le_set_ext_scan_param(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE):
		le_set_ext_scan_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC):
		le_per_adv_create_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL):
		le_per_adv_create_sync_cancel(cmd, evt, node_rx);
		break;

	case BT_OCF(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC):
		le_per_adv_terminate_sync(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE):
		le_per_adv_recv_enable(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST):
		le_add_dev_to_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST):
		le_rem_dev_from_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST):
		le_clear_pal(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_LE_READ_PER_ADV_LIST_SIZE):
		le_read_pal_size(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_CENTRAL)
	case BT_OCF(BT_HCI_OP_LE_EXT_CREATE_CONN):
		le_ext_create_connection(cmd, evt);
		break;
#endif /* CONFIG_BT_CENTRAL */
#endif /* CONFIG_BT_CONN */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_RL):
		le_add_dev_to_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_RL):
		le_rem_dev_from_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_CLEAR_RL):
		le_clear_rl(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_RL_SIZE):
		le_read_rl_size(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_PEER_RPA):
		le_read_peer_rpa(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_RPA):
		le_read_local_rpa(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_ADDR_RES_ENABLE):
		le_set_addr_res_enable(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_RPA_TIMEOUT):
		le_set_rpa_timeout(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_PRIVACY_MODE):
		le_set_privacy_mode(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	case BT_OCF(BT_HCI_OP_LE_READ_TX_POWER):
		le_read_tx_power(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_DF)
#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_PARAMS):
		le_df_set_cl_cte_tx_params(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_ENABLE):
		le_df_set_cl_cte_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_SAMPLING_ENABLE):
		le_df_set_cl_iq_sampling_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	case BT_OCF(BT_HCI_OP_LE_READ_ANT_INFO):
		le_df_read_ant_inf(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_TX_PARAMS):
		le_df_set_conn_cte_tx_params(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_RX_PARAMS):
		le_df_set_conn_cte_rx_params(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_REQ_ENABLE):
		le_df_set_conn_cte_req_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_RSP_ENABLE):
		le_df_set_conn_cte_rsp_enable(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#endif /* CONFIG_BT_CTLR_DF */

#if defined(CONFIG_BT_CTLR_DTM_HCI)
	case BT_OCF(BT_HCI_OP_LE_RX_TEST):
		le_rx_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_TX_TEST):
		le_tx_test(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_TEST_END):
		le_test_end(cmd, evt);
		break;
	case BT_OCF(BT_HCI_OP_LE_ENH_RX_TEST):
		le_enh_rx_test(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
	case BT_OCF(BT_HCI_OP_LE_RX_TEST_V3):
		le_rx_test_v3(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
	case BT_OCF(BT_HCI_OP_LE_ENH_TX_TEST):
		le_enh_tx_test(cmd, evt);
		break;
#if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V3):
		le_tx_test_v3(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
#if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V4):
		le_tx_test_v4(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
#endif /* CONFIG_BT_CTLR_DTM_HCI */

	default:
		/* Unknown/unsupported OCF: caller emits Unknown HCI Command */
		return -EINVAL;
	}

	return 0;
}
4760
4761 /* If Zephyr VS HCI commands are not enabled provide this functionality directly
4762 */
4763 #if !defined(CONFIG_BT_HCI_VS_EXT)
/* Host-facing static address accessor: delegates to the (possibly
 * vendor-overridden) hci_vendor_read_static_addr() and returns the number
 * of addresses written into addrs (at most size).
 */
uint8_t bt_read_static_addr(struct bt_hci_vs_static_addr addrs[], uint8_t size)
{
	return hci_vendor_read_static_addr(addrs, size);
}
4768 #endif /* !defined(CONFIG_BT_HCI_VS_EXT) */
4769
4770
4771 #if defined(CONFIG_BT_HCI_VS)
4772 static void vs_read_version_info(struct net_buf *buf, struct net_buf **evt)
4773 {
4774 struct bt_hci_rp_vs_read_version_info *rp;
4775
4776 rp = hci_cmd_complete(evt, sizeof(*rp));
4777
4778 rp->status = 0x00;
4779 rp->hw_platform = sys_cpu_to_le16(BT_HCI_VS_HW_PLAT);
4780 rp->hw_variant = sys_cpu_to_le16(BT_HCI_VS_HW_VAR);
4781
4782 rp->fw_variant = 0U;
4783 rp->fw_version = (KERNEL_VERSION_MAJOR & 0xff);
4784 rp->fw_revision = sys_cpu_to_le16(KERNEL_VERSION_MINOR);
4785 rp->fw_build = sys_cpu_to_le32(KERNEL_PATCHLEVEL & 0xffff);
4786 }
4787
/* Handle Vendor-Specific Read Supported Commands command: build the VS
 * supported-commands bitmask, with bits added according to the compiled-in
 * vendor extension features.
 */
static void vs_read_supported_commands(struct net_buf *buf,
				       struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_supported_commands *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));

	rp->status = 0x00;
	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));

	/* Set Version Information, Supported Commands, Supported Features. */
	rp->commands[0] |= BIT(0) | BIT(1) | BIT(2);
#if defined(CONFIG_BT_HCI_VS_EXT)
	/* Write BD_ADDR, Read Build Info */
	rp->commands[0] |= BIT(5) | BIT(7);
	/* Read Static Addresses, Read Key Hierarchy Roots */
	rp->commands[1] |= BIT(0) | BIT(1);
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	/* Write Tx Power, Read Tx Power */
	rp->commands[1] |= BIT(5) | BIT(6);
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	/* Read Supported USB Transport Modes */
	rp->commands[1] |= BIT(7);
	/* Set USB Transport Mode */
	rp->commands[2] |= BIT(0);
#endif /* USB_DEVICE_BLUETOOTH_VS_H4 */
#endif /* CONFIG_BT_HCI_VS_EXT */
}
4817
4818 static void vs_read_supported_features(struct net_buf *buf,
4819 struct net_buf **evt)
4820 {
4821 struct bt_hci_rp_vs_read_supported_features *rp;
4822
4823 rp = hci_cmd_complete(evt, sizeof(*rp));
4824
4825 rp->status = 0x00;
4826 (void)memset(&rp->features[0], 0x00, sizeof(rp->features));
4827 }
4828
/* Default (weak) implementation of the vendor static-address reader:
 * reports no static addresses available. Vendors override this symbol to
 * supply addresses from e.g. factory-programmed registers.
 */
uint8_t __weak hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[],
					   uint8_t size)
{
	ARG_UNUSED(addrs);
	ARG_UNUSED(size);

	/* Zero addresses written */
	return 0;
}
4837
4838 #if defined(CONFIG_BT_HCI_VS_EXT)
4839 static void vs_write_bd_addr(struct net_buf *buf, struct net_buf **evt)
4840 {
4841 struct bt_hci_cp_vs_write_bd_addr *cmd = (void *)buf->data;
4842
4843 ll_addr_set(0, &cmd->bdaddr.val[0]);
4844
4845 *evt = cmd_complete_status(0x00);
4846 }
4847
/* Handle Vendor-Specific Read Build Information command: return a
 * NUL-terminated build string composed of the Zephyr kernel version and the
 * Kconfig-supplied vendor suffix. A compile-time assert guarantees the
 * event buffer is large enough to carry the full string.
 */
static void vs_read_build_info(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_build_info *rp;

#define HCI_VS_BUILD_INFO "Zephyr OS v" \
	KERNEL_VERSION_STRING CONFIG_BT_CTLR_HCI_VS_BUILD_INFO

	const char build_info[] = HCI_VS_BUILD_INFO;

#define BUILD_INFO_EVT_LEN (sizeof(struct bt_hci_evt_hdr) + \
			    sizeof(struct bt_hci_evt_cmd_complete) + \
			    sizeof(struct bt_hci_rp_vs_read_build_info) + \
			    sizeof(build_info))

	BUILD_ASSERT(CONFIG_BT_BUF_EVT_RX_SIZE >= BUILD_INFO_EVT_LEN);

	rp = hci_cmd_complete(evt, sizeof(*rp) + sizeof(build_info));
	rp->status = 0x00;
	/* sizeof(build_info) includes the terminating NUL */
	memcpy(rp->info, build_info, sizeof(build_info));
}
4868
4869 void __weak hci_vendor_read_key_hierarchy_roots(uint8_t ir[16], uint8_t er[16])
4870 {
4871 /* Mark IR as invalid */
4872 (void)memset(ir, 0x00, 16);
4873
4874 /* Mark ER as invalid */
4875 (void)memset(er, 0x00, 16);
4876 }
4877
4878 static void vs_read_static_addrs(struct net_buf *buf, struct net_buf **evt)
4879 {
4880 struct bt_hci_rp_vs_read_static_addrs *rp;
4881
4882 rp = hci_cmd_complete(evt, sizeof(*rp) +
4883 sizeof(struct bt_hci_vs_static_addr));
4884 rp->status = 0x00;
4885 rp->num_addrs = hci_vendor_read_static_addr(rp->a, 1);
4886 }
4887
4888 static void vs_read_key_hierarchy_roots(struct net_buf *buf,
4889 struct net_buf **evt)
4890 {
4891 struct bt_hci_rp_vs_read_key_hierarchy_roots *rp;
4892
4893 rp = hci_cmd_complete(evt, sizeof(*rp));
4894 rp->status = 0x00;
4895 hci_vendor_read_key_hierarchy_roots(rp->ir, rp->er);
4896 }
4897
4898 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
4899 static void vs_set_min_used_chans(struct net_buf *buf, struct net_buf **evt)
4900 {
4901 struct bt_hci_cp_vs_set_min_num_used_chans *cmd = (void *)buf->data;
4902 uint16_t handle = sys_le16_to_cpu(cmd->handle);
4903 uint8_t status;
4904
4905 status = ll_set_min_used_chans(handle, cmd->phys, cmd->min_used_chans);
4906
4907 *evt = cmd_complete_status(status);
4908 }
4909 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
4910
4911 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
4912 static void vs_write_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4913 {
4914 struct bt_hci_cp_vs_write_tx_power_level *cmd = (void *)buf->data;
4915 struct bt_hci_rp_vs_write_tx_power_level *rp;
4916 uint8_t handle_type;
4917 uint16_t handle;
4918 uint8_t status;
4919
4920 handle_type = cmd->handle_type;
4921 handle = sys_le16_to_cpu(cmd->handle);
4922
4923 rp = hci_cmd_complete(evt, sizeof(*rp));
4924 rp->selected_tx_power = cmd->tx_power_level;
4925
4926 status = ll_tx_pwr_lvl_set(handle_type, handle, &rp->selected_tx_power);
4927
4928 rp->status = status;
4929 rp->handle_type = handle_type;
4930 rp->handle = sys_cpu_to_le16(handle);
4931 }
4932
4933 static void vs_read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4934 {
4935 struct bt_hci_cp_vs_read_tx_power_level *cmd = (void *)buf->data;
4936 struct bt_hci_rp_vs_read_tx_power_level *rp;
4937 uint8_t handle_type;
4938 uint16_t handle;
4939 uint8_t status;
4940
4941 handle_type = cmd->handle_type;
4942 handle = sys_le16_to_cpu(cmd->handle);
4943
4944 rp = hci_cmd_complete(evt, sizeof(*rp));
4945
4946 status = ll_tx_pwr_lvl_get(handle_type, handle, 0, &rp->tx_power_level);
4947
4948 rp->status = status;
4949 rp->handle_type = handle_type;
4950 rp->handle = sys_cpu_to_le16(handle);
4951 }
4952 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
4953
4954 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* A memory pool for vendor specific events used for fatal error reporting purposes. */
4956 NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE, 8, NULL);
4957
/* An alias for convenience of the Controller HCI implementation. The Controller is
 * built for a particular architecture, so the alias avoids conditional compilation.
 * The Host may not be aware of the hardware architecture the Controller runs on, so
 * CPU data types for all supported architectures must be available during the build;
 * for that reason the alias is defined here.
 */
4964 #if defined(CONFIG_CPU_CORTEX_M)
/* Cortex-M CPU data layout used as the generic fatal-error CPU data type.
 * NOTE(review): "fata" in the underlying struct tag looks like a typo in
 * the public header; it cannot be corrected here without an API change.
 */
typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;

/* Copy the basic Cortex-M exception stack frame registers into the
 * vendor event payload, converting each word to little-endian.
 */
static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
				       const z_arch_esf_t *esf)
{
	cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
	cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
	cpu_data->a3 = sys_cpu_to_le32(esf->basic.a3);
	cpu_data->a4 = sys_cpu_to_le32(esf->basic.a4);
	cpu_data->ip = sys_cpu_to_le32(esf->basic.ip);
	cpu_data->lr = sys_cpu_to_le32(esf->basic.lr);
	cpu_data->xpsr = sys_cpu_to_le32(esf->basic.xpsr);
}
4978 #endif /* CONFIG_CPU_CORTEX_M */
4979
4980 static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
4981 {
4982 struct net_buf *buf;
4983
4984 buf = net_buf_alloc(&vs_err_tx_pool, K_FOREVER);
4985 if (buf) {
4986 struct bt_hci_evt_le_meta_event *me;
4987 struct bt_hci_evt_hdr *hdr;
4988
4989 net_buf_reserve(buf, BT_BUF_RESERVE);
4990 bt_buf_set_type(buf, BT_BUF_EVT);
4991
4992 hdr = net_buf_add(buf, sizeof(*hdr));
4993 hdr->evt = BT_HCI_EVT_VENDOR;
4994 hdr->len = len + sizeof(*me);
4995
4996 me = net_buf_add(buf, sizeof(*me));
4997 me->subevent = subevt;
4998 }
4999
5000 return buf;
5001 }
5002
5003 struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf)
5004 {
5005 /* Prepare vendor specific HCI Fatal Error event */
5006 struct bt_hci_vs_fatal_error_stack_frame *sf;
5007 bt_hci_vs_fatal_error_cpu_data *cpu_data;
5008 struct net_buf *buf;
5009
5010 buf = vs_err_evt_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_STACK_FRAME,
5011 sizeof(*sf) + sizeof(*cpu_data));
5012 if (buf != NULL) {
5013 sf = net_buf_add(buf, (sizeof(*sf) + sizeof(*cpu_data)));
5014 sf->reason = sys_cpu_to_le32(reason);
5015 sf->cpu_type = BT_HCI_EVT_VS_ERROR_CPU_TYPE_CORTEX_M;
5016
5017 vs_err_fatal_cpu_data_fill(
5018 (bt_hci_vs_fatal_error_cpu_data *)sf->cpu_data, esf);
5019 } else {
5020 LOG_ERR("Can't create HCI Fatal Error event");
5021 }
5022
5023 return buf;
5024 }
5025
5026 static struct net_buf *hci_vs_err_trace_create(uint8_t data_type,
5027 const char *file_path,
5028 uint32_t line, uint64_t pc)
5029 {
5030 uint32_t file_name_len = 0U, pos = 0U;
5031 struct net_buf *buf = NULL;
5032
5033 if (file_path) {
5034 /* Extract file name from a path */
5035 while (file_path[file_name_len] != '\0') {
5036 if (file_path[file_name_len] == '/') {
5037 pos = file_name_len + 1;
5038 }
5039 file_name_len++;
5040 }
5041 file_path += pos;
5042 file_name_len -= pos;
5043
5044 /* If file name was found in file_path, in other words: file_path is not empty
5045 * string and is not `foo/bar/`.
5046 */
5047 if (file_name_len) {
5048 /* Total data length: len = file name strlen + \0 + sizeof(line number)
5049 * Maximum length of an HCI event data is BT_BUF_EVT_RX_SIZE. If total data
5050 * length exceeds this maximum, truncate file name.
5051 */
5052 uint32_t data_len = 1 + sizeof(line);
5053
5054 /* If a buffer is created for a TRACE data, include sizeof(pc) in total
5055 * length.
5056 */
5057 if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5058 data_len += sizeof(pc);
5059 }
5060
5061 if (data_len + file_name_len > BT_BUF_EVT_RX_SIZE) {
5062 uint32_t overflow_len =
5063 file_name_len + data_len - BT_BUF_EVT_RX_SIZE;
5064
5065 /* Truncate the file name length by number of overflow bytes */
5066 file_name_len -= overflow_len;
5067 }
5068
5069 /* Get total event data length including file name length */
5070 data_len += file_name_len;
5071
5072 /* Prepare vendor specific HCI Fatal Error event */
5073 buf = vs_err_evt_create(data_type, data_len);
5074 if (buf != NULL) {
5075 if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5076 net_buf_add_le64(buf, pc);
5077 }
5078 net_buf_add_mem(buf, file_path, file_name_len);
5079 net_buf_add_u8(buf, STR_NULL_TERMINATOR);
5080 net_buf_add_le32(buf, line);
5081 } else {
5082 LOG_ERR("Can't create HCI Fatal Error event");
5083 }
5084 }
5085 }
5086
5087 return buf;
5088 }
5089
/* Public entry: build a Fatal Error TRACE event (includes the PC). */
struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc)
{
	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE, file, line, pc);
}
5094
/* Public entry: build a Fatal Error CTRL_ASSERT event (no PC field). */
struct net_buf *hci_vs_err_assert(const char *file, uint32_t line)
{
	/* ASSERT data does not contain a PC counter, so a zero constant is used */
	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_CTRL_ASSERT, file, line, 0U);
}
5100 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
5101
5102 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate a vendor Connectionless IQ Report event for a periodic sync
 * CTE reception.
 *
 * Vendor variant of the standard LE Connectionless IQ report carrying
 * 16-bit I/Q samples instead of 8-bit ones. Returns silently when the
 * vendor event is masked, when CTE sampling was disabled in the
 * meantime, or when the sync was lost. @p pdu_rx is unused here.
 */
static void vs_le_df_connectionless_iq_report(struct pdu_data *pdu_rx, struct node_rx_pdu *node_rx,
					      struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
	struct ll_sync_set *sync = NULL;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: drop */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->hdr.rx_ftr.param;

	sync = HDR_LLL2ULL(lll);

	/* TX LL thread has higher priority than RX thread. It may happen that
	 * host successfully disables CTE sampling in the meantime.
	 * It should be verified here, to avoid reporting IQ samples after
	 * the functionality was disabled or if sync was lost.
	 */
	if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) || !sync->timeout_reload) {
		/* Drop further processing of the event. */
		return;
	}

	/* Get the sync handle corresponding to the LLL context passed in the
	 * node rx footer field.
	 */
	sync_handle = ull_sync_handle_get(sync);
	per_evt_counter = iq_report->event_counter;

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE
	 * value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);

	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* Slot duration is only meaningful for AoA locally; for AoD it is
	 * dictated by the CTE type itself.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit a single sentinel sample */
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5189 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
5190
5191 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate a vendor Connection IQ Report event for a connection CTE
 * reception.
 *
 * Vendor variant of the standard LE Connection IQ report carrying
 * 16-bit I/Q samples instead of 8-bit ones. Returns silently when the
 * vendor event is masked or CTE sampling was disabled in the meantime.
 */
static void vs_le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: drop */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->hdr.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	/* Without PHY update support only 1M is possible */
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that host successfully
	 * disables CTE sampling in the meantime. It should be verified here, to avoid reporting
	 * IQ samples after the functionality was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* Slot duration is only meaningful for AoA locally; for AoD it is
	 * dictated by the CTE type itself.
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit a single sentinel sample */
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5275 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
5276
5277 #endif /* CONFIG_BT_HCI_VS_EXT */
5278
5279 #if defined(CONFIG_BT_HCI_MESH_EXT)
5280 static void mesh_get_opts(struct net_buf *buf, struct net_buf **evt)
5281 {
5282 struct bt_hci_rp_mesh_get_opts *rp;
5283
5284 rp = hci_cmd_complete(evt, sizeof(*rp));
5285
5286 rp->status = 0x00;
5287 rp->opcode = BT_HCI_OC_MESH_GET_OPTS;
5288
5289 rp->revision = BT_HCI_MESH_REVISION;
5290 rp->ch_map = 0x7;
5291 /*@todo: nRF51 only */
5292 rp->min_tx_power = -30;
5293 /*@todo: nRF51 only */
5294 rp->max_tx_power = 4;
5295 rp->max_scan_filter = CONFIG_BT_CTLR_MESH_SCAN_FILTERS;
5296 rp->max_filter_pattern = CONFIG_BT_CTLR_MESH_SF_PATTERNS;
5297 rp->max_adv_slot = 1U;
5298 rp->evt_prefix_len = 0x01;
5299 rp->evt_prefix = BT_HCI_MESH_EVT_PREFIX;
5300 }
5301
5302 static void mesh_set_scan_filter(struct net_buf *buf, struct net_buf **evt)
5303 {
5304 struct bt_hci_cp_mesh_set_scan_filter *cmd = (void *)buf->data;
5305 struct bt_hci_rp_mesh_set_scan_filter *rp;
5306 uint8_t filter = cmd->scan_filter - 1;
5307 struct scan_filter *f;
5308 uint8_t status = 0x00;
5309 uint8_t i;
5310
5311 if (filter > ARRAY_SIZE(scan_filters) ||
5312 cmd->num_patterns > CONFIG_BT_CTLR_MESH_SF_PATTERNS) {
5313 status = BT_HCI_ERR_INVALID_PARAM;
5314 goto exit;
5315 }
5316
5317 if (filter == sf_curr) {
5318 status = BT_HCI_ERR_CMD_DISALLOWED;
5319 goto exit;
5320 }
5321
5322 /* duplicate filtering not supported yet */
5323 if (cmd->filter_dup) {
5324 status = BT_HCI_ERR_INVALID_PARAM;
5325 goto exit;
5326 }
5327
5328 f = &scan_filters[filter];
5329 for (i = 0U; i < cmd->num_patterns; i++) {
5330 if (!cmd->patterns[i].pattern_len ||
5331 cmd->patterns[i].pattern_len >
5332 BT_HCI_MESH_PATTERN_LEN_MAX) {
5333 status = BT_HCI_ERR_INVALID_PARAM;
5334 goto exit;
5335 }
5336 f->lengths[i] = cmd->patterns[i].pattern_len;
5337 memcpy(f->patterns[i], cmd->patterns[i].pattern, f->lengths[i]);
5338 }
5339
5340 f->count = cmd->num_patterns;
5341
5342 exit:
5343 rp = hci_cmd_complete(evt, sizeof(*rp));
5344 rp->status = status;
5345 rp->opcode = BT_HCI_OC_MESH_SET_SCAN_FILTER;
5346 rp->scan_filter = filter + 1;
5347 }
5348
5349 static void mesh_advertise(struct net_buf *buf, struct net_buf **evt)
5350 {
5351 struct bt_hci_cp_mesh_advertise *cmd = (void *)buf->data;
5352 struct bt_hci_rp_mesh_advertise *rp;
5353 uint8_t adv_slot = cmd->adv_slot;
5354 uint8_t status;
5355
5356 status = ll_mesh_advertise(adv_slot,
5357 cmd->own_addr_type, cmd->random_addr.val,
5358 cmd->ch_map, cmd->tx_power,
5359 cmd->min_tx_delay, cmd->max_tx_delay,
5360 cmd->retx_count, cmd->retx_interval,
5361 cmd->scan_duration, cmd->scan_delay,
5362 cmd->scan_filter, cmd->data_len, cmd->data);
5363 if (!status) {
5364 /* Yields 0xFF if no scan filter selected */
5365 sf_curr = cmd->scan_filter - 1;
5366 }
5367
5368 rp = hci_cmd_complete(evt, sizeof(*rp));
5369 rp->status = status;
5370 rp->opcode = BT_HCI_OC_MESH_ADVERTISE;
5371 rp->adv_slot = adv_slot;
5372 }
5373
/* Mesh VS command: stop advertising on the given advertiser slot. */
static void mesh_advertise_cancel(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_mesh_advertise_cancel *cmd = (void *)buf->data;
	struct bt_hci_rp_mesh_advertise_cancel *rp;
	uint8_t adv_slot = cmd->adv_slot;
	uint8_t status;

	status = ll_mesh_advertise_cancel(adv_slot);
	if (!status) {
		/* 0xFF marks "no scan filter currently selected" */
		sf_curr = 0xFF;
	}

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = status;
	rp->opcode = BT_HCI_OC_MESH_ADVERTISE_CANCEL;
	rp->adv_slot = adv_slot;
}
5392
5393 static int mesh_cmd_handle(struct net_buf *cmd, struct net_buf **evt)
5394 {
5395 struct bt_hci_cp_mesh *cp_mesh;
5396 uint8_t mesh_op;
5397
5398 if (cmd->len < sizeof(*cp_mesh)) {
5399 LOG_ERR("No HCI VSD Command header");
5400 return -EINVAL;
5401 }
5402
5403 cp_mesh = net_buf_pull_mem(cmd, sizeof(*cp_mesh));
5404 mesh_op = cp_mesh->opcode;
5405
5406 switch (mesh_op) {
5407 case BT_HCI_OC_MESH_GET_OPTS:
5408 mesh_get_opts(cmd, evt);
5409 break;
5410
5411 case BT_HCI_OC_MESH_SET_SCAN_FILTER:
5412 mesh_set_scan_filter(cmd, evt);
5413 break;
5414
5415 case BT_HCI_OC_MESH_ADVERTISE:
5416 mesh_advertise(cmd, evt);
5417 break;
5418
5419 case BT_HCI_OC_MESH_ADVERTISE_CANCEL:
5420 mesh_advertise_cancel(cmd, evt);
5421 break;
5422
5423 default:
5424 return -EINVAL;
5425 }
5426
5427 return 0;
5428 }
5429 #endif /* CONFIG_BT_HCI_MESH_EXT */
5430
/* Dispatch vendor-specific HCI commands common to all vendors.
 *
 * @param ocf Opcode Command Field extracted from the VS opcode.
 * @param cmd Command parameter buffer.
 * @param evt Output for the generated event, if any.
 * @return 0 when handled, -EINVAL for an unknown OCF (the caller then
 *         responds with Command Status: Unknown Command).
 */
int hci_vendor_cmd_handle_common(uint16_t ocf, struct net_buf *cmd,
				 struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_VS_READ_VERSION_INFO):
		vs_read_version_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS):
		vs_read_supported_commands(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES):
		vs_read_supported_features(cmd, evt);
		break;

#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	case BT_OCF(BT_HCI_OP_VS_READ_USB_TRANSPORT_MODE):
		/* NOTE(review): no event is generated here — presumably the
		 * USB transport layer answers this one; confirm.
		 */
		break;
	case BT_OCF(BT_HCI_OP_VS_SET_USB_TRANSPORT_MODE):
		/* Switching transport mode performs a controller reset */
		reset(cmd, evt);
		break;
#endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */

#if defined(CONFIG_BT_HCI_VS_EXT)
	case BT_OCF(BT_HCI_OP_VS_READ_BUILD_INFO):
		vs_read_build_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_WRITE_BD_ADDR):
		vs_write_bd_addr(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_STATIC_ADDRS):
		vs_read_static_addrs(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_KEY_HIERARCHY_ROOTS):
		vs_read_key_hierarchy_roots(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	case BT_OCF(BT_HCI_OP_VS_WRITE_TX_POWER_LEVEL):
		vs_write_tx_power_level(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_TX_POWER_LEVEL):
		vs_read_tx_power_level(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case BT_OCF(BT_HCI_OP_VS_SET_MIN_NUM_USED_CHANS):
		vs_set_min_used_chans(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
#endif /* CONFIG_BT_HCI_VS_EXT */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case BT_OCF(BT_HCI_OP_VS_MESH):
		/* NOTE(review): mesh_cmd_handle() errors are ignored here,
		 * so a malformed mesh subcommand yields no response event.
		 */
		mesh_cmd_handle(cmd, evt);
		break;
#endif /* CONFIG_BT_HCI_MESH_EXT */

	default:
		return -EINVAL;
	}

	return 0;
}
5501 #endif
5502
/* Top-level HCI command dispatcher.
 *
 * Pulls and validates the command header, stores the opcode in the
 * file-level _opcode global (used later for Command Complete/Status
 * event creation), then dispatches on the opcode group (OGF).
 *
 * @param cmd     Command buffer; the header is consumed from it.
 * @param node_rx Output for an RX node that some LE commands produce.
 * @return Event buffer to send to the Host, or NULL on malformed input.
 */
struct net_buf *hci_cmd_handle(struct net_buf *cmd, void **node_rx)
{
	struct bt_hci_cmd_hdr *chdr;
	struct net_buf *evt = NULL;
	uint16_t ocf;
	int err;

	if (cmd->len < sizeof(*chdr)) {
		LOG_ERR("No HCI Command header");
		return NULL;
	}

	chdr = net_buf_pull_mem(cmd, sizeof(*chdr));
	if (cmd->len < chdr->param_len) {
		LOG_ERR("Invalid HCI CMD packet length");
		return NULL;
	}

	/* store in a global for later CC/CS event creation */
	_opcode = sys_le16_to_cpu(chdr->opcode);

	ocf = BT_OCF(_opcode);

	switch (BT_OGF(_opcode)) {
	case BT_OGF_LINK_CTRL:
		err = link_control_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_BASEBAND:
		err = ctrl_bb_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_INFO:
		err = info_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_STATUS:
		err = status_cmd_handle(ocf, cmd, &evt);
		break;
	case BT_OGF_LE:
		err = controller_cmd_handle(ocf, cmd, &evt, node_rx);
		break;
#if defined(CONFIG_BT_HCI_VS)
	case BT_OGF_VS:
		err = hci_vendor_cmd_handle(ocf, cmd, &evt);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	/* Unknown OGF/OCF: answer with Command Status: Unknown Command */
	if (err == -EINVAL) {
		evt = cmd_status(BT_HCI_ERR_UNKNOWN_CMD);
	}

	return evt;
}
5558
5559 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
5560 defined(CONFIG_BT_CTLR_CONN_ISO)
/* Emit an HCI Data Buffer Overflow event, if unmasked by the Host.
 *
 * @param buf       Output: allocated event buffer (left untouched when
 *                  the event is masked).
 * @param link_type BT_OVERFLOW_LINK_ACL or BT_OVERFLOW_LINK_ISO.
 */
static void data_buf_overflow(struct net_buf **buf, uint8_t link_type)
{
	struct bt_hci_evt_data_buf_overflow *ep;

	if (!(event_mask & BT_EVT_MASK_DATA_BUFFER_OVERFLOW)) {
		return;
	}

	*buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
	hci_evt_create(*buf, BT_HCI_EVT_DATA_BUF_OVERFLOW, sizeof(*ep));
	ep = net_buf_add(*buf, sizeof(*ep));

	ep->link_type = link_type;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO ||
	* CONFIG_BT_CTLR_CONN_ISO
	*/
5578
5579 #if defined(CONFIG_BT_CONN)
5580 int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
5581 {
5582 struct node_tx *node_tx;
5583 struct bt_hci_acl_hdr *acl;
5584 struct pdu_data *pdu_data;
5585 uint16_t handle;
5586 uint8_t flags;
5587 uint16_t len;
5588
5589 *evt = NULL;
5590
5591 if (buf->len < sizeof(*acl)) {
5592 LOG_ERR("No HCI ACL header");
5593 return -EINVAL;
5594 }
5595
5596 acl = net_buf_pull_mem(buf, sizeof(*acl));
5597 len = sys_le16_to_cpu(acl->len);
5598 handle = sys_le16_to_cpu(acl->handle);
5599
5600 if (buf->len < len) {
5601 LOG_ERR("Invalid HCI ACL packet length");
5602 return -EINVAL;
5603 }
5604
5605 if (len > LL_LENGTH_OCTETS_TX_MAX) {
5606 LOG_ERR("Invalid HCI ACL Data length");
5607 return -EINVAL;
5608 }
5609
5610 /* assigning flags first because handle will be overwritten */
5611 flags = bt_acl_flags(handle);
5612 handle = bt_acl_handle(handle);
5613
5614 node_tx = ll_tx_mem_acquire();
5615 if (!node_tx) {
5616 LOG_ERR("Tx Buffer Overflow");
5617 data_buf_overflow(evt, BT_OVERFLOW_LINK_ACL);
5618 return -ENOBUFS;
5619 }
5620
5621 pdu_data = (void *)node_tx->pdu;
5622
5623 if (bt_acl_flags_bc(flags) != BT_ACL_POINT_TO_POINT) {
5624 return -EINVAL;
5625 }
5626
5627 switch (bt_acl_flags_pb(flags)) {
5628 case BT_ACL_START_NO_FLUSH:
5629 pdu_data->ll_id = PDU_DATA_LLID_DATA_START;
5630 break;
5631 case BT_ACL_CONT:
5632 pdu_data->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
5633 break;
5634 default:
5635 /* BT_ACL_START and BT_ACL_COMPLETE not allowed on LE-U
5636 * from Host to Controller
5637 */
5638 return -EINVAL;
5639 }
5640
5641 pdu_data->len = len;
5642 memcpy(&pdu_data->lldata[0], buf->data, len);
5643
5644 if (ll_tx_mem_enqueue(handle, node_tx)) {
5645 LOG_ERR("Invalid Tx Enqueue");
5646 ll_tx_mem_release(node_tx);
5647 return -EINVAL;
5648 }
5649
5650 return 0;
5651 }
5652 #endif /* CONFIG_BT_CONN */
5653
5654 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
5655 int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
5656 {
5657 struct bt_hci_iso_data_hdr *iso_data_hdr;
5658 struct isoal_sdu_tx sdu_frag_tx;
5659 struct bt_hci_iso_hdr *iso_hdr;
5660 uint32_t *time_stamp;
5661 uint16_t handle;
5662 uint8_t pb_flag;
5663 uint8_t ts_flag;
5664 uint8_t flags;
5665 uint16_t len;
5666
5667 iso_data_hdr = NULL;
5668 *evt = NULL;
5669
5670 if (buf->len < sizeof(*iso_hdr)) {
5671 LOG_ERR("No HCI ISO header");
5672 return -EINVAL;
5673 }
5674
5675 iso_hdr = net_buf_pull_mem(buf, sizeof(*iso_hdr));
5676 handle = sys_le16_to_cpu(iso_hdr->handle);
5677 len = bt_iso_hdr_len(sys_le16_to_cpu(iso_hdr->len));
5678
5679 if (buf->len < len) {
5680 LOG_ERR("Invalid HCI ISO packet length");
5681 return -EINVAL;
5682 }
5683
5684 /* Assigning flags first because handle will be overwritten */
5685 flags = bt_iso_flags(handle);
5686 pb_flag = bt_iso_flags_pb(flags);
5687 ts_flag = bt_iso_flags_ts(flags);
5688 handle = bt_iso_handle(handle);
5689
5690 /* Extract time stamp */
5691 /* Set default to current time
5692 * BT Core V5.3 : Vol 6 Low Energy Controller : Part G IS0-AL:
5693 * 3.1 Time_Offset in framed PDUs :
5694 * The Controller transmitting a SDU may use any of the following
5695 * methods to determine the value of the SDU reference time:
5696 * -- A captured time stamp of the SDU
5697 * -- A time stamp provided by the higher layer
5698 * -- A computed time stamp based on a sequence counter provided by the
5699 * higher layer
5700 * -- Any other method of determining Time_Offset
5701 * (Uses a timestamp computed from the difference in provided
5702 * timestamps, if the timestamp is deemed not based on the
5703 * controller's clock)
5704 */
5705 sdu_frag_tx.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
5706 if (ts_flag) {
5707 /* Use HCI provided time stamp */
5708 time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
5709 len -= sizeof(*time_stamp);
5710 sdu_frag_tx.time_stamp = sys_le32_to_cpu(*time_stamp);
5711 } else {
5712 /* Use controller's capture time */
5713 sdu_frag_tx.time_stamp = sdu_frag_tx.cntr_time_stamp;
5714 }
5715
5716 /* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
5717 if ((pb_flag & 0x01) == 0) {
5718 iso_data_hdr = net_buf_pull_mem(buf, sizeof(*iso_data_hdr));
5719 len -= sizeof(*iso_data_hdr);
5720 sdu_frag_tx.packet_sn = sys_le16_to_cpu(iso_data_hdr->sn);
5721 sdu_frag_tx.iso_sdu_length =
5722 sys_le16_to_cpu(bt_iso_pkt_len(iso_data_hdr->slen));
5723 } else {
5724 sdu_frag_tx.packet_sn = 0;
5725 sdu_frag_tx.iso_sdu_length = 0;
5726 }
5727
5728 /* Packet boudary flags should be bitwise identical to the SDU state
5729 * 0b00 BT_ISO_START
5730 * 0b01 BT_ISO_CONT
5731 * 0b10 BT_ISO_SINGLE
5732 * 0b11 BT_ISO_END
5733 */
5734 sdu_frag_tx.sdu_state = pb_flag;
5735 /* Fill in SDU buffer fields */
5736 sdu_frag_tx.dbuf = buf->data;
5737 sdu_frag_tx.size = len;
5738
5739 if (false) {
5740
5741 #if defined(CONFIG_BT_CTLR_CONN_ISO)
5742 /* Extract source handle from CIS or BIS handle by way of header and
5743 * data path
5744 */
5745 } else if (IS_CIS_HANDLE(handle)) {
5746 struct ll_conn_iso_stream *cis;
5747 struct ll_conn_iso_group *cig;
5748 struct ll_iso_stream_hdr *hdr;
5749 struct ll_iso_datapath *dp_in;
5750
5751 cis = ll_iso_stream_connected_get(handle);
5752 if (!cis) {
5753 return -EINVAL;
5754 }
5755
5756 cig = cis->group;
5757
5758 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
5759 uint64_t event_count;
5760 uint64_t pkt_seq_num;
5761
5762 /* Catch up local pkt_seq_num with internal pkt_seq_num */
5763 event_count = cis->lll.event_count;
5764 pkt_seq_num = event_count + 1U;
5765 /* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
5766 * then we simply check that the pb_flag is an even value, and
5767 * then pkt_seq_num is a future sequence number value compare
5768 * to last recorded number in cis->pkt_seq_num.
5769 *
5770 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
5771 * BIT64(39) will be set (2's compliment value). The diff value
5772 * less than or equal to BIT64_MASK(38) means the diff value is
5773 * positive and hence pkt_seq_num is greater than
5774 * stream->pkt_seq_num. This calculation is valid for when value
5775 * rollover too.
5776 */
5777 if (!(pb_flag & 0x01) &&
5778 (((pkt_seq_num - cis->pkt_seq_num) &
5779 BIT64_MASK(39)) <= BIT64_MASK(38))) {
5780 cis->pkt_seq_num = pkt_seq_num;
5781 } else {
5782 pkt_seq_num = cis->pkt_seq_num;
5783 }
5784
5785 /* Pre-increment, when pg_flag is BT_ISO_SINGLE (0b10) or
5786 * BT_ISO_END (0b11) then we simple check if pb_flag has bit 1
5787 * is set, for next ISO data packet seq num comparison.
5788 */
5789 if (pb_flag & 0x10) {
5790 cis->pkt_seq_num++;
5791 }
5792
5793 /* Target next ISO event to avoid overlapping with, if any,
5794 * current ISO event
5795 */
5796 pkt_seq_num++;
5797 sdu_frag_tx.target_event = pkt_seq_num;
5798 sdu_frag_tx.grp_ref_point =
5799 isoal_get_wrapped_time_us(cig->cig_ref_point,
5800 ((pkt_seq_num - event_count) *
5801 cig->iso_interval *
5802 ISO_INT_UNIT_US));
5803
5804 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5805 uint8_t event_offset;
5806
5807 /* We must ensure sufficient time for ISO-AL to fragment SDU and
5808 * deliver PDUs to the TX queue. By checking ull_ref_get, we
5809 * know if we are within the subevents of an ISO event. If so,
5810 * we can assume that we have enough time to deliver in the next
5811 * ISO event. If we're not active within the ISO event, we don't
5812 * know if there is enough time to deliver in the next event,
5813 * and for safety we set the target to current event + 2.
5814 *
5815 * For FT > 1, we have the opportunity to retransmit in later
5816 * event(s), in which case we have the option to target an
5817 * earlier event (this or next) because being late does not
5818 * instantly flush the payload.
5819 */
5820
5821 event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
5822
5823 if (cis->lll.tx.ft > 1) {
5824 /* FT > 1, target an earlier event */
5825 event_offset -= 1;
5826 }
5827
5828 sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
5829 sdu_frag_tx.grp_ref_point =
5830 isoal_get_wrapped_time_us(cig->cig_ref_point,
5831 (event_offset *
5832 cig->iso_interval *
5833 ISO_INT_UNIT_US));
5834 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5835
5836 /* Get controller's input data path for CIS */
5837 hdr = &cis->hdr;
5838 dp_in = hdr->datapath_in;
5839 if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
5840 LOG_ERR("Input data path not set for HCI");
5841 return -EINVAL;
5842 }
5843
5844 /* Get input data path's source handle */
5845 isoal_source_handle_t source = dp_in->source_hdl;
5846
5847 /* Start Fragmentation */
5848 isoal_status_t isoal_status =
5849 isoal_tx_sdu_fragment(source, &sdu_frag_tx);
5850
5851 if (isoal_status) {
5852 if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
5853 data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
5854 return -ENOBUFS;
5855 }
5856
5857 return -EINVAL;
5858 }
5859
5860 /* TODO: Assign *evt if an immediate response is required */
5861 return 0;
5862 #endif /* CONFIG_BT_CTLR_CONN_ISO */
5863
5864 #if defined(CONFIG_BT_CTLR_ADV_ISO)
5865 } else if (IS_ADV_ISO_HANDLE(handle)) {
5866 struct lll_adv_iso_stream *stream;
5867 struct ll_adv_iso_set *adv_iso;
5868 struct lll_adv_iso *lll_iso;
5869 uint16_t stream_handle;
5870 uint16_t slen;
5871
5872 /* FIXME: Code only expects header present */
5873 slen = iso_data_hdr ? iso_data_hdr->slen : 0;
5874
5875 /* Check invalid BIS PDU length */
5876 if (slen > LL_BIS_OCTETS_TX_MAX) {
5877 LOG_ERR("Invalid HCI ISO Data length");
5878 return -EINVAL;
5879 }
5880
5881 /* Get BIS stream handle and stream context */
5882 stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
5883 stream = ull_adv_iso_stream_get(stream_handle);
5884 if (!stream || !stream->dp) {
5885 LOG_ERR("Invalid BIS stream");
5886 return -EINVAL;
5887 }
5888
5889 adv_iso = ull_adv_iso_by_stream_get(stream_handle);
5890 if (!adv_iso) {
5891 LOG_ERR("No BIG associated with stream handle");
5892 return -EINVAL;
5893 }
5894
5895 lll_iso = &adv_iso->lll;
5896
5897 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
5898 uint64_t event_count;
5899 uint64_t pkt_seq_num;
5900
5901 /* Catch up local pkt_seq_num with internal pkt_seq_num */
5902 event_count = lll_iso->payload_count / lll_iso->bn;
5903 pkt_seq_num = event_count;
/* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
 * then we simply check that the pb_flag is an even value, and
 * then pkt_seq_num is a future sequence number value compared
 * to the last number recorded in stream->pkt_seq_num.
 *
 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
 * BIT64(39) will be set (two's complement value). A diff value
 * less than or equal to BIT64_MASK(38) means the diff is
 * positive and hence pkt_seq_num is greater than
 * stream->pkt_seq_num. This calculation remains valid across
 * value rollover as well.
 */
5916 if (!(pb_flag & 0x01) &&
5917 (((pkt_seq_num - stream->pkt_seq_num) &
5918 BIT64_MASK(39)) <= BIT64_MASK(38))) {
5919 stream->pkt_seq_num = pkt_seq_num;
5920 } else {
5921 pkt_seq_num = stream->pkt_seq_num;
5922 }
5923
/* Pre-increment, when pb_flag is BT_ISO_SINGLE (0b10) or
 * BT_ISO_END (0b11) then we simply check if pb_flag has bit 1
 * set, for the next ISO data packet sequence number comparison.
 *
 * NOTE(review): the check below masks with 0x10, but bit 1 of
 * the 2-bit PB flag is 0x02 — verify the intended mask.
 */
5928 if (pb_flag & 0x10) {
5929 stream->pkt_seq_num++;
5930 }
5931
5932 /* Target next ISO event to avoid overlapping with, if any,
5933 * current ISO event
5934 */
5935 /* FIXME: Implement ISO Tx ack generation early in done compared
5936 * to currently only in prepare. I.e. to ensure upper
5937 * layer has the number of completed packet before the
5938 * next BIG event, so as to supply new ISO data packets.
5939 * Without which upper layers need extra buffers to
5940 * buffer next ISO data packet.
5941 *
5942 * Enable below increment once early Tx ack is
5943 * implemented.
5944 *
5945 * pkt_seq_num++;
5946 */
5947 sdu_frag_tx.target_event = pkt_seq_num;
5948 sdu_frag_tx.grp_ref_point =
5949 isoal_get_wrapped_time_us(adv_iso->big_ref_point,
5950 (((pkt_seq_num + 1U) -
5951 event_count) *
5952 lll_iso->iso_interval *
5953 ISO_INT_UNIT_US));
5954
5955 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5956 uint8_t target_event;
5957 uint8_t event_offset;
5958
5959 /* Determine the target event and the first event offset after
5960 * datapath setup.
5961 * event_offset mitigates the possibility of first SDU being
5962 * late on the datapath and avoid all subsequent SDUs being
5963 * dropped for a said SDU interval. i.e. upper layer is not
5964 * drifting, say first SDU dropped, hence subsequent SDUs all
5965 * dropped, is mitigated by offsetting the grp_ref_point.
5966 *
5967 * It is ok to do the below for every received ISO data, ISOAL
5968 * will not consider subsequent skewed target_event after the
5969 * first use of target_event value.
5970 *
5971 * In BIG implementation in LLL, payload_count corresponds to
5972 * the next BIG event, hence calculate grp_ref_point for next
5973 * BIG event by incrementing the previous elapsed big_ref_point
5974 * by one additional ISO interval.
5975 */
5976 target_event = lll_iso->payload_count / lll_iso->bn;
5977 event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;
5978 event_offset += lll_iso->latency_prepare;
5979
5980 sdu_frag_tx.target_event = target_event + event_offset;
5981 sdu_frag_tx.grp_ref_point =
5982 isoal_get_wrapped_time_us(adv_iso->big_ref_point,
5983 ((event_offset + 1U) *
5984 lll_iso->iso_interval *
5985 ISO_INT_UNIT_US));
5986 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
5987
5988 /* Start Fragmentation */
5989 /* FIXME: need to ensure ISO-AL returns proper isoal_status.
5990 * Currently there are cases where ISO-AL calls LL_ASSERT.
5991 */
5992 isoal_status_t isoal_status =
5993 isoal_tx_sdu_fragment(stream->dp->source_hdl, &sdu_frag_tx);
5994
5995 if (isoal_status) {
5996 if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
5997 data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
5998 return -ENOBUFS;
5999 }
6000
6001 return -EINVAL;
6002 }
6003
6004 return 0;
6005 #endif /* CONFIG_BT_CTLR_ADV_ISO */
6006
6007 }
6008
6009 return -EINVAL;
6010 }
6011 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
6012
6013 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6014 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6015 static void dup_ext_adv_adi_store(struct dup_ext_adv_mode *dup_mode,
6016 const struct pdu_adv_adi *adi,
6017 uint8_t data_status)
6018 {
6019 struct dup_ext_adv_set *adv_set;
6020
6021 adv_set = &dup_mode->set[dup_mode->set_curr];
6022
6023 adv_set->data_cmplt = (data_status ==
6024 BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) ?
6025 1U : 0U;
6026
6027 if (adi) {
6028 (void)memcpy(&adv_set->adi, adi, sizeof(*adi));
6029 } else {
6030 (void)memset(&adv_set->adi, 0U, sizeof(*adi));
6031 }
6032
6033 if (dup_mode->set_count < CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6034 dup_mode->set_count++;
6035 dup_mode->set_curr = dup_mode->set_count;
6036 } else {
6037 dup_mode->set_curr++;
6038 }
6039
6040 if (dup_mode->set_curr == CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6041 dup_mode->set_curr = 0U;
6042 }
6043 }
6044
6045 static void dup_ext_adv_mode_reset(struct dup_ext_adv_mode *dup_adv_mode)
6046 {
6047 uint8_t adv_mode;
6048
6049 for (adv_mode = 0U; adv_mode < DUP_EXT_ADV_MODE_COUNT;
6050 adv_mode++) {
6051 struct dup_ext_adv_mode *dup_mode;
6052
6053 dup_mode = &dup_adv_mode[adv_mode];
6054 dup_mode->set_count = 0U;
6055 dup_mode->set_curr = 0U;
6056 }
6057 }
6058
6059 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
6060 static void dup_ext_adv_reset(void)
6061 {
6062 for (int32_t i = 0; i < dup_count; i++) {
6063 struct dup_entry *dup;
6064
6065 dup = &dup_filter[i];
6066 dup->mask = 0U;
6067 dup_ext_adv_mode_reset(dup->adv_mode);
6068 }
6069 }
6070
6071 static void dup_periodic_adv_reset(uint8_t addr_type, const uint8_t *addr,
6072 uint8_t sid)
6073 {
6074 for (int32_t addr_idx = 0; addr_idx < dup_count; addr_idx++) {
6075 struct dup_ext_adv_mode *dup_mode;
6076 struct dup_entry *dup;
6077
6078 dup = &dup_filter[addr_idx];
6079 if (memcmp(addr, dup->addr.a.val, sizeof(bt_addr_t)) ||
6080 (addr_type != dup->addr.type)) {
6081 continue;
6082 }
6083
6084 dup_mode = &dup->adv_mode[DUP_EXT_ADV_MODE_PERIODIC];
6085 for (uint16_t set_idx = 0; set_idx < dup_mode->set_count;
6086 set_idx++) {
6087 struct dup_ext_adv_set *adv_set;
6088
6089 adv_set = &dup_mode->set[set_idx];
6090 if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != sid) {
6091 continue;
6092 }
6093
6094 /* reset data complete state */
6095 adv_set->data_cmplt = 0U;
6096
6097 return;
6098 }
6099
6100 return;
6101 }
6102 }
6103 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
6104 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6105
/* Decide whether a report from an already-known peer address is a duplicate
 * to drop, or carries a change that must still be reported, updating the
 * filter entry as a side effect.
 *
 * Returns true when the report is a duplicate (filter it out), false when it
 * must be forwarded to the Host (new adv type, new extended adv mode, changed
 * ADI DID, or data not yet reported complete).
 *
 * NOTE: the CONFIG_BT_CTLR_ADV_EXT conditional opens inside the first branch;
 * without extended advertising support the function reduces to "new adv type
 * is reported, anything else is a duplicate".
 */
static inline bool is_dup_or_update(struct dup_entry *dup, uint8_t adv_type,
				    uint8_t adv_mode,
				    const struct pdu_adv_adi *adi,
				    uint8_t data_status)
{
	if (!(dup->mask & BIT(adv_type))) {
		/* report different adv types */
		dup->mask |= BIT(adv_type);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);

		return false;
	} else if (adv_type != PDU_ADV_TYPE_EXT_IND) {
		/* drop duplicate legacy advertising */
		return true;
	} else if (dup->adv_mode[adv_mode].set_count == 0U) {
		/* report different extended adv mode */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
		return false;
	} else if (adi) {
		struct dup_ext_adv_mode *dup_mode;
		uint8_t j;

		/* Search the stored sets for one with the same SID */
		dup_mode = &dup->adv_mode[adv_mode];
		for (j = 0; j < dup_mode->set_count; j++) {
			struct dup_ext_adv_set *adv_set;

			adv_set = &dup_mode->set[j];
			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != PDU_ADV_ADI_SID_GET(adi)) {
				continue;
			}

			if (PDU_ADV_ADI_DID_GET(&adv_set->adi) != PDU_ADV_ADI_DID_GET(adi)) {
				/* report different DID */
				adv_set->adi.did_sid_packed[0] = adi->did_sid_packed[0];
				adv_set->adi.did_sid_packed[1] = adi->did_sid_packed[1];
				/* set new data status */
				if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
					adv_set->data_cmplt = 1U;
				} else {
					adv_set->data_cmplt = 0U;
				}

				return false;
			} else if (!adv_set->data_cmplt &&
				   (data_status ==
				    BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE)) {
				/* report data complete */
				adv_set->data_cmplt = 1U;
				return false;
			} else if (!adv_set->data_cmplt) {
				/* report partial and incomplete data */
				return false;
			}

			/* Same SID and DID with data already reported
			 * complete: a duplicate.
			 */
			return true;
		}

		/* SID not yet recorded for this entry: store and report */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		return false;
	}

	return true;
}
6176
/* Duplicate filter entry point for advertising reports.
 *
 * Returns true when the report (identified by peer address, adv type,
 * extended adv mode and ADI) is a duplicate to drop; false when it must be
 * reported. Unknown peers are inserted into the filter, replacing the oldest
 * entry in ring-buffer fashion once the filter is full.
 *
 * A negative dup_count disables filtering entirely (everything is reported).
 */
static bool dup_found(uint8_t adv_type, uint8_t addr_type, const uint8_t *addr,
		      uint8_t adv_mode, const struct pdu_adv_adi *adi,
		      uint8_t data_status)
{
	/* check for duplicate filtering */
	if (dup_count >= 0) {
		struct dup_entry *dup;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		__ASSERT((adv_mode < ARRAY_SIZE(dup_filter[0].adv_mode)),
			 "adv_mode index out-of-bound");
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* find for existing entry and update if changed */
		for (int32_t i = 0; i < dup_count; i++) {
			dup = &dup_filter[i];
			if (memcmp(addr, &dup->addr.a.val[0],
				   sizeof(bt_addr_t)) ||
			    (addr_type != dup->addr.type)) {
				continue;
			}

			/* still duplicate or update entry with change */
			return is_dup_or_update(dup, adv_type, adv_mode, adi,
						data_status);
		}

		/* insert into the duplicate filter */
		dup = &dup_filter[dup_curr];
		(void)memcpy(&dup->addr.a.val[0], addr, sizeof(bt_addr_t));
		dup->addr.type = addr_type;
		dup->mask = BIT(adv_type);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		dup_ext_adv_mode_reset(dup->adv_mode);
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Grow the filter until full, then advance the replacement
		 * cursor; wrap the cursor at the end of the array.
		 */
		if (dup_count < CONFIG_BT_CTLR_DUP_FILTER_LEN) {
			dup_count++;
			dup_curr = dup_count;
		} else {
			dup_curr++;
		}

		if (dup_curr == CONFIG_BT_CTLR_DUP_FILTER_LEN) {
			dup_curr = 0U;
		}
	}

	return false;
}
6230 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6231
6232 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
/* Generate an HCI LE Directed Advertising Report event from a received
 * ADV_DIRECT_IND PDU, subject to the event masks and (when enabled) the
 * duplicate filter.
 */
static inline void le_dir_adv_report(struct pdu_adv *adv, struct net_buf *buf,
				     int8_t rssi, uint8_t rl_idx)
{
	struct bt_hci_evt_le_direct_adv_report *drp;
	struct bt_hci_evt_le_direct_adv_info *dir_info;

	/* Suppress the event when the Host has not enabled it */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_DIRECT_ADV_REPORT)) {
		return;
	}

	LL_ASSERT(adv->type == PDU_ADV_TYPE_DIRECT_IND);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* NOTE: adv_ind.addr aliases direct_ind.adv_addr in the PDU union,
	 * so this lookup uses the advertiser's address — verify against the
	 * pdu_adv union layout.
	 */
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	drp = meta_evt(buf, BT_HCI_EVT_LE_DIRECT_ADV_REPORT,
		       sizeof(*drp) + sizeof(*dir_info));

	drp->num_reports = 1U;
	dir_info = (void *)(((uint8_t *)drp) + sizeof(*drp));

	/* Directed Advertising */
	dir_info->evt_type = BT_HCI_ADV_DIRECT_IND;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &dir_info->addr.type,
				  &dir_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		dir_info->addr.type += 2U;
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		/* Report the advertiser address as received over the air */
		dir_info->addr.type = adv->tx_addr;
		memcpy(&dir_info->addr.a.val[0], &adv->direct_ind.adv_addr[0],
		       sizeof(bt_addr_t));
	}

	/* Target (directed) address from the PDU */
	dir_info->dir_addr.type = adv->rx_addr;
	memcpy(&dir_info->dir_addr.a.val[0],
	       &adv->direct_ind.tgt_addr[0], sizeof(bt_addr_t));

	dir_info->rssi = rssi;
}
6284 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6285
6286 #if defined(CONFIG_BT_OBSERVER)
6287 #if defined(CONFIG_BT_HCI_MESH_EXT)
6288 static inline bool scan_filter_apply(uint8_t filter, uint8_t *data, uint8_t len)
6289 {
6290 struct scan_filter *f = &scan_filters[filter];
6291
6292 /* No patterns means filter out all advertising packets */
6293 for (uint8_t i = 0; i < f->count; i++) {
6294 /* Require at least the length of the pattern */
6295 if (len >= f->lengths[i] &&
6296 !memcmp(data, f->patterns[i], f->lengths[i])) {
6297 return true;
6298 }
6299 }
6300
6301 return false;
6302 }
6303
6304 static inline void le_mesh_scan_report(struct pdu_adv *adv,
6305 struct node_rx_pdu *node_rx,
6306 struct net_buf *buf, int8_t rssi)
6307 {
6308 uint8_t data_len = (adv->len - BDADDR_SIZE);
6309 struct bt_hci_evt_mesh_scanning_report *mep;
6310 struct bt_hci_evt_mesh_scan_report *sr;
6311 uint32_t instant;
6312 uint8_t chan;
6313
6314 LL_ASSERT(adv->type == PDU_ADV_TYPE_NONCONN_IND);
6315
6316 /* Filter based on currently active Scan Filter */
6317 if (sf_curr < ARRAY_SIZE(scan_filters) &&
6318 !scan_filter_apply(sf_curr, &adv->adv_ind.data[0], data_len)) {
6319 /* Drop the report */
6320 return;
6321 }
6322
6323 chan = node_rx->hdr.rx_ftr.chan;
6324 instant = node_rx->hdr.rx_ftr.anchor_ticks;
6325
6326 mep = mesh_evt(buf, BT_HCI_EVT_MESH_SCANNING_REPORT,
6327 sizeof(*mep) + sizeof(*sr));
6328
6329 mep->num_reports = 1U;
6330 sr = (void *)(((uint8_t *)mep) + sizeof(*mep));
6331 sr->addr.type = adv->tx_addr;
6332 memcpy(&sr->addr.a.val[0], &adv->adv_ind.addr[0], sizeof(bt_addr_t));
6333 sr->chan = chan;
6334 sr->rssi = rssi;
6335 sys_put_le32(instant, (uint8_t *)&sr->instant);
6336
6337 sr->data_len = data_len;
6338 memcpy(&sr->data[0], &adv->adv_ind.data[0], data_len);
6339 }
6340 #endif /* CONFIG_BT_HCI_MESH_EXT */
6341
/* Generate an HCI LE Advertising Report event from a received legacy
 * advertising PDU.
 *
 * Dispatches directed-advertising and Mesh reports to their dedicated
 * handlers, applies the event masks and (when enabled) the duplicate filter,
 * then builds a single-report event carrying the advertiser address, AD data
 * and RSSI.
 */
static void le_advertising_report(struct pdu_data *pdu_data,
				  struct node_rx_pdu *node_rx,
				  struct net_buf *buf)
{
	/* Map pdu_adv_type to the HCI advertising event type; 0xff marks
	 * PDU types that never produce a legacy report.
	 */
	const uint8_t c_adv_type[] = { 0x00, 0x01, 0x03, 0xff, 0x04,
				       0xff, 0x02 };
	struct bt_hci_evt_le_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_advertising_info *adv_info;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;
#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	uint8_t direct_report;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
	int8_t *prssi;

	/* The Link Layer provides RSSI as an absolute value */
	rssi = -(node_rx->hdr.rx_ftr.rssi);
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	direct_report = node_rx->hdr.rx_ftr.direct;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	/* Directed PDUs are reported through the direct adv report path */
	if (direct_report) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
		le_dir_adv_report(adv, buf, rssi, rl_idx);
#else
		le_dir_adv_report(adv, buf, rssi, 0xFF);
#endif /* CONFIG_BT_CTLR_PRIVACY */
		return;
	}
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) {
		le_mesh_scan_report(adv, node_rx, buf, rssi);
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Suppress the event when the Host has not enabled it */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_ADVERTISING_REPORT)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND carries no AD data; everything else carries the PDU
	 * payload past the AdvA field.
	 */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}
	/* Report layout: info struct, AD data, then one trailing RSSI byte */
	info_len = sizeof(struct bt_hci_evt_le_advertising_info) + data_len +
		   sizeof(*prssi);
	sep = meta_evt(buf, BT_HCI_EVT_LE_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = c_adv_type[adv->type];

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */

		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
	/* RSSI */
	prssi = &adv_info->data[0] + data_len;
	*prssi = rssi;
}
6445
6446 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Generate an HCI LE Extended Advertising Report event for a received LEGACY
 * advertising PDU (used when the Host scans with extended commands but the
 * peer advertises with legacy PDUs). Extended-only fields are filled with
 * their "not applicable" values.
 */
static void le_ext_adv_legacy_report(struct pdu_data *pdu_data,
				     struct node_rx_pdu *node_rx,
				     struct net_buf *buf)
{
	/* Lookup event type based on pdu_adv_type set by LLL */
	const uint8_t evt_type_lookup[] = {
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* ADV_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* DIRECT_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY), /* NONCONN_IND */
		0xff, /* Invalid index lookup */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN), /* SCAN_RSP to an ADV_SCAN_IND
						*/
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* SCAN_RSP to an ADV_IND,
						* NOTE: LLL explicitly sets
						* adv_type to
						* PDU_ADV_TYPE_ADV_IND_SCAN_RSP
						*/
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN) /* SCAN_IND */
	};
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	/* Suppress the event when the Host has not enabled it */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(node_rx->hdr.rx_ftr.rssi);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND carries no AD data */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type_lookup[adv->type]);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
	} else
#endif /* CONFIG_BT_CTLR_PRIVACY */
	{
		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	/* Legacy PDUs: primary PHY is always 1M, no secondary PHY, no SID,
	 * no TX power, no periodic advertising interval.
	 */
	adv_info->prim_phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
	adv_info->sec_phy = 0U;
	adv_info->sid = 0xff;
	adv_info->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	adv_info->rssi = rssi;
	adv_info->interval = 0U;

	if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
		adv_info->direct_addr.type = adv->rx_addr;
		bt_addr_copy(&adv_info->direct_addr.a,
			     (void *)adv->direct_ind.tgt_addr);
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0U,
			     sizeof(adv_info->direct_addr.a.val));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
}
6562
/* Determine the HCI address type to report for the directed (target) address
 * of a directed extended advertising report.
 *
 * @param lll           Scanner LLL context the report was received on.
 * @param peer_resolved True when the advertiser's address was resolved.
 * @param direct_report True when the target address is an RPA the Controller
 *                      could not resolve.
 * @param rx_addr_type  Target address type from the received PDU.
 * @param rx_addr       Target address from the received PDU.
 */
static uint8_t ext_adv_direct_addr_type(struct lll_scan *lll,
					bool peer_resolved, bool direct_report,
					uint8_t rx_addr_type,
					const uint8_t *const rx_addr)
{
	/* The directed address is resolvable private address, but Controller
	 * could not resolve it.
	 */
	if (direct_report) {
		return BT_ADDR_LE_UNRESOLVED;
	}

	/* NOTE: if (0) chain keeps the privacy branch optional at compile
	 * time; rx_addr_type/rx_addr are only used when privacy is enabled.
	 */
	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Peer directed advertiser's address was resolved */
	} else if (peer_resolved) {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);
		if ((rx_addr_type == lll->init_addr_type) &&
		    !memcmp(lll->init_addr, rx_addr, BDADDR_SIZE)) {
			/* Peer directed advertiser used local scanner's
			 * initiator address.
			 */
			return scan->own_addr_type;
		}

		/* Peer directed advertiser used directed resolvable
		 * private address generated from the local scanner's
		 * Identity Resolution Key.
		 */
		return scan->own_addr_type | BIT(1);
#endif /* CONFIG_BT_CTLR_PRIVACY */
	} else {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);

		/* Peer directed advertiser used local scanner's
		 * initiator address.
		 */
		return scan->own_addr_type;
	}
}
6607
/* Extract the AD data portion of an extended advertising PDU.
 *
 * Walks the common extended advertising header, skipping each optional field
 * that the header flags mark present (AdvA, TargetA, ADI, AuxPtr, SyncInfo,
 * TxPower, then any remaining ACAD bytes). On return, *data points at the AD
 * data and the returned value is its length (0 when the PDU carries none).
 * *sec_phy is set from the AuxPtr field when present; *tx_pwr is set from the
 * TxPower field, defaulting to "no preference".
 */
static uint8_t ext_adv_data_get(const struct node_rx_pdu *node_rx_data,
				uint8_t *const sec_phy, int8_t *const tx_pwr,
				const uint8_t **const data)
{
	const struct pdu_adv *adv = (void *)node_rx_data->pdu;
	const struct pdu_adv_com_ext_adv *p;
	const struct pdu_adv_ext_hdr *h;
	uint8_t hdr_buf_len;
	const uint8_t *ptr;
	uint8_t hdr_len;

	*tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;

	p = (void *)&adv->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = (void *)h;

	if (!p->ext_hdr_len) {
		/* No extended header: data begins right after the minimum
		 * sized common header.
		 */
		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

		goto no_ext_hdr;
	}

	ptr = h->data;

	/* Skip the optional header fields in their on-air order */
	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->adi) {
		ptr += sizeof(struct pdu_adv_adi);
	}

	if (h->aux_ptr) {
		struct pdu_adv_aux_ptr *aux_ptr;

		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);

		/* Report the secondary PHY carried in the AuxPtr field */
		*sec_phy = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	}

	if (h->sync_info) {
		ptr += sizeof(struct pdu_adv_sync_info);
	}

	if (h->tx_pwr) {
		*tx_pwr = *(int8_t *)ptr;
		ptr++;
	}

	/* Skip any remaining header bytes (ACAD) up to the length the
	 * extended header claims.
	 */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len < hdr_buf_len) {
		uint8_t acad_len = hdr_buf_len - hdr_len;

		ptr += acad_len;
		hdr_len += acad_len;
	}

no_ext_hdr:
	if (hdr_len < adv->len) {
		*data = ptr;

		return adv->len - hdr_len;
	}

	return 0;
}
6681
6682 static void node_rx_extra_list_release(struct node_rx_pdu *node_rx_extra)
6683 {
6684 while (node_rx_extra) {
6685 struct node_rx_pdu *node_rx_curr;
6686
6687 node_rx_curr = node_rx_extra;
6688 node_rx_extra = node_rx_curr->hdr.rx_ftr.extra;
6689
6690 node_rx_curr->hdr.next = NULL;
6691 ll_rx_mem_release((void **)&node_rx_curr);
6692 }
6693 }
6694
/* Fill a single HCI LE Extended Advertising Report event into buf from the
 * already-extracted report fields.
 *
 * A NULL adv_addr yields an all-zero advertiser address; a NULL adi reports
 * the SID as "invalid". The direct address fields are only populated when the
 * event type has the DIRECT bit set.
 */
static void ext_adv_info_fill(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
			      uint8_t adv_addr_type, const uint8_t *adv_addr,
			      uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi, uint8_t data_len,
			      const uint8_t *data, struct net_buf *buf)
{
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	uint8_t info_len;

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type);

	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	} else if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  adv_info->addr.a.val);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		adv_info->addr.type += 2U;
#else /* !CONFIG_BT_CTLR_PRIVACY */
		ARG_UNUSED(rl_idx);
#endif /* !CONFIG_BT_CTLR_PRIVACY */
	} else if (adv_addr) {
		adv_info->addr.type = adv_addr_type;
		(void)memcpy(adv_info->addr.a.val, adv_addr, sizeof(bt_addr_t));
	} else {
		/* Anonymous advertising: no advertiser address available */
		adv_info->addr.type = 0U;
		(void)memset(adv_info->addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->prim_phy = find_lsb_set(phy);
	adv_info->sec_phy = sec_phy;
	adv_info->sid = (adi) ? PDU_ADV_ADI_SID_GET(adi) : BT_HCI_LE_EXT_ADV_SID_INVALID;
	adv_info->tx_power = tx_pwr;
	adv_info->rssi = rssi;
	adv_info->interval = interval_le16;

	if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_DIRECT) {
		adv_info->direct_addr.type = direct_addr_type;
		(void)memcpy(adv_info->direct_addr.a.val, direct_addr,
			     sizeof(bt_addr_t));
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	(void)memcpy(adv_info->data, data, data_len);
}
6756
6757 static void ext_adv_pdu_frag(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
6758 uint8_t adv_addr_type, const uint8_t *adv_addr,
6759 uint8_t direct_addr_type,
6760 const uint8_t *direct_addr, uint8_t rl_idx,
6761 int8_t tx_pwr, int8_t rssi, uint16_t interval_le16,
6762 const struct pdu_adv_adi *adi,
6763 uint8_t data_len_max,
6764 uint16_t *const data_len_total,
6765 uint8_t *const data_len,
6766 const uint8_t **const data, struct net_buf *buf,
6767 struct net_buf **const evt_buf)
6768 {
6769 const uint8_t data_len_frag = MIN(*data_len, data_len_max);
6770
6771 do {
6772 /* Prepare a fragment of PDU data in a HCI event */
6773 ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type,
6774 adv_addr, direct_addr_type, direct_addr,
6775 rl_idx, tx_pwr, rssi, interval_le16, adi,
6776 data_len_frag, *data, *evt_buf);
6777
6778 *data += data_len_frag;
6779 *data_len -= data_len_frag;
6780 *data_len_total -= data_len_frag;
6781
6782 *evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
6783 LL_ASSERT(*evt_buf);
6784
6785 net_buf_frag_add(buf, *evt_buf);
6786
6787 /* Continue to fragment until last partial PDU data fragment,
6788 * remainder PDU data's HCI event will be prepare by caller.
6789 */
6790 } while (*data_len > data_len_max);
6791 }
6792
/* Fragment advertising data that spans a chain of received PDUs into HCI
 * extended advertising report events marked "data status: partial".
 *
 * Walks the PDU chain via rx_ftr.extra, emitting fragments for each PDU and
 * pulling the next PDU's data with ext_adv_data_get(). Loops until either the
 * remaining data is the final fragment (caller emits the last, non-partial
 * event) or it fits within a single HCI event.
 */
static void ext_adv_data_frag(const struct node_rx_pdu *node_rx_data,
			      uint8_t evt_type, uint8_t phy,
			      uint8_t *const sec_phy, uint8_t adv_addr_type,
			      const uint8_t *adv_addr, uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t *const tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi,
			      uint8_t data_len_max, uint16_t data_len_total,
			      uint8_t *const data_len,
			      const uint8_t **const data, struct net_buf *buf,
			      struct net_buf **const evt_buf)
{
	/* All fragments emitted here are flagged "partial data" */
	evt_type |= (BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL << 5);

	do {
		/* Fragment the PDU data */
		ext_adv_pdu_frag(evt_type, phy, *sec_phy, adv_addr_type,
				 adv_addr, direct_addr_type, direct_addr,
				 rl_idx, *tx_pwr, rssi, interval_le16, adi,
				 data_len_max, &data_len_total, data_len,
				 data, buf, evt_buf);

		/* Check if more PDUs in the list */
		node_rx_data = node_rx_data->hdr.rx_ftr.extra;
		if (node_rx_data) {
			if (*data_len >= data_len_total) {
				/* Last fragment restricted to maximum scan
				 * data length, caller will prepare the last
				 * HCI fragment event.
				 */
				break;
			} else if (*data_len) {
				/* Last fragment of current PDU data */
				ext_adv_pdu_frag(evt_type, phy, *sec_phy,
						 adv_addr_type, adv_addr,
						 direct_addr_type, direct_addr,
						 rl_idx, *tx_pwr, rssi,
						 interval_le16, adi,
						 data_len_max, &data_len_total,
						 data_len, data, buf, evt_buf);
			}

			/* Get next PDU data in list */
			*data_len = ext_adv_data_get(node_rx_data, sec_phy,
						     tx_pwr, data);

			/* Restrict PDU data to maximum scan data length */
			if (*data_len > data_len_total) {
				*data_len = data_len_total;
			}
		}

		/* Continue to fragment if current PDU data length less than
		 * total data length or current PDU data length greater than
		 * HCI event max length.
		 */
	} while ((*data_len < data_len_total) || (*data_len > data_len_max));
}
6852
/* Generate LE Extended Advertising Report HCI event(s) from a chain of
 * received advertising PDUs (primary PDU plus any AUX_ADV_IND/AUX_CHAIN_IND
 * and scan response PDUs linked via hdr.rx_ftr.extra).
 *
 * Walks the node_rx chain accumulating the advertiser address, target
 * address, ADI, Tx power, AD data length and scan response data; truncates
 * at CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX; fragments the report over multiple
 * HCI event buffers when the data exceeds one event's capacity; and, when a
 * scan response was received, appends a second (scan response) report.
 *
 * @param pdu_data First PDU in the chain (aliased as struct pdu_adv).
 * @param node_rx  Head of the node_rx chain for this report.
 * @param buf      HCI event buffer to fill; extra fragments are allocated
 *                 and appended to it as net_buf fragments.
 * @param phy      Primary PHY the PDU chain was received on.
 *
 * The extra node list is always released before returning.
 */
static void le_ext_adv_report(struct pdu_data *pdu_data,
			      struct node_rx_pdu *node_rx,
			      struct net_buf *buf, uint8_t phy)
{
	int8_t scan_rsp_tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	struct node_rx_pdu *node_rx_scan_data = NULL;
	struct node_rx_pdu *node_rx_data = NULL;
	const struct pdu_adv_adi *adi = NULL;
	uint16_t scan_data_len_total = 0U;
	struct node_rx_pdu *node_rx_curr;
	struct node_rx_pdu *node_rx_next;
	const uint8_t *scan_data = NULL;
	uint8_t scan_data_status = 0U;
	uint8_t direct_addr_type = 0U;
	uint16_t data_len_total = 0U;
	uint8_t *direct_addr = NULL;
	uint16_t interval_le16 = 0U;
	const uint8_t *data = NULL;
	uint8_t scan_data_len = 0U;
	uint8_t adv_addr_type = 0U;
	uint8_t sec_phy_scan = 0U;
	uint8_t *adv_addr = NULL;
	uint8_t data_status = 0U;
	struct net_buf *evt_buf;
	bool devmatch = false;
	uint8_t data_len = 0U;
	uint8_t evt_type = 0U;
	uint8_t sec_phy = 0U;
	uint8_t data_len_max;
	uint8_t rl_idx = 0U;
	struct pdu_adv *adv;
	int8_t rssi;

	/* NOTE: This function uses a lot of initializers before the check and
	 * return below, as an exception to initializing close to their locality
	 * of reference. This is acceptable as the return is unlikely in typical
	 * Controller use.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
		/* Event masked by the Host: discard the whole chain */
		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
		return;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Initialize to an out-of-range index meaning "no RL entry yet" */
	rl_idx = ll_rl_size_get();
#endif /* CONFIG_BT_CTLR_PRIVACY */

	adv = (void *)pdu_data;
	node_rx_curr = node_rx;
	node_rx_next = node_rx_curr->hdr.rx_ftr.extra;
	/* Parse each PDU in the chain, accumulating report fields */
	do {
		int8_t tx_pwr_curr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		struct pdu_adv_adi *adi_curr = NULL;
		uint8_t direct_addr_type_curr = 0U;
		bool direct_resolved_curr = false;
		uint8_t *direct_addr_curr = NULL;
		uint8_t adv_addr_type_curr = 0U;
		struct pdu_adv_com_ext_adv *p;
		uint8_t *adv_addr_curr = NULL;
		uint8_t data_len_curr = 0U;
		uint8_t *data_curr = NULL;
		struct pdu_adv_ext_hdr *h;
		uint8_t sec_phy_curr = 0U;
		uint8_t evt_type_curr;
		uint8_t hdr_buf_len;
		uint8_t hdr_len;
		uint8_t *ptr;

#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
		bool direct_report_curr = node_rx_curr->hdr.rx_ftr.direct;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
		uint8_t rl_idx_curr = node_rx_curr->hdr.rx_ftr.rl_idx;

		direct_resolved_curr = node_rx_curr->hdr.rx_ftr.direct_resolved;
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		const bool devmatch_curr = node_rx_curr->hdr.rx_ftr.devmatch;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

		/* The Link Layer currently returns RSSI as an absolute value */
		rssi = -(node_rx_curr->hdr.rx_ftr.rssi);

		LOG_DBG("phy= 0x%x, type= 0x%x, len= %u, tat= %u, rat= %u,"
			" rssi=%d dB", phy, adv->type, adv->len, adv->tx_addr,
			adv->rx_addr, rssi);

		p = (void *)&adv->adv_ext_ind;
		h = (void *)p->ext_hdr_adv_data;
		ptr = (void *)h;

		LOG_DBG(" Ext. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);

		evt_type_curr = p->adv_mode;

		/* No extended header: PDU payload is all AD data */
		if (!p->ext_hdr_len) {
			hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

			goto no_ext_hdr;
		}

		ptr = h->data;

		/* Optional AdvA field */
		if (h->adv_addr) {
			bt_addr_le_t addr;

			adv_addr_type_curr = adv->tx_addr;
			adv_addr_curr = ptr;

			addr.type = adv->tx_addr;
			(void)memcpy(addr.a.val, ptr, sizeof(bt_addr_t));
			ptr += BDADDR_SIZE;

			LOG_DBG(" AdvA: %s", bt_addr_le_str(&addr));
		}

		/* Optional TargetA field (directed advertising) */
		if (h->tgt_addr) {
			struct lll_scan *lll;
			bt_addr_le_t addr;

			lll = node_rx->hdr.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
			direct_addr_type_curr =
				ext_adv_direct_addr_type(lll,
							 direct_resolved_curr,
							 direct_report_curr,
							 adv->rx_addr, ptr);
#else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
			direct_addr_type_curr =
				ext_adv_direct_addr_type(lll,
							 direct_resolved_curr,
							 false, adv->rx_addr,
							 ptr);
#endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */

			direct_addr_curr = ptr;
			ptr += BDADDR_SIZE;

			addr.type = adv->rx_addr;
			(void)memcpy(addr.a.val, direct_addr_curr,
				     sizeof(bt_addr_t));

			LOG_DBG(" TgtA: %s", bt_addr_le_str(&addr));
		}

		/* Optional ADI field (DID/SID) */
		if (h->adi) {
			adi_curr = (void *)ptr;

			ptr += sizeof(*adi);

			LOG_DBG(" AdvDataInfo DID = 0x%x, SID = 0x%x",
				PDU_ADV_ADI_DID_GET(adi_curr), PDU_ADV_ADI_SID_GET(adi_curr));
		}

		/* Optional AuxPtr field: more data follows on a secondary PHY */
		if (h->aux_ptr) {
			struct pdu_adv_aux_ptr *aux_ptr;
			uint8_t aux_phy;

			aux_ptr = (void *)ptr;

			/* Don't report if invalid phy or AUX_ADV_IND was not received
			 * See BT Core 5.4, Vol 6, Part B, Section 4.4.3.5:
			 * If the Controller does not listen for or does not receive the
			 * AUX_ADV_IND PDU, no report shall be generated
			 */
			if ((node_rx_curr == node_rx && !node_rx_next) ||
			    PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
				struct node_rx_ftr *ftr;

				ftr = &node_rx->hdr.rx_ftr;
				node_rx_extra_list_release(ftr->extra);
				return;
			}

			ptr += sizeof(*aux_ptr);

			sec_phy_curr = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));

			aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));

			LOG_DBG(" AuxPtr chan_idx = %u, ca = %u, offs_units "
				"= %u offs = 0x%x, phy = 0x%x",
				aux_ptr->chan_idx, aux_ptr->ca,
				aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
		}

		/* Optional SyncInfo field: capture periodic adv interval */
		if (h->sync_info) {
			struct pdu_adv_sync_info *si;

			si = (void *)ptr;
			ptr += sizeof(*si);

			interval_le16 = si->interval;

			LOG_DBG(" SyncInfo offs = %u, offs_unit = 0x%x, "
				"interval = 0x%x, sca = 0x%x, "
				"chan map = 0x%x 0x%x 0x%x 0x%x 0x%x, "
				"AA = 0x%x, CRC = 0x%x 0x%x 0x%x, "
				"evt cntr = 0x%x",
				sys_le16_to_cpu(si->offs),
				si->offs_units,
				sys_le16_to_cpu(si->interval),
				((si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
				  PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
				 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS),
				si->sca_chm[0], si->sca_chm[1], si->sca_chm[2],
				si->sca_chm[3],
				(si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
				 ~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK),
				sys_le32_to_cpu(si->aa),
				si->crc_init[0], si->crc_init[1],
				si->crc_init[2], sys_le16_to_cpu(si->evt_cntr));
		}

		/* Optional TxPower field */
		if (h->tx_pwr) {
			tx_pwr_curr = *(int8_t *)ptr;
			ptr++;

			LOG_DBG(" Tx pwr= %d dB", tx_pwr_curr);
		}

		hdr_len = ptr - (uint8_t *)p;
		hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
		if (hdr_len > hdr_buf_len) {
			LOG_WRN(" Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
		} else {
			/* Skip over any ACAD bytes to reach the AD data */
			uint8_t acad_len = hdr_buf_len - hdr_len;

			if (acad_len) {
				ptr += acad_len;
				hdr_len += acad_len;
			}
		}

no_ext_hdr:
		/* Remaining payload after the header is AD data */
		if (hdr_len < adv->len) {
			data_len_curr = adv->len - hdr_len;
			data_curr = ptr;

			/* NOTE(review): logs the aggregate data_len, not
			 * data_len_curr of this PDU — verify intent.
			 */
			LOG_DBG(" AD Data (%u): <todo>", data_len);
		}

		if (data_len_total + data_len_curr > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* Truncating advertising data
			 * Note that this has to be done at a PDU boundary, so stop
			 * processing nodes from this one forward
			 */
			if (scan_data) {
				scan_data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			} else {
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			}
			break;
		}

		if (node_rx_curr == node_rx) {
			/* First PDU in the chain: take all fields as-is */
			evt_type = evt_type_curr;
			adv_addr_type = adv_addr_type_curr;
			adv_addr = adv_addr_curr;
			direct_addr_type = direct_addr_type_curr;
			direct_addr = direct_addr_curr;
			adi = adi_curr;
			sec_phy = sec_phy_curr;
			node_rx_data = node_rx_curr;
			data_len = data_len_curr;
			data_len_total = data_len;
			data = data_curr;
			scan_data_len_total = 0U;
			tx_pwr = tx_pwr_curr;

#if defined(CONFIG_BT_CTLR_PRIVACY)
			rl_idx = rl_idx_curr;
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
			devmatch = devmatch_curr;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

		} else {
			/* TODO: Validate current value with previous */

			/* Detect the scan response in the list of node_rx */
			if (node_rx_curr->hdr.rx_ftr.scan_rsp) {
				node_rx_scan_data = node_rx_curr;
				if (sec_phy_curr) {
					sec_phy_scan = sec_phy_curr;
				} else {
					sec_phy_scan = sec_phy;
				}
				scan_data_len = data_len_curr;
				scan_data = data_curr;
				scan_rsp_tx_pwr = tx_pwr_curr;
			}

			/* Later PDUs only fill fields still missing */
			if (!adv_addr) {
				adv_addr_type = adv_addr_type_curr;
				adv_addr = adv_addr_curr;
			}

			if (!direct_addr) {
				direct_addr_type = direct_addr_type_curr;
				direct_addr = direct_addr_curr;
			}

			if (scan_data) {
				scan_data_len_total += data_len_curr;
			} else if (!data) {
				node_rx_data = node_rx_curr;
				data_len = data_len_curr;
				data_len_total = data_len;
				data = data_curr;
				tx_pwr = tx_pwr_curr;
			} else {
				data_len_total += data_len_curr;
			}

#if defined(CONFIG_BT_CTLR_PRIVACY)
			if (rl_idx >= ll_rl_size_get()) {
				rl_idx = rl_idx_curr;
			}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
			if (!devmatch) {
				devmatch = devmatch_curr;
			}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
		}

		if (!node_rx_next) {
			/* End of chain: a pending AuxPtr with no received aux
			 * PDU means the data is incomplete.
			 */
			bool has_aux_ptr = !!sec_phy_curr;

			if (scan_data) {
				if (has_aux_ptr) {
					scan_data_status =
						BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
				}
			} else if (has_aux_ptr) {
				data_status =
					BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			}

			break;
		}

		node_rx_curr = node_rx_next;
		node_rx_next = node_rx_curr->hdr.rx_ftr.extra;
		adv = (void *)node_rx_curr->pdu;
	} while (1);

	/* Filter accept list gating for periodic advertising sync creation */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) &&
	    !devmatch) {
		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Duplicate filtering, keyed on address, event type bits and ADI */
	if (adv_addr) {
		if (dup_scan &&
		    dup_found(PDU_ADV_TYPE_EXT_IND, adv_addr_type, adv_addr,
			      (evt_type & BIT_MASK(2)), adi, data_status)) {
			node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
			return;
		}
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* If data incomplete */
	if (data_status) {
		/* Data incomplete and no more to come */
		if (!(adv_addr ||
		      (adi && ((tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) ||
			       data)))) {
			/* No device address and no valid AD data parsed or
			 * Tx Power present for this PDU chain that has ADI,
			 * skip HCI event generation.
			 * In other terms, generate HCI event if device address
			 * is present or if Tx pwr and/or data is present from
			 * anonymous device.
			 */
			node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
			return;
		}
	}

	/* Set directed advertising bit */
	if (direct_addr) {
		evt_type |= BT_HCI_LE_ADV_EVT_TYPE_DIRECT;
	}

	/* HCI fragment */
	evt_buf = buf;
	/* Maximum AD data payload per HCI event buffer */
	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
		       sizeof(struct bt_hci_evt_le_meta_event) -
		       sizeof(struct bt_hci_evt_le_ext_advertising_report) -
		       sizeof(struct bt_hci_evt_le_ext_advertising_info);

	/* If PDU data length less than total data length or PDU data length
	 * greater than maximum HCI event data length, then fragment.
	 */
	if ((data_len < data_len_total) || (data_len > data_len_max)) {
		ext_adv_data_frag(node_rx_data, evt_type, phy, &sec_phy,
				  adv_addr_type, adv_addr, direct_addr_type,
				  direct_addr, rl_idx, &tx_pwr, rssi,
				  interval_le16, adi, data_len_max,
				  data_len_total, &data_len, &data, buf,
				  &evt_buf);
	}

	/* Set data status bits */
	evt_type |= (data_status << 5);

	/* Start constructing the adv event for remainder of the PDU data */
	ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type, adv_addr,
			  direct_addr_type, direct_addr, rl_idx, tx_pwr, rssi,
			  interval_le16, adi, data_len, data, evt_buf);

	/* If scan response event to be constructed */
	if (!scan_data) {
		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);

		return;
	}

	/* Set scan response bit */
	evt_type |= BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP;

	/* Clear the data status bits */
	evt_type &= ~(BIT_MASK(2) << 5);

	/* Allocate, append as buf fragement and construct the scan response
	 * event.
	 */
	evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
	LL_ASSERT(evt_buf);

	net_buf_frag_add(buf, evt_buf);

	/* If PDU data length less than total data length or PDU data length
	 * greater than maximum HCI event data length, then fragment.
	 */
	if ((scan_data_len < scan_data_len_total) ||
	    (scan_data_len > data_len_max)) {
		ext_adv_data_frag(node_rx_scan_data, evt_type, phy,
				  &sec_phy_scan, adv_addr_type, adv_addr,
				  direct_addr_type, direct_addr, rl_idx,
				  &scan_rsp_tx_pwr, rssi, interval_le16, adi,
				  data_len_max, scan_data_len_total,
				  &scan_data_len, &scan_data, buf, &evt_buf);
	}

	/* set scan data status bits */
	evt_type |= (scan_data_status << 5);

	/* Start constructing the event for remainder of the PDU data */
	ext_adv_info_fill(evt_type, phy, sec_phy_scan, adv_addr_type, adv_addr,
			  direct_addr_type, direct_addr, rl_idx,
			  scan_rsp_tx_pwr, rssi, interval_le16, adi,
			  scan_data_len, scan_data, evt_buf);

	node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
}
7324
7325 static void le_adv_ext_report(struct pdu_data *pdu_data,
7326 struct node_rx_pdu *node_rx,
7327 struct net_buf *buf, uint8_t phy)
7328 {
7329 struct pdu_adv *adv = (void *)pdu_data;
7330
7331 if ((adv->type == PDU_ADV_TYPE_EXT_IND) && adv->len) {
7332 le_ext_adv_report(pdu_data, node_rx, buf, phy);
7333 } else {
7334 le_ext_adv_legacy_report(pdu_data, node_rx, buf);
7335 }
7336 }
7337
/* Report an advertising PDU received on the LE 1M primary PHY. */
static void le_adv_ext_1M_report(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_1M);
}
7344
/* Report an advertising PDU received on the LE 2M PHY. */
static void le_adv_ext_2M_report(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_2M);
}
7351
/* Report an advertising PDU received on the LE Coded PHY. */
static void le_adv_ext_coded_report(struct pdu_data *pdu_data,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_CODED);
}
7358
7359 static void le_scan_timeout(struct pdu_data *pdu_data,
7360 struct node_rx_pdu *node_rx, struct net_buf *buf)
7361 {
7362 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7363 !(le_event_mask & BT_EVT_MASK_LE_SCAN_TIMEOUT)) {
7364 return;
7365 }
7366
7367 meta_evt(buf, BT_HCI_EVT_LE_SCAN_TIMEOUT, 0U);
7368 }
7369
7370 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* Generate the LE Periodic Advertising Sync Established meta event.
 *
 * pdu_data here actually carries a struct node_rx_sync (alignment is
 * asserted below before the reinterpretation). On host-cancel only the
 * status field of the event is filled; otherwise the remaining fields are
 * populated from the scan set's periodic sync parameters.
 */
static void le_per_adv_sync_established(struct pdu_data *pdu_data,
					struct node_rx_pdu *node_rx,
					struct net_buf *buf)
{
	struct bt_hci_evt_le_per_adv_sync_established *sep;
	struct ll_scan_set *scan;
	struct node_rx_sync *se;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		       sizeof(*sep));

	/* Check for pdu field being aligned before accessing sync established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync));

	se = node;
	sep->status = se->status;

	/* On cancel by Host, only status is meaningful; leave the rest of
	 * the (zero-initialized) event untouched.
	 */
	if (se->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
		return;
	}

	scan = node_rx->hdr.rx_ftr.param;

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	/* Restart duplicate filtering for this periodic advertiser */
	dup_periodic_adv_reset(scan->periodic.adv_addr_type,
			       scan->periodic.adv_addr,
			       scan->periodic.sid);
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);

	/* Resolved address, if private, has been populated in ULL */
	sep->adv_addr.type = scan->periodic.adv_addr_type;
	(void)memcpy(sep->adv_addr.a.val, scan->periodic.adv_addr, BDADDR_SIZE);

	sep->sid = scan->periodic.sid;
	sep->phy = find_lsb_set(se->phy);
	sep->interval = sys_cpu_to_le16(se->interval);
	sep->clock_accuracy = se->sca;
}
7423
/* Generate LE Periodic Advertising Report event(s) — and, when a BIGInfo
 * ACAD entry is present, an LE BIGInfo Advertising Report — for a received
 * AUX_SYNC_IND/AUX_CHAIN_IND PDU.
 *
 * Parses the extended header to locate Tx power, ADI, AuxPtr, ACAD and AD
 * data; applies rx_enable/duplicate-filter acceptance; fragments the AD
 * data across as many HCI event buffers as needed; and finally appends the
 * BIGInfo report if one is found in the ACAD.
 */
static void le_per_adv_sync_report(struct pdu_data *pdu_data,
				   struct node_rx_pdu *node_rx,
				   struct net_buf *buf)
{
	struct node_rx_ftr *ftr = &node_rx->hdr.rx_ftr;
	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	struct pdu_adv *adv = (void *)pdu_data;
	struct pdu_adv_aux_ptr *aux_ptr = NULL;
	const struct pdu_adv_adi *adi = NULL;
	uint8_t cte_type = BT_HCI_LE_NO_CTE;
	const struct ll_sync_set *sync;
	struct pdu_adv_com_ext_adv *p;
	struct pdu_adv_ext_hdr *h;
	uint16_t data_len_total;
	struct net_buf *evt_buf;
	uint8_t data_len = 0U;
	uint8_t acad_len = 0U;
	uint8_t *data = NULL;
	uint8_t data_len_max;
	uint8_t *acad = NULL;
	uint8_t hdr_buf_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	int8_t rssi;
	bool accept;

	/* Nothing to do unless at least one of the two sub-events is
	 * unmasked by the Host.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	     !(le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT))) {
		return;
	}

	/* NOTE: The timeout_reload field in the sync context is checked under
	 *       race condition between HCI Tx and Rx thread wherein a sync
	 *       terminate was performed which resets the timeout_reload field
	 *       before releasing the sync context back into its memory pool.
	 *       It is important that timeout_reload field is at safe offset
	 *       inside the sync context such that it is not corrupt while being
	 *       in the memory pool.
	 *
	 *       This check ensures reports are not sent out after sync
	 *       terminate.
	 */
	sync = HDR_LLL2ULL(ftr->param);
	if (unlikely(!sync->timeout_reload)) {
		return;
	}

	/* A failed aux reception yields a data-less "incomplete" report */
	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	    node_rx->hdr.rx_ftr.aux_failed) {
		struct bt_hci_evt_le_per_advertising_report *sep;

		sep = meta_evt(buf,
			       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
			       sizeof(*sep));

		sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
		sep->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		sep->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
		sep->cte_type = BT_HCI_LE_NO_CTE;
		sep->data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
		sep->length = 0;

		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(node_rx->hdr.rx_ftr.rssi);

	LOG_DBG("len = %u, rssi = %d", adv->len, rssi);

	p = (void *)&adv->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = (void *)h;

	LOG_DBG(" Per. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);

	/* No extended header: payload is all AD data */
	if (!p->ext_hdr_len) {
		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

		goto no_ext_hdr;
	}

	ptr = h->data;

	/* AdvA/TgtA are skipped; not part of the periodic report */
	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	/* Optional CTEInfo field */
	if (h->cte_info) {
		struct pdu_cte_info *cte_info;

		cte_info = (void *)ptr;
		cte_type = cte_info->type;
		ptr++;

		LOG_DBG(" CTE type= %d", cte_type);
	}

	/* Optional ADI field (used for duplicate filtering below) */
	if (h->adi) {
		adi = (void *)ptr;

		ptr += sizeof(struct pdu_adv_adi);
	}

	/* AuxPtr */
	if (h->aux_ptr) {
		uint8_t aux_phy;

		aux_ptr = (void *)ptr;
		/* Invalid secondary PHY value: drop the report */
		if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
			return;
		}

		ptr += sizeof(*aux_ptr);

		aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));

		LOG_DBG(" AuxPtr chan_idx = %u, ca = %u, offs_units "
			"= %u offs = 0x%x, phy = 0x%x",
			aux_ptr->chan_idx, aux_ptr->ca,
			aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
	}

	/* No SyncInfo */
	if (h->sync_info) {
		ptr += sizeof(struct pdu_adv_sync_info);
	}

	/* Tx Power */
	if (h->tx_pwr) {
		tx_pwr = *(int8_t *)ptr;
		ptr++;

		LOG_DBG(" Tx pwr= %d dB", tx_pwr);
	}

	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		LOG_WRN(" Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
	} else {
		/* Anything between parsed header fields and AD data is ACAD
		 * (may contain a BIGInfo entry, consumed further below).
		 */
		acad_len = hdr_buf_len - hdr_len;
		if (acad_len) {
			acad = ptr;

			ptr += acad_len;
			hdr_len += acad_len;
		}
	}

no_ext_hdr:
	/* Remaining payload after the header is AD data */
	if (hdr_len < adv->len) {
		data_len = adv->len - hdr_len;
		data = ptr;

		LOG_DBG(" AD Data (%u): <todo>", data_len);
	}

	/* Acceptance: rx enabled, and (when ADI-based duplicate filtering is
	 * built in and ADI present) not a duplicate.
	 */
	if (0) {

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
		   adi) {
		uint8_t data_status;

		data_status = (aux_ptr) ?
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL :
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;

		accept = sync->rx_enable && ftr->sync_rx_enabled &&
			 (!sync->nodups ||
			  !dup_found(PDU_ADV_TYPE_EXT_IND,
				     sync->peer_id_addr_type,
				     sync->peer_id_addr,
				     DUP_EXT_ADV_MODE_PERIODIC,
				     adi, data_status));
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	} else {
		accept = sync->rx_enable && ftr->sync_rx_enabled;
	}

	/* Maximum AD data payload per HCI event buffer */
	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
		       sizeof(struct bt_hci_evt_le_meta_event) -
		       sizeof(struct bt_hci_evt_le_per_advertising_report);
	data_len_total = node_rx->hdr.rx_ftr.aux_data_len;

	evt_buf = buf;

	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) && accept &&
	    ((data_len_total - data_len) < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {

		/* Pass verdict in LL.TS.p19 section 4.2.3.6 Extended Scanning,
		 * Passive, Periodic Advertising Report, RSSI and TX_Power
		 * states:
		 * TX_Power is set to value of the TxPower field for the
		 * AUX_SYNC_IND received, and RSSI set to a valid value.
		 * Subsequent reports with data and the status set to
		 * "Incomplete, more data to come" or "complete" can have the
		 * TX_Power field set to 0x7F.
		 *
		 * In the implementation data_len_total is the running total
		 * AD data length so far, data_len is the current PDU's AD data
		 * length. For AUX_SYNC_IND received, data_len_total ==
		 * data_len.
		 */
		if (data_len_total > data_len) {
			/* Subsequent reports */
			tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		}

		/* Clamp this PDU's contribution to the configured maximum */
		data_len = MIN(data_len, (CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX +
					  data_len - data_len_total));

		/* Emit one report fragment per loop iteration */
		do {
			struct bt_hci_evt_le_per_advertising_report *sep;
			uint8_t data_len_frag;
			uint8_t data_status;

			data_len_frag = MIN(data_len, data_len_max);

			/* Start constructing periodic advertising report */
			sep = meta_evt(evt_buf,
				       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
				       sizeof(*sep) + data_len_frag);

			sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
			sep->tx_power = tx_pwr;
			sep->rssi = rssi;
			sep->cte_type = cte_type;
			sep->length = data_len_frag;
			memcpy(&sep->data[0], data, data_len_frag);

			data += data_len_frag;
			data_len -= data_len_frag;

			if (data_len > 0) {
				/* Some data left in PDU, mark as partial data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;

				evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
				LL_ASSERT(evt_buf);

				net_buf_frag_add(buf, evt_buf);

				tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
			} else if (!aux_ptr &&
				   (data_len_total <= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
				/* No data left, no AuxPtr, mark as complete data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
			} else if (ftr->aux_sched &&
				   (data_len_total < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
				/* No data left, but have AuxPtr and scheduled aux scan,
				 * mark as partial data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
			} else {
				/* No data left, have AuxPtr but not aux scan scheduled,
				 * mark as incomplete data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			}

			sep->data_status = data_status;
		} while (data_len > 0);

		/* Signal the BIGInfo path below that `buf` was consumed */
		evt_buf = NULL;
	}

	/* BIGInfo advertising report from the ACAD, if present */
	if ((le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT) && acad &&
	    (acad_len >= (PDU_BIG_INFO_CLEARTEXT_SIZE +
			  PDU_ADV_DATA_HEADER_SIZE))) {
		struct bt_hci_evt_le_biginfo_adv_report *sep;
		struct pdu_big_info *bi;
		uint8_t bi_size;

		/* FIXME: Parse and find the BIGInfo */
		if (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] != BT_DATA_BIG_INFO) {
			return;
		}

		bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

		/* Allocate new event buffer if periodic advertising report was
		 * constructed with the caller supplied buffer.
		 */
		if (!evt_buf) {
			evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
			LL_ASSERT(evt_buf);

			net_buf_frag_add(buf, evt_buf);
		}

		/* Start constructing BIGInfo advertising report */
		sep = meta_evt(evt_buf, BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
			       sizeof(*sep));

		sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);

		/* NOTE: both sep and bi struct store little-endian values,
		 *       explicit endian-ness conversion not required.
		 */
		sep->num_bis = bi->num_bis;
		sep->nse = bi->nse;
		sep->iso_interval = bi->iso_interval;
		sep->bn = bi->bn;
		sep->pto = bi->pto;
		sep->irc = bi->irc;
		sep->max_pdu = bi->max_pdu;
		sys_put_le24(sys_le24_to_cpu(bi->sdu_interval),
			     sep->sdu_interval);
		sep->max_sdu = bi->max_sdu;
		/* PHY is in the top 3 bits of the last chm_phy byte */
		sep->phy = HCI_AUX_PHY_TO_HCI_PHY(bi->chm_phy[4] >> 5);
		/* Framing is the MSbit of the last payload_count byte */
		sep->framing = (bi->payload_count_framing[4] >> 7) & 0x01;
		/* Encrypted BIGInfo is longer; length discriminates */
		if (bi_size == (PDU_BIG_INFO_ENCRYPTED_SIZE + 1)) {
			sep->encryption = 1U;
		} else {
			sep->encryption = 0U;
		}
	}
}
7754
7755 static void le_per_adv_sync_lost(struct pdu_data *pdu_data,
7756 struct node_rx_pdu *node_rx,
7757 struct net_buf *buf)
7758 {
7759 struct bt_hci_evt_le_per_adv_sync_lost *sep;
7760
7761 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7762 !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_LOST)) {
7763 return;
7764 }
7765
7766 sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, sizeof(*sep));
7767 sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7768 }
7769
7770 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
/* Generate the LE BIG Sync Established meta event.
 *
 * pdu actually carries a struct node_rx_sync_iso (alignment asserted below
 * before the reinterpretation). The event is variable-size: a trailing
 * array of one 16-bit connection handle per synchronized BIS. On failure
 * status only the status (and big_handle) fields are meaningful.
 */
static void le_big_sync_established(struct pdu_data *pdu,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	struct bt_hci_evt_le_big_sync_established *sep;
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_sync_iso *se;
	struct lll_sync_iso *lll;
	size_t evt_size;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED)) {
		return;
	}

	sync_iso = node_rx->hdr.rx_ftr.param;
	lll = &sync_iso->lll;

	/* Variable-size event: one handle slot per BIS */
	evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));

	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED, evt_size);
	sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);

	/* Check for pdu field being aligned before accessing ISO sync
	 * established event.
	 */
	node = pdu;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync_iso));

	se = node;
	sep->status = se->status;
	if (sep->status) {
		/* Failed: remaining fields are not filled */
		return;
	}

	/* FIXME: Fill latency */
	sys_put_le24(0, sep->latency);

	sep->nse = lll->nse;
	sep->bn = lll->bn;
	sep->pto = lll->pto;
	sep->irc = lll->irc;
	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
	sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
	sep->num_bis = lll->stream_count;

	/* Connection handle list of all BISes synchronized in the BIG */
	for (uint8_t i = 0U; i < lll->stream_count; i++) {
		uint16_t handle;

		handle = LL_BIS_SYNC_HANDLE_FROM_IDX(lll->stream_handle[i]);
		sep->handle[i] = sys_cpu_to_le16(handle);
	}
}
7826
7827 static void le_big_sync_lost(struct pdu_data *pdu,
7828 struct node_rx_pdu *node_rx,
7829 struct net_buf *buf)
7830 {
7831 struct bt_hci_evt_le_big_sync_lost *sep;
7832
7833 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7834 !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_LOST)) {
7835 return;
7836 }
7837
7838 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_LOST, sizeof(*sep));
7839 sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7840 sep->reason = *((uint8_t *)pdu);
7841 }
7842 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
7843 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
7844 #endif /* CONFIG_BT_CTLR_ADV_EXT */
7845 #endif /* CONFIG_BT_OBSERVER */
7846
7847 #if defined(CONFIG_BT_BROADCASTER)
7848 #if defined(CONFIG_BT_CTLR_ADV_EXT)
7849 static void le_adv_ext_terminate(struct pdu_data *pdu_data,
7850 struct node_rx_pdu *node_rx,
7851 struct net_buf *buf)
7852 {
7853 struct bt_hci_evt_le_adv_set_terminated *sep;
7854
7855 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7856 !(le_event_mask & BT_EVT_MASK_LE_ADV_SET_TERMINATED)) {
7857 return;
7858 }
7859
7860 sep = meta_evt(buf, BT_HCI_EVT_LE_ADV_SET_TERMINATED, sizeof(*sep));
7861 sep->status = node_rx->hdr.rx_ftr.param_adv_term.status;
7862 sep->adv_handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
7863 sep->conn_handle =
7864 sys_cpu_to_le16(node_rx->hdr.rx_ftr.param_adv_term.conn_handle);
7865 sep->num_completed_ext_adv_evts =
7866 node_rx->hdr.rx_ftr.param_adv_term.num_events;
7867 }
7868
7869 #if defined(CONFIG_BT_CTLR_ADV_ISO)
7870 static void le_big_complete(struct pdu_data *pdu_data,
7871 struct node_rx_pdu *node_rx,
7872 struct net_buf *buf)
7873 {
7874 struct bt_hci_evt_le_big_complete *sep;
7875 struct ll_adv_iso_set *adv_iso;
7876 struct lll_adv_iso *lll;
7877 size_t evt_size;
7878
7879 adv_iso = node_rx->hdr.rx_ftr.param;
7880 lll = &adv_iso->lll;
7881
7882 evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
7883
7884 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_COMPLETE, evt_size);
7885
7886 sep->status = BT_HCI_ERR_SUCCESS;
7887 sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7888
7889 if (sep->status) {
7890 return;
7891 }
7892
7893 /* FIXME: Fill sync delay and latency */
7894 sys_put_le24(0, sep->sync_delay);
7895 sys_put_le24(0, sep->latency);
7896
7897 sep->phy = find_lsb_set(lll->phy);
7898 sep->nse = lll->nse;
7899 sep->bn = lll->bn;
7900 sep->pto = lll->pto;
7901 sep->irc = lll->irc;
7902 sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
7903 sep->num_bis = lll->num_bis;
7904
7905 /* Connection handle list of all BISes in the BIG */
7906 for (uint8_t i = 0U; i < lll->num_bis; i++) {
7907 uint16_t handle;
7908
7909 handle = LL_BIS_ADV_HANDLE_FROM_IDX(lll->stream_handle[i]);
7910 sep->handle[i] = sys_cpu_to_le16(handle);
7911 }
7912 }
7913
7914 static void le_big_terminate(struct pdu_data *pdu,
7915 struct node_rx_pdu *node_rx,
7916 struct net_buf *buf)
7917 {
7918 struct bt_hci_evt_le_big_terminate *sep;
7919
7920 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7921 !(le_event_mask & BT_EVT_MASK_LE_BIG_TERMINATED)) {
7922 return;
7923 }
7924
7925 sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_TERMINATE, sizeof(*sep));
7926 sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7927 sep->reason = *((uint8_t *)pdu);
7928 }
7929 #endif /* CONFIG_BT_CTLR_ADV_ISO */
7930 #endif /* CONFIG_BT_CTLR_ADV_EXT */
7931 #endif /* CONFIG_BT_BROADCASTER */
7932
7933 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
/* Encode the HCI LE Scan Request Received meta event.
 *
 * If the event is masked, the scan request is still decoded but only
 * logged at debug level, then dropped.
 *
 * With privacy enabled and the scanner resolved in the resolving list,
 * the reported address is the peer identity address with its type
 * offset by 2 (0x02/0x03) to mark "identity address from RPA".
 */
static void le_scan_req_received(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_scan_req_received *sep;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_REQ_RECEIVED)) {
		bt_addr_le_t addr;
		uint8_t handle;
		int8_t rssi;

		handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
		addr.type = adv->tx_addr;
		memcpy(&addr.a.val[0], &adv->scan_req.scan_addr[0],
		       sizeof(bt_addr_t));

		/* The Link Layer currently returns RSSI as an absolute value */
		rssi = -(node_rx->hdr.rx_ftr.rssi);

		LOG_DBG("handle: %d, addr: %s, rssi: %d dB.", handle, bt_addr_le_str(&addr), rssi);

		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, sizeof(*sep));
	sep->handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
	sep->addr.type = adv->tx_addr;
	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
	       sizeof(bt_addr_t));

	/* Note: the #else branch below supplies an always-true `if (1)` so
	 * the shared `else` body compiles whether or not privacy is enabled.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
				  &sep->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		sep->addr.type += 2U;
	} else {
#else
	if (1) {
#endif
		sep->addr.type = adv->tx_addr;
		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}
}
7987 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
7988
7989 #if defined(CONFIG_BT_CONN)
/* Encode either the LE Enhanced Connection Complete or the legacy
 * LE Connection Complete meta event for a new connection.
 *
 * The enhanced variant is preferred whenever the host has unmasked it
 * (only possible with privacy or extended advertising support compiled
 * in). On success, conn_count is incremented even when both events are
 * emitted as masked-off (the early return covers the fully-masked case).
 */
static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
			     struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_complete *lecc;
	struct node_rx_cc *cc;
	uint8_t status;
	void *node;

	/* Check for pdu field being aligned before accessing connection
	 * complete event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

	cc = node;
	status = cc->status;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!status) {
		/* Update current RPA */
		ll_rl_crpa_set(cc->peer_addr_type,
			       &cc->peer_addr[0], 0xff,
			       &cc->peer_rpa[0]);
	}
#endif

	/* The #else arm substitutes a constant-true operand so the
	 * combined mask check compiles without the enhanced-event mask.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_CONN_COMPLETE) &&
#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	     !(le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE))) {
#else
	     1)) {
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
		return;
	}

	if (!status) {
		conn_count++;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	if (le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE) {
		struct bt_hci_evt_le_enh_conn_complete *leecc;

		leecc = meta_evt(buf, BT_HCI_EVT_LE_ENH_CONN_COMPLETE,
				 sizeof(*leecc));

		/* On failure only the status field is meaningful */
		if (status) {
			(void)memset(leecc, 0x00, sizeof(*leecc));
			leecc->status = status;
			return;
		}

		leecc->status = 0x00;
		leecc->handle = sys_cpu_to_le16(handle);
		leecc->role = cc->role;

		leecc->peer_addr.type = cc->peer_addr_type;
		memcpy(&leecc->peer_addr.a.val[0], &cc->peer_addr[0],
		       BDADDR_SIZE);

#if defined(CONFIG_BT_CTLR_PRIVACY)
		memcpy(&leecc->local_rpa.val[0], &cc->local_rpa[0],
		       BDADDR_SIZE);
		memcpy(&leecc->peer_rpa.val[0], &cc->peer_rpa[0],
		       BDADDR_SIZE);
#else /* !CONFIG_BT_CTLR_PRIVACY */
		/* Without privacy support no RPAs exist; report all-zero */
		memset(&leecc->local_rpa.val[0], 0, BDADDR_SIZE);
		memset(&leecc->peer_rpa.val[0], 0, BDADDR_SIZE);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		leecc->interval = sys_cpu_to_le16(cc->interval);
		leecc->latency = sys_cpu_to_le16(cc->latency);
		leecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
		leecc->clock_accuracy = cc->sca;
		return;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */

	lecc = meta_evt(buf, BT_HCI_EVT_LE_CONN_COMPLETE, sizeof(*lecc));

	if (status) {
		(void)memset(lecc, 0x00, sizeof(*lecc));
		lecc->status = status;
		return;
	}

	lecc->status = 0x00;
	lecc->handle = sys_cpu_to_le16(handle);
	lecc->role = cc->role;
	/* Legacy event only distinguishes public/random (bit 0) */
	lecc->peer_addr.type = cc->peer_addr_type & 0x1;
	memcpy(&lecc->peer_addr.a.val[0], &cc->peer_addr[0], BDADDR_SIZE);
	lecc->interval = sys_cpu_to_le16(cc->interval);
	lecc->latency = sys_cpu_to_le16(cc->latency);
	lecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
	lecc->clock_accuracy = cc->sca;
}
8087
8088 void hci_disconn_complete_encode(struct pdu_data *pdu_data, uint16_t handle,
8089 struct net_buf *buf)
8090 {
8091 struct bt_hci_evt_disconn_complete *ep;
8092
8093 if (!(event_mask & BT_EVT_MASK_DISCONN_COMPLETE)) {
8094 return;
8095 }
8096
8097 hci_evt_create(buf, BT_HCI_EVT_DISCONN_COMPLETE, sizeof(*ep));
8098 ep = net_buf_add(buf, sizeof(*ep));
8099
8100 ep->status = 0x00;
8101 ep->handle = sys_cpu_to_le16(handle);
8102 ep->reason = *((uint8_t *)pdu_data);
8103 }
8104
/* Book-keeping on disconnection: release any host-buffer flow-control
 * credits still pending for the handle, and drop the connection count.
 */
void hci_disconn_complete_process(uint16_t handle)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* Clear any pending packets upon disconnection */
	/* Note: This requires linear handle values starting from 0 */
	if (handle >= ARRAY_SIZE(hci_hbuf_pend)) {
		/* NOTE(review): out-of-range handles return here without
		 * decrementing conn_count — presumably unreachable for
		 * valid connections; confirm.
		 */
		return;
	}

	/* Treat all still-pending packets on this handle as acknowledged */
	hci_hbuf_acked += hci_hbuf_pend[handle];
	hci_hbuf_pend[handle] = 0U;
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	conn_count--;
}
8120
8121 static void le_conn_update_complete(struct pdu_data *pdu_data, uint16_t handle,
8122 struct net_buf *buf)
8123 {
8124 struct bt_hci_evt_le_conn_update_complete *sep;
8125 struct node_rx_cu *cu;
8126 void *node;
8127
8128 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8129 !(le_event_mask & BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE)) {
8130 return;
8131 }
8132
8133 sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE, sizeof(*sep));
8134
8135 /* Check for pdu field being aligned before accessing connection
8136 * update complete event.
8137 */
8138 node = pdu_data;
8139 LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cu));
8140
8141 cu = node;
8142 sep->status = cu->status;
8143 sep->handle = sys_cpu_to_le16(handle);
8144 sep->interval = sys_cpu_to_le16(cu->interval);
8145 sep->latency = sys_cpu_to_le16(cu->latency);
8146 sep->supv_timeout = sys_cpu_to_le16(cu->timeout);
8147 }
8148
8149 #if defined(CONFIG_BT_CTLR_LE_ENC)
8150 static void enc_refresh_complete(struct pdu_data *pdu_data, uint16_t handle,
8151 struct net_buf *buf)
8152 {
8153 struct bt_hci_evt_encrypt_key_refresh_complete *ep;
8154
8155 if (!(event_mask & BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE)) {
8156 return;
8157 }
8158
8159 hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
8160 sizeof(*ep));
8161 ep = net_buf_add(buf, sizeof(*ep));
8162
8163 ep->status = 0x00;
8164 ep->handle = sys_cpu_to_le16(handle);
8165 }
8166 #endif /* CONFIG_BT_CTLR_LE_ENC */
8167
8168 #if defined(CONFIG_BT_CTLR_LE_PING)
8169 static void auth_payload_timeout_exp(struct pdu_data *pdu_data, uint16_t handle,
8170 struct net_buf *buf)
8171 {
8172 struct bt_hci_evt_auth_payload_timeout_exp *ep;
8173
8174 if (!(event_mask_page_2 & BT_EVT_MASK_AUTH_PAYLOAD_TIMEOUT_EXP)) {
8175 return;
8176 }
8177
8178 hci_evt_create(buf, BT_HCI_EVT_AUTH_PAYLOAD_TIMEOUT_EXP, sizeof(*ep));
8179 ep = net_buf_add(buf, sizeof(*ep));
8180
8181 ep->handle = sys_cpu_to_le16(handle);
8182 }
8183 #endif /* CONFIG_BT_CTLR_LE_PING */
8184
8185 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8186 static void le_chan_sel_algo(struct pdu_data *pdu_data, uint16_t handle,
8187 struct net_buf *buf)
8188 {
8189 struct bt_hci_evt_le_chan_sel_algo *sep;
8190 struct node_rx_cs *cs;
8191
8192 cs = (void *)pdu_data;
8193
8194 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8195 !(le_event_mask & BT_EVT_MASK_LE_CHAN_SEL_ALGO)) {
8196 LOG_DBG("handle: 0x%04x, CSA: %x.", handle, cs->csa);
8197 return;
8198 }
8199
8200 sep = meta_evt(buf, BT_HCI_EVT_LE_CHAN_SEL_ALGO, sizeof(*sep));
8201
8202 sep->handle = sys_cpu_to_le16(handle);
8203 sep->chan_sel_algo = cs->csa;
8204 }
8205 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8206
8207 #if defined(CONFIG_BT_CTLR_PHY)
8208 static void le_phy_upd_complete(struct pdu_data *pdu_data, uint16_t handle,
8209 struct net_buf *buf)
8210 {
8211 struct bt_hci_evt_le_phy_update_complete *sep;
8212 struct node_rx_pu *pu;
8213
8214 pu = (void *)pdu_data;
8215
8216 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8217 !(le_event_mask & BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE)) {
8218 LOG_WRN("handle: 0x%04x, status: %x, tx: %x, rx: %x.", handle, pu->status,
8219 find_lsb_set(pu->tx), find_lsb_set(pu->rx));
8220 return;
8221 }
8222
8223 sep = meta_evt(buf, BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE, sizeof(*sep));
8224
8225 sep->status = pu->status;
8226 sep->handle = sys_cpu_to_le16(handle);
8227 sep->tx_phy = find_lsb_set(pu->tx);
8228 sep->rx_phy = find_lsb_set(pu->rx);
8229 }
8230 #endif /* CONFIG_BT_CTLR_PHY */
8231
8232 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8233 static void le_req_peer_sca_complete(struct pdu_data *pdu, uint16_t handle,
8234 struct net_buf *buf)
8235 {
8236 struct bt_hci_evt_le_req_peer_sca_complete *sep;
8237 struct node_rx_sca *scau;
8238
8239 scau = (void *)pdu;
8240
8241 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8242 !(le_event_mask & BT_EVT_MASK_LE_REQ_PEER_SCA_COMPLETE)) {
8243 LOG_WRN("handle: 0x%04x, status: %x, sca: %x.", handle,
8244 scau->status,
8245 scau->sca);
8246 return;
8247 }
8248
8249 sep = meta_evt(buf, BT_HCI_EVT_LE_REQ_PEER_SCA_COMPLETE, sizeof(*sep));
8250
8251 sep->status = scau->status;
8252 sep->handle = sys_cpu_to_le16(handle);
8253 sep->sca = scau->sca;
8254 }
8255 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8256 #endif /* CONFIG_BT_CONN */
8257
8258 #if defined(CONFIG_BT_HCI_MESH_EXT)
8259 static void mesh_adv_cplt(struct pdu_data *pdu_data,
8260 struct node_rx_pdu *node_rx,
8261 struct net_buf *buf)
8262 {
8263 struct bt_hci_evt_mesh_adv_complete *mep;
8264
8265 mep = mesh_evt(buf, BT_HCI_EVT_MESH_ADV_COMPLETE, sizeof(*mep));
8266 mep->adv_slot = ((uint8_t *)pdu_data)[0];
8267 }
8268 #endif /* CONFIG_BT_HCI_MESH_EXT */
8269
8270 /**
8271 * @brief Encode a control-PDU into an HCI buffer
8272 * @details Execution context: Host thread
8273 *
8274 * @param node_rx_pdu[in] RX node containing header and PDU
8275 * @param pdu_data[in] PDU. Same as node_rx_pdu->pdu, but more convenient
8276 * @param net_buf[out] Upwards-going HCI buffer to fill
8277 */
8278 static void encode_control(struct node_rx_pdu *node_rx,
8279 struct pdu_data *pdu_data, struct net_buf *buf)
8280 {
8281 uint16_t handle;
8282
8283 handle = node_rx->hdr.handle;
8284
8285 switch (node_rx->hdr.type) {
8286 #if defined(CONFIG_BT_OBSERVER)
8287 case NODE_RX_TYPE_REPORT:
8288 le_advertising_report(pdu_data, node_rx, buf);
8289 break;
8290
8291 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8292 case NODE_RX_TYPE_EXT_1M_REPORT:
8293 le_adv_ext_1M_report(pdu_data, node_rx, buf);
8294 break;
8295
8296 case NODE_RX_TYPE_EXT_2M_REPORT:
8297 le_adv_ext_2M_report(pdu_data, node_rx, buf);
8298 break;
8299
8300 case NODE_RX_TYPE_EXT_CODED_REPORT:
8301 le_adv_ext_coded_report(pdu_data, node_rx, buf);
8302 break;
8303
8304 case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
8305 le_scan_timeout(pdu_data, node_rx, buf);
8306 break;
8307
8308 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
8309 case NODE_RX_TYPE_SYNC:
8310 le_per_adv_sync_established(pdu_data, node_rx, buf);
8311 break;
8312
8313 case NODE_RX_TYPE_SYNC_REPORT:
8314 le_per_adv_sync_report(pdu_data, node_rx, buf);
8315 break;
8316
8317 case NODE_RX_TYPE_SYNC_LOST:
8318 le_per_adv_sync_lost(pdu_data, node_rx, buf);
8319 break;
8320
8321 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
8322 case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
8323 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
8324 vs_le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8325 #else
8326 le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8327 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
8328 break;
8329 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
8330
8331 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
8332 case NODE_RX_TYPE_SYNC_ISO:
8333 le_big_sync_established(pdu_data, node_rx, buf);
8334 break;
8335
8336 case NODE_RX_TYPE_SYNC_ISO_LOST:
8337 le_big_sync_lost(pdu_data, node_rx, buf);
8338 break;
8339 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8340 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8341 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8342 #endif /* CONFIG_BT_OBSERVER */
8343
8344 #if defined(CONFIG_BT_BROADCASTER)
8345 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8346 case NODE_RX_TYPE_EXT_ADV_TERMINATE:
8347 le_adv_ext_terminate(pdu_data, node_rx, buf);
8348 break;
8349
8350 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8351 case NODE_RX_TYPE_BIG_COMPLETE:
8352 le_big_complete(pdu_data, node_rx, buf);
8353 break;
8354 case NODE_RX_TYPE_BIG_TERMINATE:
8355 le_big_terminate(pdu_data, node_rx, buf);
8356 break;
8357 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8358 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8359 #endif /* CONFIG_BT_BROADCASTER */
8360
8361 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8362 case NODE_RX_TYPE_SCAN_REQ:
8363 le_scan_req_received(pdu_data, node_rx, buf);
8364 break;
8365 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8366
8367 #if defined(CONFIG_BT_CONN)
8368 case NODE_RX_TYPE_CONNECTION:
8369 le_conn_complete(pdu_data, handle, buf);
8370 break;
8371
8372 case NODE_RX_TYPE_TERMINATE:
8373 hci_disconn_complete_encode(pdu_data, handle, buf);
8374 break;
8375
8376 case NODE_RX_TYPE_CONN_UPDATE:
8377 le_conn_update_complete(pdu_data, handle, buf);
8378 break;
8379
8380 #if defined(CONFIG_BT_CTLR_LE_ENC)
8381 case NODE_RX_TYPE_ENC_REFRESH:
8382 enc_refresh_complete(pdu_data, handle, buf);
8383 break;
8384 #endif /* CONFIG_BT_CTLR_LE_ENC */
8385
8386 #if defined(CONFIG_BT_CTLR_LE_PING)
8387 case NODE_RX_TYPE_APTO:
8388 auth_payload_timeout_exp(pdu_data, handle, buf);
8389 break;
8390 #endif /* CONFIG_BT_CTLR_LE_PING */
8391
8392 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8393 case NODE_RX_TYPE_CHAN_SEL_ALGO:
8394 le_chan_sel_algo(pdu_data, handle, buf);
8395 break;
8396 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8397
8398 #if defined(CONFIG_BT_CTLR_PHY)
8399 case NODE_RX_TYPE_PHY_UPDATE:
8400 le_phy_upd_complete(pdu_data, handle, buf);
8401 return;
8402 #endif /* CONFIG_BT_CTLR_PHY */
8403
8404 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
8405 case NODE_RX_TYPE_RSSI:
8406 LOG_INF("handle: 0x%04x, rssi: -%d dB.", handle, pdu_data->rssi);
8407 return;
8408 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
8409
8410 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
8411 case NODE_RX_TYPE_CIS_REQUEST:
8412 le_cis_request(pdu_data, node_rx, buf);
8413 return;
8414 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
8415
8416 #if defined(CONFIG_BT_CTLR_CONN_ISO)
8417 case NODE_RX_TYPE_CIS_ESTABLISHED:
8418 le_cis_established(pdu_data, node_rx, buf);
8419 return;
8420 #endif /* CONFIG_BT_CTLR_CONN_ISO */
8421
8422 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8423 case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
8424 le_req_peer_sca_complete(pdu_data, handle, buf);
8425 return;
8426 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8427
8428 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
8429 case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
8430 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
8431 vs_le_df_connection_iq_report(node_rx, buf);
8432 #else
8433 le_df_connection_iq_report(node_rx, buf);
8434 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
8435 return;
8436 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
8437 #endif /* CONFIG_BT_CONN */
8438
8439 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
8440 case NODE_RX_TYPE_ADV_INDICATION:
8441 LOG_INF("Advertised.");
8442 return;
8443 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
8444
8445 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
8446 case NODE_RX_TYPE_SCAN_INDICATION:
8447 LOG_INF("Scanned.");
8448 return;
8449 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
8450
8451 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
8452 case NODE_RX_TYPE_PROFILE:
8453 LOG_INF("l: %u, %u, %u; t: %u, %u, %u; cpu: %u, %u, %u, %u.",
8454 pdu_data->profile.lcur, pdu_data->profile.lmin, pdu_data->profile.lmax,
8455 pdu_data->profile.cur, pdu_data->profile.min, pdu_data->profile.max,
8456 pdu_data->profile.radio, pdu_data->profile.lll, pdu_data->profile.ull_high,
8457 pdu_data->profile.ull_low);
8458 return;
8459 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
8460
8461 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
8462 case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
8463 le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8464 return;
8465 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
8466
8467 #if defined(CONFIG_BT_HCI_MESH_EXT)
8468 case NODE_RX_TYPE_MESH_ADV_CPLT:
8469 mesh_adv_cplt(pdu_data, node_rx, buf);
8470 return;
8471
8472 case NODE_RX_TYPE_MESH_REPORT:
8473 le_advertising_report(pdu_data, node_rx, buf);
8474 return;
8475 #endif /* CONFIG_BT_HCI_MESH_EXT */
8476
8477 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
8478 case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
8479 hci_user_ext_encode_control(node_rx, pdu_data, buf);
8480 return;
8481 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
8482
8483 default:
8484 LL_ASSERT(0);
8485 return;
8486 }
8487 }
8488
8489 #if defined(CONFIG_BT_CTLR_LE_ENC)
8490 static void le_ltk_request(struct pdu_data *pdu_data, uint16_t handle,
8491 struct net_buf *buf)
8492 {
8493 struct bt_hci_evt_le_ltk_request *sep;
8494
8495 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8496 !(le_event_mask & BT_EVT_MASK_LE_LTK_REQUEST)) {
8497 return;
8498 }
8499
8500 sep = meta_evt(buf, BT_HCI_EVT_LE_LTK_REQUEST, sizeof(*sep));
8501
8502 sep->handle = sys_cpu_to_le16(handle);
8503 memcpy(&sep->rand, pdu_data->llctrl.enc_req.rand, sizeof(uint64_t));
8504 memcpy(&sep->ediv, pdu_data->llctrl.enc_req.ediv, sizeof(uint16_t));
8505 }
8506
8507 static void encrypt_change(uint8_t err, uint16_t handle,
8508 struct net_buf *buf)
8509 {
8510 struct bt_hci_evt_encrypt_change *ep;
8511
8512 if (!(event_mask & BT_EVT_MASK_ENCRYPT_CHANGE)) {
8513 return;
8514 }
8515
8516 hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_CHANGE, sizeof(*ep));
8517 ep = net_buf_add(buf, sizeof(*ep));
8518
8519 ep->status = err;
8520 ep->handle = sys_cpu_to_le16(handle);
8521 ep->encrypt = !err ? 1 : 0;
8522 }
8523 #endif /* CONFIG_BT_CTLR_LE_ENC */
8524
8525 static void le_remote_feat_complete(uint8_t status, struct pdu_data *pdu_data,
8526 uint16_t handle, struct net_buf *buf)
8527 {
8528 struct bt_hci_evt_le_remote_feat_complete *sep;
8529
8530 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8531 !(le_event_mask & BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE)) {
8532 return;
8533 }
8534
8535 sep = meta_evt(buf, BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE, sizeof(*sep));
8536
8537 sep->status = status;
8538 sep->handle = sys_cpu_to_le16(handle);
8539 if (!status) {
8540 memcpy(&sep->features[0],
8541 &pdu_data->llctrl.feature_rsp.features[0],
8542 sizeof(sep->features));
8543 } else {
8544 (void)memset(&sep->features[0], 0x00, sizeof(sep->features));
8545 }
8546 }
8547
/* Handle an LL_UNKNOWN_RSP control PDU: fail the host-visible operation
 * corresponding to the opcode the peer did not recognize.
 */
static void le_unknown_rsp(struct pdu_data *pdu_data, uint16_t handle,
			   struct net_buf *buf)
{

	switch (pdu_data->llctrl.unknown_rsp.type) {
	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
		/* Peer rejected feature exchange: complete the host request
		 * with an error status and a NULL (unused) PDU.
		 */
		le_remote_feat_complete(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE,
					NULL, handle, buf);
		break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
		le_df_cte_req_failed(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
	default:
		/* No host-facing action defined for other opcodes */
		LOG_WRN("type: 0x%02x", pdu_data->llctrl.unknown_rsp.type);
		break;
	}
}
8567
/* Handle an LL_REJECT_EXT_IND control PDU: propagate the peer's error
 * code to the host operation identified by the rejected opcode.
 */
static void le_reject_ext_ind(struct pdu_data *pdu, uint16_t handle, struct net_buf *buf)
{
	switch (pdu->llctrl.reject_ext_ind.reject_opcode) {
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
		le_df_cte_req_failed(pdu->llctrl.reject_ext_ind.error_code, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
	default:
		/* Other rejected opcodes are handled elsewhere or ignored */
		LOG_WRN("reject opcode: 0x%02x", pdu->llctrl.reject_ext_ind.reject_opcode);
		break;
	}
}
8581 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
8582 static void le_conn_param_req(struct pdu_data *pdu_data, uint16_t handle,
8583 struct net_buf *buf)
8584 {
8585 struct bt_hci_evt_le_conn_param_req *sep;
8586
8587 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8588 !(le_event_mask & BT_EVT_MASK_LE_CONN_PARAM_REQ)) {
8589 /* event masked, reject the conn param req */
8590 ll_conn_update(handle, 2, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, 0,
8591 0, 0, 0, NULL);
8592
8593 return;
8594 }
8595
8596 sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_PARAM_REQ, sizeof(*sep));
8597
8598 sep->handle = sys_cpu_to_le16(handle);
8599 sep->interval_min = pdu_data->llctrl.conn_param_req.interval_min;
8600 sep->interval_max = pdu_data->llctrl.conn_param_req.interval_max;
8601 sep->latency = pdu_data->llctrl.conn_param_req.latency;
8602 sep->timeout = pdu_data->llctrl.conn_param_req.timeout;
8603 }
8604 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
8605
8606 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
8607 static void le_data_len_change(struct pdu_data *pdu_data, uint16_t handle,
8608 struct net_buf *buf)
8609 {
8610 struct bt_hci_evt_le_data_len_change *sep;
8611
8612 if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8613 !(le_event_mask & BT_EVT_MASK_LE_DATA_LEN_CHANGE)) {
8614 return;
8615 }
8616
8617 sep = meta_evt(buf, BT_HCI_EVT_LE_DATA_LEN_CHANGE, sizeof(*sep));
8618
8619 sep->handle = sys_cpu_to_le16(handle);
8620 sep->max_tx_octets = pdu_data->llctrl.length_rsp.max_tx_octets;
8621 sep->max_tx_time = pdu_data->llctrl.length_rsp.max_tx_time;
8622 sep->max_rx_octets = pdu_data->llctrl.length_rsp.max_rx_octets;
8623 sep->max_rx_time = pdu_data->llctrl.length_rsp.max_rx_time;
8624 }
8625 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
8626
8627 #if defined(CONFIG_BT_REMOTE_VERSION)
8628 static void remote_version_info_encode(struct pdu_data *pdu_data,
8629 uint16_t handle, struct net_buf *buf)
8630 {
8631 struct pdu_data_llctrl_version_ind *ver_ind;
8632 struct bt_hci_evt_remote_version_info *ep;
8633
8634 if (!(event_mask & BT_EVT_MASK_REMOTE_VERSION_INFO)) {
8635 return;
8636 }
8637
8638 hci_evt_create(buf, BT_HCI_EVT_REMOTE_VERSION_INFO, sizeof(*ep));
8639 ep = net_buf_add(buf, sizeof(*ep));
8640
8641 ver_ind = &pdu_data->llctrl.version_ind;
8642 ep->status = 0x00;
8643 ep->handle = sys_cpu_to_le16(handle);
8644 ep->version = ver_ind->version_number;
8645 ep->manufacturer = ver_ind->company_id;
8646 ep->subversion = ver_ind->sub_version_number;
8647 }
8648 #endif /* CONFIG_BT_REMOTE_VERSION */
8649
/* Encode an LL control PDU received on a data channel into the
 * corresponding HCI event, dispatching on the LLCTRL opcode.
 */
static void encode_data_ctrl(struct node_rx_pdu *node_rx,
			     struct pdu_data *pdu_data, struct net_buf *buf)
{
	uint16_t handle = node_rx->hdr.handle;

	switch (pdu_data->llctrl.opcode) {

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
		le_ltk_request(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
		/* Encryption started successfully */
		encrypt_change(0x00, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_REMOTE_VERSION)
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		remote_version_info_encode(pdu_data, handle, buf);
		break;
#endif /* defined(CONFIG_BT_REMOTE_VERSION) */

	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
		le_remote_feat_complete(0x00, pdu_data, handle, buf);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
		/* Peer rejected encryption start; surface its error code */
		encrypt_change(pdu_data->llctrl.reject_ind.error_code, handle,
			       buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
		le_conn_param_req(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
		le_data_len_change(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
		/* A CTE_RSP reaching here means the response lacked a CTE */
		le_df_cte_req_failed(BT_HCI_CTE_REQ_STATUS_RSP_WITHOUT_CTE, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		le_unknown_rsp(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		le_reject_ext_ind(pdu_data, handle, buf);
		break;

	default:
		LL_ASSERT(0);
		return;
	}
}
8716
8717 #if defined(CONFIG_BT_CONN)
/* Convert a received data-channel PDU into an HCI ACL packet, and
 * account it against host-buffer flow control when enabled.
 */
void hci_acl_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
	struct bt_hci_acl_hdr *acl;
	uint16_t handle_flags;
	uint16_t handle;
	uint8_t *data;

	handle = node_rx->hdr.handle;

	switch (pdu_data->ll_id) {
	case PDU_DATA_LLID_DATA_CONTINUE:
	case PDU_DATA_LLID_DATA_START:
		acl = (void *)net_buf_add(buf, sizeof(*acl));
		/* LLID start/continue maps to the ACL packet-boundary
		 * flags packed into the handle field.
		 */
		if (pdu_data->ll_id == PDU_DATA_LLID_DATA_START) {
			handle_flags = bt_acl_handle_pack(handle, BT_ACL_START);
		} else {
			handle_flags = bt_acl_handle_pack(handle, BT_ACL_CONT);
		}
		acl->handle = sys_cpu_to_le16(handle_flags);
		acl->len = sys_cpu_to_le16(pdu_data->len);
		data = (void *)net_buf_add(buf, pdu_data->len);
		memcpy(data, pdu_data->lldata, pdu_data->len);
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
		/* Count the packet as sent-but-unacked for flow control */
		if (hci_hbuf_total > 0) {
			LL_ASSERT((hci_hbuf_sent - hci_hbuf_acked) <
				  hci_hbuf_total);
			hci_hbuf_sent++;
			/* Note: This requires linear handle values starting
			 * from 0
			 */
			LL_ASSERT(handle < ARRAY_SIZE(hci_hbuf_pend));
			hci_hbuf_pend[handle]++;
		}
#endif
		break;

	default:
		/* Only data LLIDs are expected on this path */
		LL_ASSERT(0);
		break;
	}
}
8760 #endif /* CONFIG_BT_CONN */
8761
8762 void hci_evt_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
8763 {
8764 struct pdu_data *pdu_data = (void *)node_rx->pdu;
8765
8766 if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
8767 encode_control(node_rx, pdu_data, buf);
8768 } else if (IS_ENABLED(CONFIG_BT_CONN)) {
8769 encode_data_ctrl(node_rx, pdu_data, buf);
8770 }
8771 }
8772
8773 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
8774 defined(CONFIG_BT_CTLR_CONN_ISO)
8775 void hci_num_cmplt_encode(struct net_buf *buf, uint16_t handle, uint8_t num)
8776 {
8777 struct bt_hci_evt_num_completed_packets *ep;
8778 struct bt_hci_handle_count *hc;
8779 uint8_t num_handles;
8780 uint8_t len;
8781
8782 num_handles = 1U;
8783
8784 len = (sizeof(*ep) + (sizeof(*hc) * num_handles));
8785 hci_evt_create(buf, BT_HCI_EVT_NUM_COMPLETED_PACKETS, len);
8786
8787 ep = net_buf_add(buf, len);
8788 ep->num_handles = num_handles;
8789 hc = &ep->h[0];
8790 hc->handle = sys_cpu_to_le16(handle);
8791 hc->count = sys_cpu_to_le16(num);
8792 }
8793 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
8794
/* Classify an RX node for HCI scheduling/priority purposes.
 *
 * Returns one of the HCI_CLASS_* values: DISCARDABLE (may be dropped),
 * REQUIRED (must reach the host), CONNECTION (connection-state events),
 * EVT_LLCP / ACL_DATA / ISO_DATA for data-channel PDUs, or NONE for
 * unrecognized types. Which case labels exist depends on Kconfig.
 */
uint8_t hci_get_class(struct node_rx_pdu *node_rx)
{
#if defined(CONFIG_BT_CONN)
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
#endif

	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {

		switch (node_rx->hdr.type) {
#if defined(CONFIG_BT_OBSERVER) || \
	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
	defined(CONFIG_BT_CTLR_PROFILE_ISR)
#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_REPORT:
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
		case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
		case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
		case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
			return HCI_CLASS_EVT_DISCARDABLE;
#endif

#if defined(CONFIG_BT_HCI_MESH_EXT)
		case NODE_RX_TYPE_MESH_ADV_CPLT:
		case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
		case NODE_RX_TYPE_EXT_ADV_TERMINATE:

#if defined(CONFIG_BT_CTLR_ADV_ISO)
		case NODE_RX_TYPE_BIG_COMPLETE:
		case NODE_RX_TYPE_BIG_TERMINATE:
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_EXT_1M_REPORT:
		case NODE_RX_TYPE_EXT_2M_REPORT:
		case NODE_RX_TYPE_EXT_CODED_REPORT:
		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC:
		case NODE_RX_TYPE_SYNC_REPORT:
		case NODE_RX_TYPE_SYNC_LOST:

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		case NODE_RX_TYPE_SYNC_ISO:
		case NODE_RX_TYPE_SYNC_ISO_LOST:
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONNECTION:

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case NODE_RX_TYPE_CIS_REQUEST:
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_CIS_ESTABLISHED:
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */

			return HCI_CLASS_EVT_REQUIRED;

		case NODE_RX_TYPE_TERMINATE:
		case NODE_RX_TYPE_CONN_UPDATE:

#if defined(CONFIG_BT_CTLR_LE_ENC)
		case NODE_RX_TYPE_ENC_REFRESH:
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
		case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

#if defined(CONFIG_BT_CTLR_LE_PING)
		case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
		case NODE_RX_TYPE_CHAN_SEL_ALGO:
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */

#if defined(CONFIG_BT_CTLR_PHY)
		case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

			return HCI_CLASS_EVT_CONNECTION;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_ISO_PDU:
			return HCI_CLASS_ISO_DATA;
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
		/* User-defined node types classify themselves */
		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
			return hci_user_ext_get_class(node_rx);
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

		default:
			return HCI_CLASS_NONE;
		}

#if defined(CONFIG_BT_CONN)
	} else if (pdu_data->ll_id == PDU_DATA_LLID_CTRL) {
		return HCI_CLASS_EVT_LLCP;
	} else {
		return HCI_CLASS_ACL_DATA;
	}
#else
	} else {
		return HCI_CLASS_NONE;
	}
#endif
}
8949
/* Initialize the controller HCI layer.
 *
 * @param signal_host_buf Poll signal stored for host-buffer processing
 *        when ACL flow control is enabled; ignored otherwise.
 */
void hci_init(struct k_poll_signal *signal_host_buf)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	hbuf_signal = signal_host_buf;
#endif
	/* Bring all HCI state to its reset defaults */
	reset(NULL, NULL);
}
8957