1 /*
2  * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <stddef.h>
9 #include <string.h>
10 
11 #include <version.h>
12 #include <errno.h>
13 
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/atomic.h>
17 
18 #include <zephyr/drivers/bluetooth/hci_driver.h>
19 
20 #include <zephyr/bluetooth/hci_types.h>
21 #include <zephyr/bluetooth/hci_vs.h>
22 #include <zephyr/bluetooth/buf.h>
23 
24 #include "../host/hci_ecc.h"
25 
26 #include "util/util.h"
27 #include "util/memq.h"
28 #include "util/mem.h"
29 #include "util/dbuf.h"
30 
31 #include "hal/ecb.h"
32 #include "hal/ccm.h"
33 #include "hal/ticker.h"
34 
35 #include "ticker/ticker.h"
36 
37 #include "ll_sw/pdu_df.h"
38 #include "lll/pdu_vendor.h"
39 #include "ll_sw/pdu.h"
40 
41 #include "ll_sw/lll.h"
42 #include "lll/lll_adv_types.h"
43 #include "ll_sw/lll_adv.h"
44 #include "lll/lll_adv_pdu.h"
45 #include "ll_sw/lll_scan.h"
46 #include "lll/lll_df_types.h"
47 #include "ll_sw/lll_sync.h"
48 #include "ll_sw/lll_sync_iso.h"
49 #include "ll_sw/lll_conn.h"
50 #include "ll_sw/lll_conn_iso.h"
51 #include "ll_sw/lll_iso_tx.h"
52 
53 #include "ll_sw/isoal.h"
54 
55 #include "ll_sw/ull_tx_queue.h"
56 
57 #include "ll_sw/ull_adv_types.h"
58 #include "ll_sw/ull_scan_types.h"
59 #include "ll_sw/ull_sync_types.h"
60 #include "ll_sw/ull_conn_types.h"
61 #include "ll_sw/ull_iso_types.h"
62 #include "ll_sw/ull_conn_iso_types.h"
63 #include "ll_sw/ull_conn_iso_internal.h"
64 #include "ll_sw/ull_df_types.h"
65 #include "ll_sw/ull_internal.h"
66 
67 #include "ll_sw/ull_adv_internal.h"
68 #include "ll_sw/ull_sync_internal.h"
69 #include "ll_sw/ull_conn_internal.h"
70 #include "ll_sw/ull_sync_iso_internal.h"
71 #include "ll_sw/ull_df_internal.h"
72 
73 #include "ll.h"
74 #include "ll_feat.h"
75 #include "ll_settings.h"
76 
77 #include "hci_internal.h"
78 #include "hci_vendor.h"
79 
80 #if defined(CONFIG_BT_HCI_MESH_EXT)
81 #include "ll_sw/ll_mesh.h"
82 #endif /* CONFIG_BT_HCI_MESH_EXT */
83 
84 #if defined(CONFIG_BT_CTLR_DTM_HCI)
85 #include "ll_sw/ll_test.h"
86 #endif /* CONFIG_BT_CTLR_DTM_HCI */
87 
88 #if defined(CONFIG_BT_CTLR_USER_EXT)
89 #include "hci_user_ext.h"
90 #endif /* CONFIG_BT_CTLR_USER_EXT */
91 
92 #include "common/bt_str.h"
93 #include "hal/debug.h"
94 
95 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
96 #include <zephyr/logging/log.h>
97 LOG_MODULE_REGISTER(bt_ctlr_hci);
98 
99 #define STR_NULL_TERMINATOR 0x00
100 
101 /* opcode of the HCI command currently being processed. The opcode is stored
102  * by hci_cmd_handle() and then used during the creation of cmd complete and
103  * cmd status events to avoid passing it up the call chain.
104  */
105 static uint16_t _opcode;
106 
107 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
108 /* NOTE: Duplicate filter uses two LS bits value of standard advertising modes:
109  *       0 - Non-Connectable Non-Scannable advertising report
110  *       1 - Connectable Non-Scannable advertising report
111  *       2 - Non-Connectable Scannable advertisig report
112  *       3 - Connectable Scannable advertising report
113  *
114  *       FIXME: Duplicate filtering of Connectable Directed low and high duty
115  *              cycle. If advertiser changes between Connectable Non-Scannable,
116  *              Connectable Directed low, and high duty cycle without changing
117  *              SID and DID, then such reports will be filtered out by the
118  *              implementation. Needs enhancement to current implementation.
119  *
120  *       Define a custom duplicate filter mode for periodic advertising:
121  *       4 - Periodic Advertising report
122  */
123 
124 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
125 #define DUP_EXT_ADV_MODE_MAX      5
126 #define DUP_EXT_ADV_MODE_PERIODIC BIT(2)
127 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
128 #define DUP_EXT_ADV_MODE_MAX      4
129 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
130 
131 #define DUP_EXT_ADV_MODE_COUNT    4
132 
133 /* Duplicate filter entries, one per Bluetooth address */
134 static struct dup_entry {
135 	bt_addr_le_t addr;
136 
137 	/* Mask to accumulate advertising PDU type as bitmask */
138 	uint8_t      mask;
139 
140 #if defined(CONFIG_BT_CTLR_ADV_EXT)
141 	struct dup_ext_adv_mode {
142 		uint16_t set_count:5;
143 		uint16_t set_curr:5;
144 		struct dup_ext_adv_set {
145 			uint8_t data_cmplt:1;
146 			struct pdu_adv_adi adi;
147 		} set[CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX];
148 	} adv_mode[DUP_EXT_ADV_MODE_MAX];
149 #endif
150 } dup_filter[CONFIG_BT_CTLR_DUP_FILTER_LEN];
151 
152 /* Duplicate filtering is disabled if count value is set to negative integer */
153 #define DUP_FILTER_DISABLED (-1)
154 
155 /* Duplicate filtering array entry count, filtering disabled if negative */
156 static int32_t dup_count;
157 /* Duplicate filtering current free entry, overwrites entries after rollover */
158 static uint32_t dup_curr;
159 
160 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
161 /* Helper function to reset non-periodic advertising entries in filter table */
162 static void dup_ext_adv_reset(void);
163 /* Flag for advertising reports be filtered for duplicates. */
164 static bool dup_scan;
165 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
166 /* Set constant true so that (dup_count >= 0) decides if advertising duplicate
167  * filter is enabled when Periodic Advertising ADI support is disabled.
168  */
169 static const bool dup_scan = true;
170 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
171 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
172 
173 #if defined(CONFIG_BT_HCI_MESH_EXT)
174 struct scan_filter {
175 	uint8_t count;
176 	uint8_t lengths[CONFIG_BT_CTLR_MESH_SF_PATTERNS];
177 	uint8_t patterns[CONFIG_BT_CTLR_MESH_SF_PATTERNS]
178 		     [BT_HCI_MESH_PATTERN_LEN_MAX];
179 };
180 
181 static struct scan_filter scan_filters[CONFIG_BT_CTLR_MESH_SCAN_FILTERS];
182 static uint8_t sf_curr;
183 #endif
184 
185 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
186 int32_t    hci_hbuf_total;
187 uint32_t    hci_hbuf_sent;
188 uint32_t    hci_hbuf_acked;
189 uint16_t    hci_hbuf_pend[CONFIG_BT_MAX_CONN];
190 atomic_t hci_state_mask;
191 static struct k_poll_signal *hbuf_signal;
192 #endif
193 
194 #if defined(CONFIG_BT_CONN)
195 static uint32_t conn_count;
196 #endif
197 
198 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
199 static uint32_t cis_pending_count;
200 #endif
201 
202 /* In HCI event PHY indices start at 1 compare to 0 indexed in aux_ptr field in
203  * the Common Extended Payload Format in the PDUs.
204  */
205 #define HCI_AUX_PHY_TO_HCI_PHY(aux_phy) ((aux_phy) + 1)
206 
207 #define DEFAULT_EVENT_MASK           0x1fffffffffff
208 #define DEFAULT_EVENT_MASK_PAGE_2    0x0
209 #define DEFAULT_LE_EVENT_MASK 0x1f
210 
211 static uint64_t event_mask = DEFAULT_EVENT_MASK;
212 static uint64_t event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
213 static uint64_t le_event_mask = DEFAULT_LE_EVENT_MASK;
214 #if defined(CONFIG_BT_HCI_VS_EVT)
215 static uint64_t vs_events_mask = DEFAULT_VS_EVT_MASK;
216 #endif /* CONFIG_BT_HCI_VS_EVT */
217 
218 static struct net_buf *cmd_complete_status(uint8_t status);
219 
220 #if defined(CONFIG_BT_CTLR_ADV_EXT)
221 #define BUF_GET_TIMEOUT K_SECONDS(10)
222 
223 #if defined(CONFIG_BT_HCI_RAW)
224 static uint8_t ll_adv_cmds;
225 
ll_adv_cmds_set(uint8_t adv_cmds)226 __weak int ll_adv_cmds_set(uint8_t adv_cmds)
227 {
228 	if (!ll_adv_cmds) {
229 		ll_adv_cmds = adv_cmds;
230 	}
231 
232 	if (ll_adv_cmds != adv_cmds) {
233 		return -EINVAL;
234 	}
235 
236 	return 0;
237 }
238 
ll_adv_cmds_is_ext(void)239 __weak int ll_adv_cmds_is_ext(void)
240 {
241 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
242 }
243 
244 #else /* !CONFIG_BT_HCI_RAW */
ll_adv_cmds_is_ext(void)245 __weak int ll_adv_cmds_is_ext(void)
246 {
247 	return 1;
248 }
249 #endif /* !CONFIG_BT_HCI_RAW */
250 
adv_cmds_legacy_check(struct net_buf ** cc_evt)251 static int adv_cmds_legacy_check(struct net_buf **cc_evt)
252 {
253 	int err;
254 
255 #if defined(CONFIG_BT_HCI_RAW)
256 	err = ll_adv_cmds_set(LL_ADV_CMDS_LEGACY);
257 	if (err && cc_evt) {
258 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
259 	}
260 #else
261 	if (cc_evt) {
262 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
263 	}
264 
265 	err = -EINVAL;
266 #endif /* CONFIG_BT_HCI_RAW */
267 
268 	return err;
269 }
270 
adv_cmds_ext_check(struct net_buf ** cc_evt)271 static int adv_cmds_ext_check(struct net_buf **cc_evt)
272 {
273 	int err;
274 
275 #if defined(CONFIG_BT_HCI_RAW)
276 	err = ll_adv_cmds_set(LL_ADV_CMDS_EXT);
277 	if (err && cc_evt) {
278 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
279 	}
280 #else
281 	err = 0;
282 #endif /* CONFIG_BT_HCI_RAW */
283 
284 	return err;
285 }
286 #else
adv_cmds_legacy_check(struct net_buf ** cc_evt)287 static inline int adv_cmds_legacy_check(struct net_buf **cc_evt)
288 {
289 	return 0;
290 }
291 #endif /* CONFIG_BT_CTLR_ADV_EXT */
292 
293 #if defined(CONFIG_BT_CONN)
294 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
295 			     struct net_buf *buf);
296 #endif /* CONFIG_BT_CONN */
297 
hci_evt_create(struct net_buf * buf,uint8_t evt,uint8_t len)298 static void hci_evt_create(struct net_buf *buf, uint8_t evt, uint8_t len)
299 {
300 	struct bt_hci_evt_hdr *hdr;
301 
302 	hdr = net_buf_add(buf, sizeof(*hdr));
303 	hdr->evt = evt;
304 	hdr->len = len;
305 }
306 
hci_cmd_complete(struct net_buf ** buf,uint8_t plen)307 void *hci_cmd_complete(struct net_buf **buf, uint8_t plen)
308 {
309 	*buf = bt_hci_cmd_complete_create(_opcode, plen);
310 
311 	return net_buf_add(*buf, plen);
312 }
313 
cmd_status(uint8_t status)314 static struct net_buf *cmd_status(uint8_t status)
315 {
316 	return bt_hci_cmd_status_create(_opcode, status);
317 }
318 
cmd_complete_status(uint8_t status)319 static struct net_buf *cmd_complete_status(uint8_t status)
320 {
321 	struct net_buf *buf;
322 	struct bt_hci_evt_cc_status *ccst;
323 
324 	buf = bt_hci_cmd_complete_create(_opcode, sizeof(*ccst));
325 	ccst = net_buf_add(buf, sizeof(*ccst));
326 	ccst->status = status;
327 
328 	return buf;
329 }
330 
meta_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)331 static void *meta_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
332 {
333 	struct bt_hci_evt_le_meta_event *me;
334 
335 	hci_evt_create(buf, BT_HCI_EVT_LE_META_EVENT, sizeof(*me) + melen);
336 	me = net_buf_add(buf, sizeof(*me));
337 	me->subevent = subevt;
338 
339 	return net_buf_add(buf, melen);
340 }
341 
342 #if defined(CONFIG_BT_HCI_VS_EVT)
vs_event(struct net_buf * buf,uint8_t subevt,uint8_t evt_len)343 static void *vs_event(struct net_buf *buf, uint8_t subevt, uint8_t evt_len)
344 {
345 	struct bt_hci_evt_vs *evt;
346 
347 	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*evt) + evt_len);
348 	evt = net_buf_add(buf, sizeof(*evt));
349 	evt->subevent = subevt;
350 
351 	return net_buf_add(buf, evt_len);
352 }
353 #endif /* CONFIG_BT_HCI_VS_EVT */
354 
355 #if defined(CONFIG_BT_HCI_MESH_EXT)
mesh_evt(struct net_buf * buf,uint8_t subevt,uint8_t melen)356 static void *mesh_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
357 {
358 	struct bt_hci_evt_mesh *me;
359 
360 	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*me) + melen);
361 	me = net_buf_add(buf, sizeof(*me));
362 	me->prefix = BT_HCI_MESH_EVT_PREFIX;
363 	me->subevent = subevt;
364 
365 	return net_buf_add(buf, melen);
366 }
367 #endif /* CONFIG_BT_HCI_MESH_EXT */
368 
369 #if defined(CONFIG_BT_CONN)
disconnect(struct net_buf * buf,struct net_buf ** evt)370 static void disconnect(struct net_buf *buf, struct net_buf **evt)
371 {
372 	struct bt_hci_cp_disconnect *cmd = (void *)buf->data;
373 	uint16_t handle;
374 	uint8_t status;
375 
376 	handle = sys_le16_to_cpu(cmd->handle);
377 	status = ll_terminate_ind_send(handle, cmd->reason);
378 
379 	*evt = cmd_status(status);
380 }
381 
read_remote_ver_info(struct net_buf * buf,struct net_buf ** evt)382 static void read_remote_ver_info(struct net_buf *buf, struct net_buf **evt)
383 {
384 	struct bt_hci_cp_read_remote_version_info *cmd = (void *)buf->data;
385 	uint16_t handle;
386 	uint8_t status;
387 
388 	handle = sys_le16_to_cpu(cmd->handle);
389 	status = ll_version_ind_send(handle);
390 
391 	*evt = cmd_status(status);
392 }
393 #endif /* CONFIG_BT_CONN */
394 
link_control_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)395 static int link_control_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
396 				   struct net_buf **evt)
397 {
398 	switch (ocf) {
399 #if defined(CONFIG_BT_CONN)
400 	case BT_OCF(BT_HCI_OP_DISCONNECT):
401 		disconnect(cmd, evt);
402 		break;
403 	case BT_OCF(BT_HCI_OP_READ_REMOTE_VERSION_INFO):
404 		read_remote_ver_info(cmd, evt);
405 		break;
406 #endif /* CONFIG_BT_CONN */
407 	default:
408 		return -EINVAL;
409 	}
410 
411 	return 0;
412 }
413 
set_event_mask(struct net_buf * buf,struct net_buf ** evt)414 static void set_event_mask(struct net_buf *buf, struct net_buf **evt)
415 {
416 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
417 
418 	event_mask = sys_get_le64(cmd->events);
419 
420 	*evt = cmd_complete_status(0x00);
421 }
422 
set_event_mask_page_2(struct net_buf * buf,struct net_buf ** evt)423 static void set_event_mask_page_2(struct net_buf *buf, struct net_buf **evt)
424 {
425 	struct bt_hci_cp_set_event_mask_page_2 *cmd = (void *)buf->data;
426 
427 	event_mask_page_2 = sys_get_le64(cmd->events_page_2);
428 
429 	*evt = cmd_complete_status(0x00);
430 }
431 
reset(struct net_buf * buf,struct net_buf ** evt)432 static void reset(struct net_buf *buf, struct net_buf **evt)
433 {
434 #if defined(CONFIG_BT_HCI_MESH_EXT)
435 	int i;
436 
437 	for (i = 0; i < ARRAY_SIZE(scan_filters); i++) {
438 		scan_filters[i].count = 0U;
439 	}
440 	sf_curr = 0xFF;
441 #endif
442 
443 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
444 	dup_count = DUP_FILTER_DISABLED;
445 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
446 	dup_scan = false;
447 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
448 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
449 
450 	/* reset event masks */
451 	event_mask = DEFAULT_EVENT_MASK;
452 	event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
453 	le_event_mask = DEFAULT_LE_EVENT_MASK;
454 
455 	if (buf) {
456 		ll_reset();
457 		*evt = cmd_complete_status(0x00);
458 	}
459 
460 #if defined(CONFIG_BT_CONN)
461 	conn_count = 0U;
462 #endif
463 
464 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
465 	cis_pending_count = 0U;
466 #endif
467 
468 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
469 	hci_hbuf_total = 0;
470 	hci_hbuf_sent = 0U;
471 	hci_hbuf_acked = 0U;
472 	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
473 	if (buf) {
474 		atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
475 		k_poll_signal_raise(hbuf_signal, 0x0);
476 	}
477 #endif
478 
479 	hci_recv_fifo_reset();
480 }
481 
482 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_ctl_to_host_flow(struct net_buf * buf,struct net_buf ** evt)483 static void set_ctl_to_host_flow(struct net_buf *buf, struct net_buf **evt)
484 {
485 	struct bt_hci_cp_set_ctl_to_host_flow *cmd = (void *)buf->data;
486 	uint8_t flow_enable = cmd->flow_enable;
487 	struct bt_hci_evt_cc_status *ccst;
488 
489 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
490 
491 	/* require host buffer size before enabling flow control, and
492 	 * disallow if any connections are up
493 	 */
494 	if (!hci_hbuf_total || conn_count) {
495 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
496 		return;
497 	} else {
498 		ccst->status = 0x00;
499 	}
500 
501 	switch (flow_enable) {
502 	case BT_HCI_CTL_TO_HOST_FLOW_DISABLE:
503 		if (hci_hbuf_total < 0) {
504 			/* already disabled */
505 			return;
506 		}
507 		break;
508 	case BT_HCI_CTL_TO_HOST_FLOW_ENABLE:
509 		if (hci_hbuf_total > 0) {
510 			/* already enabled */
511 			return;
512 		}
513 		break;
514 	default:
515 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
516 		return;
517 	}
518 
519 	hci_hbuf_sent = 0U;
520 	hci_hbuf_acked = 0U;
521 	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
522 	hci_hbuf_total = -hci_hbuf_total;
523 }
524 
host_buffer_size(struct net_buf * buf,struct net_buf ** evt)525 static void host_buffer_size(struct net_buf *buf, struct net_buf **evt)
526 {
527 	struct bt_hci_cp_host_buffer_size *cmd = (void *)buf->data;
528 	uint16_t acl_pkts = sys_le16_to_cpu(cmd->acl_pkts);
529 	uint16_t acl_mtu = sys_le16_to_cpu(cmd->acl_mtu);
530 	struct bt_hci_evt_cc_status *ccst;
531 
532 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
533 
534 	if (hci_hbuf_total) {
535 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
536 		return;
537 	}
538 	/* fragmentation from controller to host not supported, require
539 	 * ACL MTU to be at least the LL MTU
540 	 */
541 	if (acl_mtu < LL_LENGTH_OCTETS_RX_MAX) {
542 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
543 		return;
544 	}
545 
546 	LOG_DBG("FC: host buf size: %d", acl_pkts);
547 	hci_hbuf_total = -acl_pkts;
548 }
549 
host_num_completed_packets(struct net_buf * buf,struct net_buf ** evt)550 static void host_num_completed_packets(struct net_buf *buf,
551 				       struct net_buf **evt)
552 {
553 	struct bt_hci_cp_host_num_completed_packets *cmd = (void *)buf->data;
554 	struct bt_hci_evt_cc_status *ccst;
555 	uint32_t count = 0U;
556 
557 	/* special case, no event returned except for error conditions */
558 	if (hci_hbuf_total <= 0) {
559 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
560 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
561 		return;
562 	} else if (!conn_count) {
563 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
564 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
565 		return;
566 	}
567 
568 	/* leave *evt == NULL so no event is generated */
569 	for (uint8_t i = 0; i < cmd->num_handles; i++) {
570 		uint16_t h = sys_le16_to_cpu(cmd->h[i].handle);
571 		uint16_t c = sys_le16_to_cpu(cmd->h[i].count);
572 
573 		if ((h >= ARRAY_SIZE(hci_hbuf_pend)) ||
574 		    (c > hci_hbuf_pend[h])) {
575 			ccst = hci_cmd_complete(evt, sizeof(*ccst));
576 			ccst->status = BT_HCI_ERR_INVALID_PARAM;
577 			return;
578 		}
579 
580 		hci_hbuf_pend[h] -= c;
581 		count += c;
582 	}
583 
584 	LOG_DBG("FC: acked: %d", count);
585 	hci_hbuf_acked += count;
586 	k_poll_signal_raise(hbuf_signal, 0x0);
587 }
588 #endif
589 
590 #if defined(CONFIG_BT_CTLR_LE_PING)
read_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)591 static void read_auth_payload_timeout(struct net_buf *buf, struct net_buf **evt)
592 {
593 	struct bt_hci_cp_read_auth_payload_timeout *cmd = (void *)buf->data;
594 	struct bt_hci_rp_read_auth_payload_timeout *rp;
595 	uint16_t auth_payload_timeout;
596 	uint16_t handle;
597 	uint8_t status;
598 
599 	handle = sys_le16_to_cpu(cmd->handle);
600 
601 	status = ll_apto_get(handle, &auth_payload_timeout);
602 
603 	rp = hci_cmd_complete(evt, sizeof(*rp));
604 	rp->status = status;
605 	rp->handle = sys_cpu_to_le16(handle);
606 	rp->auth_payload_timeout = sys_cpu_to_le16(auth_payload_timeout);
607 }
608 
write_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)609 static void write_auth_payload_timeout(struct net_buf *buf,
610 				       struct net_buf **evt)
611 {
612 	struct bt_hci_cp_write_auth_payload_timeout *cmd = (void *)buf->data;
613 	struct bt_hci_rp_write_auth_payload_timeout *rp;
614 	uint16_t auth_payload_timeout;
615 	uint16_t handle;
616 	uint8_t status;
617 
618 	handle = sys_le16_to_cpu(cmd->handle);
619 	auth_payload_timeout = sys_le16_to_cpu(cmd->auth_payload_timeout);
620 
621 	status = ll_apto_set(handle, auth_payload_timeout);
622 
623 	rp = hci_cmd_complete(evt, sizeof(*rp));
624 	rp->status = status;
625 	rp->handle = sys_cpu_to_le16(handle);
626 }
627 #endif /* CONFIG_BT_CTLR_LE_PING */
628 
629 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
configure_data_path(struct net_buf * buf,struct net_buf ** evt)630 static void configure_data_path(struct net_buf *buf,
631 				struct net_buf **evt)
632 {
633 	struct bt_hci_cp_configure_data_path *cmd = (void *)buf->data;
634 	struct bt_hci_rp_configure_data_path *rp;
635 
636 	uint8_t *vs_config;
637 	uint8_t status;
638 
639 	vs_config = &cmd->vs_config[0];
640 
641 	if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)) {
642 		status = ll_configure_data_path(cmd->data_path_dir,
643 						cmd->data_path_id,
644 						cmd->vs_config_len,
645 						vs_config);
646 	} else {
647 		status = BT_HCI_ERR_INVALID_PARAM;
648 	}
649 
650 	rp = hci_cmd_complete(evt, sizeof(*rp));
651 	rp->status = status;
652 }
653 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
654 
655 #if defined(CONFIG_BT_CTLR_CONN_ISO)
read_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)656 static void read_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
657 {
658 	struct bt_hci_rp_read_conn_accept_timeout *rp;
659 	uint16_t timeout;
660 
661 	ARG_UNUSED(buf);
662 
663 	rp = hci_cmd_complete(evt, sizeof(*rp));
664 
665 	rp->status = ll_conn_iso_accept_timeout_get(&timeout);
666 	rp->conn_accept_timeout = sys_cpu_to_le16(timeout);
667 }
668 
write_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)669 static void write_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
670 {
671 	struct bt_hci_cp_write_conn_accept_timeout *cmd = (void *)buf->data;
672 	struct bt_hci_rp_write_conn_accept_timeout *rp;
673 	uint16_t timeout;
674 
675 	timeout = sys_le16_to_cpu(cmd->conn_accept_timeout);
676 
677 	rp = hci_cmd_complete(evt, sizeof(*rp));
678 
679 	rp->status = ll_conn_iso_accept_timeout_set(timeout);
680 }
681 #endif /* CONFIG_BT_CTLR_CONN_ISO */
682 
683 #if defined(CONFIG_BT_CONN)
read_tx_power_level(struct net_buf * buf,struct net_buf ** evt)684 static void read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
685 {
686 	struct bt_hci_cp_read_tx_power_level *cmd = (void *)buf->data;
687 	struct bt_hci_rp_read_tx_power_level *rp;
688 	uint16_t handle;
689 	uint8_t status;
690 	uint8_t type;
691 
692 	handle = sys_le16_to_cpu(cmd->handle);
693 	type = cmd->type;
694 
695 	rp = hci_cmd_complete(evt, sizeof(*rp));
696 
697 	status = ll_tx_pwr_lvl_get(BT_HCI_VS_LL_HANDLE_TYPE_CONN,
698 				   handle, type, &rp->tx_power_level);
699 
700 	rp->status = status;
701 	rp->handle = sys_cpu_to_le16(handle);
702 }
703 #endif /* CONFIG_BT_CONN */
704 
ctrl_bb_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)705 static int ctrl_bb_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
706 			      struct net_buf **evt)
707 {
708 	switch (ocf) {
709 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK):
710 		set_event_mask(cmd, evt);
711 		break;
712 
713 	case BT_OCF(BT_HCI_OP_RESET):
714 		reset(cmd, evt);
715 		break;
716 
717 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK_PAGE_2):
718 		set_event_mask_page_2(cmd, evt);
719 		break;
720 
721 #if defined(CONFIG_BT_CTLR_CONN_ISO)
722 	case BT_OCF(BT_HCI_OP_READ_CONN_ACCEPT_TIMEOUT):
723 		read_conn_accept_timeout(cmd, evt);
724 		break;
725 
726 	case BT_OCF(BT_HCI_OP_WRITE_CONN_ACCEPT_TIMEOUT):
727 		write_conn_accept_timeout(cmd, evt);
728 		break;
729 #endif /* CONFIG_BT_CTLR_CONN_ISO */
730 
731 #if defined(CONFIG_BT_CONN)
732 	case BT_OCF(BT_HCI_OP_READ_TX_POWER_LEVEL):
733 		read_tx_power_level(cmd, evt);
734 		break;
735 #endif /* CONFIG_BT_CONN */
736 
737 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
738 	case BT_OCF(BT_HCI_OP_SET_CTL_TO_HOST_FLOW):
739 		set_ctl_to_host_flow(cmd, evt);
740 		break;
741 
742 	case BT_OCF(BT_HCI_OP_HOST_BUFFER_SIZE):
743 		host_buffer_size(cmd, evt);
744 		break;
745 
746 	case BT_OCF(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS):
747 		host_num_completed_packets(cmd, evt);
748 		break;
749 #endif
750 
751 #if defined(CONFIG_BT_CTLR_LE_PING)
752 	case BT_OCF(BT_HCI_OP_READ_AUTH_PAYLOAD_TIMEOUT):
753 		read_auth_payload_timeout(cmd, evt);
754 		break;
755 
756 	case BT_OCF(BT_HCI_OP_WRITE_AUTH_PAYLOAD_TIMEOUT):
757 		write_auth_payload_timeout(cmd, evt);
758 		break;
759 #endif /* CONFIG_BT_CTLR_LE_PING */
760 
761 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
762 	case BT_OCF(BT_HCI_OP_CONFIGURE_DATA_PATH):
763 		configure_data_path(cmd, evt);
764 		break;
765 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
766 
767 	default:
768 		return -EINVAL;
769 	}
770 
771 	return 0;
772 }
773 
read_local_version_info(struct net_buf * buf,struct net_buf ** evt)774 static void read_local_version_info(struct net_buf *buf, struct net_buf **evt)
775 {
776 	struct bt_hci_rp_read_local_version_info *rp;
777 
778 	rp = hci_cmd_complete(evt, sizeof(*rp));
779 
780 	rp->status = 0x00;
781 	rp->hci_version = LL_VERSION_NUMBER;
782 	rp->hci_revision = sys_cpu_to_le16(0);
783 	rp->lmp_version = LL_VERSION_NUMBER;
784 	rp->manufacturer = sys_cpu_to_le16(ll_settings_company_id());
785 	rp->lmp_subversion = sys_cpu_to_le16(ll_settings_subversion_number());
786 }
787 
read_supported_commands(struct net_buf * buf,struct net_buf ** evt)788 static void read_supported_commands(struct net_buf *buf, struct net_buf **evt)
789 {
790 	struct bt_hci_rp_read_supported_commands *rp;
791 
792 	rp = hci_cmd_complete(evt, sizeof(*rp));
793 
794 	rp->status = 0x00;
795 	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));
796 
797 #if defined(CONFIG_BT_REMOTE_VERSION)
798 	/* Read Remote Version Info. */
799 	rp->commands[2] |= BIT(7);
800 #endif
801 	/* Set Event Mask, and Reset. */
802 	rp->commands[5] |= BIT(6) | BIT(7);
803 
804 #if defined(CONFIG_BT_CTLR_CONN_ISO)
805 	/* Read/Write Connection Accept Timeout */
806 	rp->commands[7] |= BIT(2) | BIT(3);
807 #endif /* CONFIG_BT_CTLR_CONN_ISO */
808 
809 	/* Read TX Power Level. */
810 	rp->commands[10] |= BIT(2);
811 
812 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
813 	/* Set FC, Host Buffer Size and Host Num Completed */
814 	rp->commands[10] |= BIT(5) | BIT(6) | BIT(7);
815 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
816 
817 	/* Read Local Version Info, Read Local Supported Features. */
818 	rp->commands[14] |= BIT(3) | BIT(5);
819 	/* Read BD ADDR. */
820 	rp->commands[15] |= BIT(1);
821 
822 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
823 	/* Read RSSI. */
824 	rp->commands[15] |= BIT(5);
825 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
826 
827 	/* Set Event Mask Page 2 */
828 	rp->commands[22] |= BIT(2);
829 	/* LE Set Event Mask, LE Read Buffer Size, LE Read Local Supp Feats,
830 	 * Set Random Addr
831 	 */
832 	rp->commands[25] |= BIT(0) | BIT(1) | BIT(2) | BIT(4);
833 
834 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
835 	/* LE Read FAL Size, LE Clear FAL */
836 	rp->commands[26] |= BIT(6) | BIT(7);
837 	/* LE Add Dev to FAL, LE Remove Dev from FAL */
838 	rp->commands[27] |= BIT(0) | BIT(1);
839 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
840 
841 	/* LE Encrypt, LE Rand */
842 	rp->commands[27] |= BIT(6) | BIT(7);
843 	/* LE Read Supported States */
844 	rp->commands[28] |= BIT(3);
845 
846 #if defined(CONFIG_BT_BROADCASTER)
847 	/* LE Set Adv Params, LE Read Adv Channel TX Power, LE Set Adv Data */
848 	rp->commands[25] |= BIT(5) | BIT(6) | BIT(7);
849 	/* LE Set Scan Response Data, LE Set Adv Enable */
850 	rp->commands[26] |= BIT(0) | BIT(1);
851 
852 #if defined(CONFIG_BT_CTLR_ADV_EXT)
853 	/* LE Set Adv Set Random Addr, LE Set Ext Adv Params, LE Set Ext Adv
854 	 * Data, LE Set Ext Adv Scan Rsp Data, LE Set Ext Adv Enable, LE Read
855 	 * Max Adv Data Len, LE Read Num Supp Adv Sets
856 	 */
857 	rp->commands[36] |= BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
858 			    BIT(6) | BIT(7);
859 	/* LE Remove Adv Set, LE Clear Adv Sets */
860 	rp->commands[37] |= BIT(0) | BIT(1);
861 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
862 	/* LE Set PA Params, LE Set PA Data, LE Set PA Enable */
863 	rp->commands[37] |= BIT(2) | BIT(3) | BIT(4);
864 #if defined(CONFIG_BT_CTLR_ADV_ISO)
865 	/* LE Create BIG, LE Create BIG Test, LE Terminate BIG */
866 	rp->commands[42] |= BIT(5) | BIT(6) | BIT(7);
867 #endif /* CONFIG_BT_CTLR_ADV_ISO */
868 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
869 #endif /* CONFIG_BT_CTLR_ADV_EXT */
870 #endif /* CONFIG_BT_BROADCASTER */
871 
872 #if defined(CONFIG_BT_OBSERVER)
873 	/* LE Set Scan Params, LE Set Scan Enable */
874 	rp->commands[26] |= BIT(2) | BIT(3);
875 
876 #if defined(CONFIG_BT_CTLR_ADV_EXT)
877 	/* LE Set Extended Scan Params, LE Set Extended Scan Enable */
878 	rp->commands[37] |= BIT(5) | BIT(6);
879 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
880 	/* LE PA Create Sync, LE PA Create Sync Cancel, LE PA Terminate Sync */
881 	rp->commands[38] |= BIT(0) | BIT(1) | BIT(2);
882 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
883 	/* LE PA Add Device to Periodic Advertiser List,
884 	 * LE PA Remove Device from Periodic Advertiser List,
885 	 * LE Clear Periodic Advertiser List,
886 	 * LE Read Periodic Adveritiser List Size
887 	 */
888 	rp->commands[38] |= BIT(3) | BIT(4) | BIT(5) | BIT(6);
889 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
890 	/* LE Set PA Receive Enable */
891 	rp->commands[40] |= BIT(5);
892 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
893 	/* LE BIG Create Sync, LE BIG Terminate Sync */
894 	rp->commands[43] |= BIT(0) | BIT(1);
895 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
896 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
897 #endif /* CONFIG_BT_CTLR_ADV_EXT */
898 
899 #endif /* CONFIG_BT_OBSERVER */
900 
901 #if defined(CONFIG_BT_CONN)
902 #if defined(CONFIG_BT_CENTRAL)
903 	/* LE Create Connection, LE Create Connection Cancel */
904 	rp->commands[26] |= BIT(4) | BIT(5);
905 	/* Set Host Channel Classification */
906 	rp->commands[27] |= BIT(3);
907 
908 #if defined(CONFIG_BT_CTLR_ADV_EXT)
909 	/* LE Extended Create Connection */
910 	rp->commands[37] |= BIT(7);
911 #endif /* CONFIG_BT_CTLR_ADV_EXT */
912 
913 #if defined(CONFIG_BT_CTLR_LE_ENC)
914 	/* LE Start Encryption */
915 	rp->commands[28] |= BIT(0);
916 #endif /* CONFIG_BT_CTLR_LE_ENC */
917 
918 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
919 	/* LE Set CIG Parameters */
920 	rp->commands[41] |= BIT(7);
921 	/* LE Set CIG Parameters Test, LE Create CIS, LE Remove CIS */
922 	rp->commands[42] |= BIT(0) | BIT(1) | BIT(2);
923 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
924 #endif /* CONFIG_BT_CENTRAL */
925 
926 #if defined(CONFIG_BT_PERIPHERAL)
927 #if defined(CONFIG_BT_CTLR_LE_ENC)
928 	/* LE LTK Request Reply, LE LTK Request Negative Reply */
929 	rp->commands[28] |= BIT(1) | BIT(2);
930 #endif /* CONFIG_BT_CTLR_LE_ENC */
931 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
932 	/* LE Accept CIS Request, LE Reject CIS Request */
933 	rp->commands[42] |= BIT(3) | BIT(4);
934 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
935 #endif /* CONFIG_BT_PERIPHERAL */
936 
937 	/* Disconnect. */
938 	rp->commands[0] |= BIT(5);
939 	/* LE Connection Update, LE Read Channel Map, LE Read Remote Features */
940 	rp->commands[27] |= BIT(2) | BIT(4) | BIT(5);
941 
942 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
943 	/* LE Remote Conn Param Req and Neg Reply */
944 	rp->commands[33] |= BIT(4) | BIT(5);
945 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
946 
947 #if defined(CONFIG_BT_CTLR_LE_PING)
948 	/* Read and Write authenticated payload timeout */
949 	rp->commands[32] |= BIT(4) | BIT(5);
950 #endif /* CONFIG_BT_CTLR_LE_PING */
951 
952 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
953 	/* LE Set Data Length, and LE Read Suggested Data Length. */
954 	rp->commands[33] |= BIT(6) | BIT(7);
955 	/* LE Write Suggested Data Length. */
956 	rp->commands[34] |= BIT(0);
957 	/* LE Read Maximum Data Length. */
958 	rp->commands[35] |= BIT(3);
959 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
960 
961 #if defined(CONFIG_BT_CTLR_PHY)
962 	/* LE Read PHY Command. */
963 	rp->commands[35] |= BIT(4);
964 	/* LE Set Default PHY Command. */
965 	rp->commands[35] |= BIT(5);
966 	/* LE Set PHY Command. */
967 	rp->commands[35] |= BIT(6);
968 #endif /* CONFIG_BT_CTLR_PHY */
969 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
970 	/* LE Request Peer SCA */
971 	rp->commands[43] |= BIT(2);
972 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
973 #endif /* CONFIG_BT_CONN */
974 
975 #if defined(CONFIG_BT_CTLR_DTM_HCI)
976 	/* LE RX Test, LE TX Test, LE Test End */
977 	rp->commands[28] |= BIT(4) | BIT(5) | BIT(6);
978 	/* LE Enhanced RX Test. */
979 	rp->commands[35] |= BIT(7);
980 	/* LE Enhanced TX Test. */
981 	rp->commands[36] |= BIT(0);
982 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
983 	rp->commands[39] |= BIT(3);
984 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
985 
986 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
987 	rp->commands[39] |= BIT(4);
988 #endif
989 
990 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
991 	rp->commands[45] |= BIT(0);
992 #endif
993 #endif /* CONFIG_BT_CTLR_DTM_HCI */
994 
995 #if defined(CONFIG_BT_CTLR_PRIVACY)
996 	/* LE resolving list commands, LE Read Peer RPA */
997 	rp->commands[34] |= BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7);
998 	/* LE Read Local RPA, LE Set AR Enable, Set RPA Timeout */
999 	rp->commands[35] |= BIT(0) | BIT(1) | BIT(2);
1000 	/* LE Set Privacy Mode */
1001 	rp->commands[39] |= BIT(2);
1002 #endif /* CONFIG_BT_CTLR_PRIVACY */
1003 
1004 #if defined(CONFIG_BT_CTLR_DF)
1005 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
1006 	/* LE Set Connectionless CTE Transmit Parameters,
1007 	 * LE Set Connectionless CTE Transmit Enable
1008 	 */
1009 	rp->commands[39] |= BIT(5) | BIT(6);
1010 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
1011 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1012 	/* LE Set Connectionless IQ Sampling Enable */
1013 	rp->commands[39] |= BIT(7);
1014 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1015 	/* LE Read Antenna Information */
1016 	rp->commands[40] |= BIT(4);
1017 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1018 	/* LE Set Connection CTE Transmit Parameters */
1019 	rp->commands[40] |= BIT(1);
1020 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1021 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1022 	/* LE Set Connection CTE Receive Parameters */
1023 	rp->commands[40] |= BIT(0);
1024 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1025 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1026 	/* LE Connection CTE Request Enable */
1027 	rp->commands[40] |= BIT(2);
1028 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1029 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1030 	/* LE Connection CTE Response Enable */
1031 	rp->commands[40] |= BIT(3);
1032 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1033 
1034 #endif /* CONFIG_BT_CTLR_DF */
1035 
1036 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_TINYCRYPT_ECC)
1037 	bt_hci_ecc_supported_commands(rp->commands);
1038 #endif /* CONFIG_BT_HCI_RAW && CONFIG_BT_TINYCRYPT_ECC */
1039 
1040 	/* LE Read TX Power. */
1041 	rp->commands[38] |= BIT(7);
1042 
1043 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1044 	/* LE Read Buffer Size v2, LE Read ISO TX Sync */
1045 	rp->commands[41] |= BIT(5) | BIT(6);
1046 	/* LE ISO Transmit Test */
1047 	rp->commands[43] |= BIT(5);
1048 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1049 
1050 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1051 	/* LE ISO Receive Test, LE ISO Read Test Counters */
1052 	rp->commands[43] |= BIT(6) | BIT(7);
1053 
1054 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1055 	/* LE Read ISO Link Quality */
1056 	rp->commands[44] |= BIT(2);
1057 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1058 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1059 
1060 #if defined(CONFIG_BT_CTLR_ISO)
1061 	/* LE Setup ISO Data Path, LE Remove ISO Data Path */
1062 	rp->commands[43] |= BIT(3) | BIT(4);
1063 	/* LE ISO Test End */
1064 	rp->commands[44] |= BIT(0);
1065 #endif /* CONFIG_BT_CTLR_ISO */
1066 
1067 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
1068 	/* LE Set Host Feature */
1069 	rp->commands[44] |= BIT(1);
1070 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
1071 
1072 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1073 	/* Read Supported Codecs [v2], Codec Capabilities, Controller Delay, Configure Data Path */
1074 	rp->commands[45] |= BIT(2) | BIT(3) | BIT(4) | BIT(5);
1075 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1076 }
1077 
read_local_features(struct net_buf * buf,struct net_buf ** evt)1078 static void read_local_features(struct net_buf *buf, struct net_buf **evt)
1079 {
1080 	struct bt_hci_rp_read_local_features *rp;
1081 
1082 	rp = hci_cmd_complete(evt, sizeof(*rp));
1083 
1084 	rp->status = 0x00;
1085 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1086 	/* BR/EDR not supported and LE supported */
1087 	rp->features[4] = (1 << 5) | (1 << 6);
1088 }
1089 
read_bd_addr(struct net_buf * buf,struct net_buf ** evt)1090 static void read_bd_addr(struct net_buf *buf, struct net_buf **evt)
1091 {
1092 	struct bt_hci_rp_read_bd_addr *rp;
1093 
1094 	rp = hci_cmd_complete(evt, sizeof(*rp));
1095 
1096 	rp->status = 0x00;
1097 
1098 	(void)ll_addr_read(0, &rp->bdaddr.val[0]);
1099 }
1100 
1101 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
hci_vendor_read_std_codecs(const struct bt_hci_std_codec_info_v2 ** codecs)1102 uint8_t __weak hci_vendor_read_std_codecs(
1103 	const struct bt_hci_std_codec_info_v2 **codecs)
1104 {
1105 	ARG_UNUSED(codecs);
1106 
1107 	/* return number of supported codecs */
1108 	return 0;
1109 }
1110 
hci_vendor_read_vs_codecs(const struct bt_hci_vs_codec_info_v2 ** codecs)1111 uint8_t __weak hci_vendor_read_vs_codecs(
1112 	const struct bt_hci_vs_codec_info_v2 **codecs)
1113 {
1114 	ARG_UNUSED(codecs);
1115 
1116 	/* return number of supported codecs */
1117 	return 0;
1118 }
1119 
1120 /* NOTE: Not implementing the [v1] version.
1121  * Refer to BT Spec v5.3 Vol 4, Part E 7.4.8 Read Local Supported Codecs command
1122  * The [v1] version of this command shall only return codecs supported on the
1123  * BR/EDR physical transport, while the [v2] version shall return codecs
1124  * supported on all physical transports.
1125  */
read_codecs_v2(struct net_buf * buf,struct net_buf ** evt)1126 static void read_codecs_v2(struct net_buf *buf, struct net_buf **evt)
1127 {
1128 	struct bt_hci_rp_read_codecs_v2 *rp;
1129 	const struct bt_hci_std_codec_info_v2 *std_codec_info;
1130 	const struct bt_hci_vs_codec_info_v2 *vs_codec_info;
1131 	struct bt_hci_std_codecs_v2 *std_codecs;
1132 	struct bt_hci_vs_codecs_v2 *vs_codecs;
1133 	size_t std_codecs_bytes;
1134 	size_t vs_codecs_bytes;
1135 	uint8_t num_std_codecs;
1136 	uint8_t num_vs_codecs;
1137 	uint8_t i;
1138 
1139 	/* read standard codec information */
1140 	num_std_codecs = hci_vendor_read_std_codecs(&std_codec_info);
1141 	std_codecs_bytes = sizeof(struct bt_hci_std_codecs_v2) +
1142 		num_std_codecs * sizeof(struct bt_hci_std_codec_info_v2);
1143 	/* read vendor-specific codec information */
1144 	num_vs_codecs = hci_vendor_read_vs_codecs(&vs_codec_info);
1145 	vs_codecs_bytes = sizeof(struct bt_hci_vs_codecs_v2) +
1146 		num_vs_codecs *	sizeof(struct bt_hci_vs_codec_info_v2);
1147 
1148 	/* allocate response packet */
1149 	rp = hci_cmd_complete(evt, sizeof(*rp) +
1150 			      std_codecs_bytes +
1151 			      vs_codecs_bytes);
1152 	rp->status = 0x00;
1153 
1154 	/* copy standard codec information */
1155 	std_codecs = (struct bt_hci_std_codecs_v2 *)&rp->codecs[0];
1156 	std_codecs->num_codecs = num_std_codecs;
1157 	for (i = 0; i < num_std_codecs; i++) {
1158 		struct bt_hci_std_codec_info_v2 *codec;
1159 
1160 		codec = &std_codecs->codec_info[i];
1161 		codec->codec_id = std_codec_info[i].codec_id;
1162 		codec->transports = std_codec_info[i].transports;
1163 	}
1164 
1165 	/* copy vendor specific codec information  */
1166 	vs_codecs = (struct bt_hci_vs_codecs_v2 *)&rp->codecs[std_codecs_bytes];
1167 	vs_codecs->num_codecs = num_vs_codecs;
1168 	for (i = 0; i < num_std_codecs; i++) {
1169 		struct bt_hci_vs_codec_info_v2 *codec;
1170 
1171 		codec = &vs_codecs->codec_info[i];
1172 		codec->company_id =
1173 			sys_cpu_to_le16(vs_codec_info[i].company_id);
1174 		codec->codec_id = sys_cpu_to_le16(vs_codec_info[i].codec_id);
1175 		codec->transports = vs_codec_info[i].transports;
1176 	}
1177 }
1178 
hci_vendor_read_codec_capabilities(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t * num_capabilities,size_t * capabilities_bytes,const uint8_t ** capabilities)1179 uint8_t __weak hci_vendor_read_codec_capabilities(uint8_t coding_format,
1180 						  uint16_t company_id,
1181 						  uint16_t vs_codec_id,
1182 						  uint8_t transport,
1183 						  uint8_t direction,
1184 						  uint8_t *num_capabilities,
1185 						  size_t *capabilities_bytes,
1186 						  const uint8_t **capabilities)
1187 {
1188 	ARG_UNUSED(coding_format);
1189 	ARG_UNUSED(company_id);
1190 	ARG_UNUSED(vs_codec_id);
1191 	ARG_UNUSED(transport);
1192 	ARG_UNUSED(direction);
1193 	ARG_UNUSED(capabilities);
1194 
1195 	*num_capabilities = 0;
1196 	*capabilities_bytes = 0;
1197 
1198 	/* return status */
1199 	return 0x00;
1200 }
1201 
read_codec_capabilities(struct net_buf * buf,struct net_buf ** evt)1202 static void read_codec_capabilities(struct net_buf *buf, struct net_buf **evt)
1203 {
1204 	struct bt_hci_cp_read_codec_capabilities *cmd = (void *)buf->data;
1205 	struct bt_hci_rp_read_codec_capabilities *rp;
1206 	const uint8_t *capabilities;
1207 	size_t capabilities_bytes;
1208 	uint8_t num_capabilities;
1209 	uint16_t vs_codec_id;
1210 	uint16_t company_id;
1211 	uint8_t status;
1212 
1213 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1214 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1215 
1216 	/* read codec capabilities */
1217 	status = hci_vendor_read_codec_capabilities(cmd->codec_id.coding_format,
1218 						    company_id,
1219 						    vs_codec_id,
1220 						    cmd->transport,
1221 						    cmd->direction,
1222 						    &num_capabilities,
1223 						    &capabilities_bytes,
1224 						    &capabilities);
1225 
1226 	/* allocate response packet */
1227 	rp = hci_cmd_complete(evt, sizeof(*rp) + capabilities_bytes);
1228 	rp->status = status;
1229 
1230 	/* copy codec capabilities information */
1231 	rp->num_capabilities = num_capabilities;
1232 	memcpy(&rp->capabilities, capabilities, capabilities_bytes);
1233 }
1234 
hci_vendor_read_ctlr_delay(uint8_t coding_format,uint16_t company_id,uint16_t vs_codec_id,uint8_t transport,uint8_t direction,uint8_t codec_config_len,const uint8_t * codec_config,uint32_t * min_delay,uint32_t * max_delay)1235 uint8_t __weak hci_vendor_read_ctlr_delay(uint8_t coding_format,
1236 					  uint16_t company_id,
1237 					  uint16_t vs_codec_id,
1238 					  uint8_t transport,
1239 					  uint8_t direction,
1240 					  uint8_t codec_config_len,
1241 					  const uint8_t *codec_config,
1242 					  uint32_t *min_delay,
1243 					  uint32_t *max_delay)
1244 {
1245 	ARG_UNUSED(coding_format);
1246 	ARG_UNUSED(company_id);
1247 	ARG_UNUSED(vs_codec_id);
1248 	ARG_UNUSED(transport);
1249 	ARG_UNUSED(direction);
1250 	ARG_UNUSED(codec_config_len);
1251 	ARG_UNUSED(codec_config);
1252 
1253 	*min_delay = 0;
1254 	*max_delay = 0x3D0900; /* 4 seconds, maximum value allowed by spec */
1255 
1256 	/* return status */
1257 	return 0x00;
1258 }
1259 
read_ctlr_delay(struct net_buf * buf,struct net_buf ** evt)1260 static void read_ctlr_delay(struct net_buf *buf, struct net_buf **evt)
1261 {
1262 	struct bt_hci_cp_read_ctlr_delay *cmd = (void *)buf->data;
1263 	struct bt_hci_rp_read_ctlr_delay *rp;
1264 	uint16_t vs_codec_id;
1265 	uint16_t company_id;
1266 	uint32_t min_delay;
1267 	uint32_t max_delay;
1268 	uint8_t status;
1269 
1270 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1271 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1272 
1273 	status = hci_vendor_read_ctlr_delay(cmd->codec_id.coding_format,
1274 					    company_id,
1275 					    vs_codec_id,
1276 					    cmd->transport,
1277 					    cmd->direction,
1278 					    cmd->codec_config_len,
1279 					    cmd->codec_config,
1280 					    &min_delay,
1281 					    &max_delay);
1282 
1283 	rp = hci_cmd_complete(evt, sizeof(*rp));
1284 	rp->status = status;
1285 	sys_put_le24(min_delay, rp->min_ctlr_delay);
1286 	sys_put_le24(max_delay, rp->max_ctlr_delay);
1287 }
1288 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1289 
info_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)1290 static int info_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
1291 			   struct net_buf **evt)
1292 {
1293 	switch (ocf) {
1294 	case BT_OCF(BT_HCI_OP_READ_LOCAL_VERSION_INFO):
1295 		read_local_version_info(cmd, evt);
1296 		break;
1297 
1298 	case BT_OCF(BT_HCI_OP_READ_SUPPORTED_COMMANDS):
1299 		read_supported_commands(cmd, evt);
1300 		break;
1301 
1302 	case BT_OCF(BT_HCI_OP_READ_LOCAL_FEATURES):
1303 		read_local_features(cmd, evt);
1304 		break;
1305 
1306 	case BT_OCF(BT_HCI_OP_READ_BD_ADDR):
1307 		read_bd_addr(cmd, evt);
1308 		break;
1309 
1310 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1311 	case BT_OCF(BT_HCI_OP_READ_CODECS_V2):
1312 		read_codecs_v2(cmd, evt);
1313 		break;
1314 
1315 	case BT_OCF(BT_HCI_OP_READ_CODEC_CAPABILITIES):
1316 		read_codec_capabilities(cmd, evt);
1317 		break;
1318 
1319 	case BT_OCF(BT_HCI_OP_READ_CTLR_DELAY):
1320 		read_ctlr_delay(cmd, evt);
1321 		break;
1322 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1323 
1324 	default:
1325 		return -EINVAL;
1326 	}
1327 
1328 	return 0;
1329 }
1330 
1331 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
read_rssi(struct net_buf * buf,struct net_buf ** evt)1332 static void read_rssi(struct net_buf *buf, struct net_buf **evt)
1333 {
1334 	struct bt_hci_cp_read_rssi *cmd = (void *)buf->data;
1335 	struct bt_hci_rp_read_rssi *rp;
1336 	uint16_t handle;
1337 
1338 	handle = sys_le16_to_cpu(cmd->handle);
1339 
1340 	rp = hci_cmd_complete(evt, sizeof(*rp));
1341 
1342 	rp->status = ll_rssi_get(handle, &rp->rssi);
1343 
1344 	rp->handle = sys_cpu_to_le16(handle);
1345 	/* The Link Layer currently returns RSSI as an absolute value */
1346 	rp->rssi = (!rp->status) ? -rp->rssi : 127;
1347 }
1348 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1349 
status_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)1350 static int status_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
1351 			     struct net_buf **evt)
1352 {
1353 	switch (ocf) {
1354 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1355 	case BT_OCF(BT_HCI_OP_READ_RSSI):
1356 		read_rssi(cmd, evt);
1357 		break;
1358 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1359 
1360 	default:
1361 		return -EINVAL;
1362 	}
1363 
1364 	return 0;
1365 }
1366 
le_set_event_mask(struct net_buf * buf,struct net_buf ** evt)1367 static void le_set_event_mask(struct net_buf *buf, struct net_buf **evt)
1368 {
1369 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
1370 
1371 	le_event_mask = sys_get_le64(cmd->events);
1372 
1373 	*evt = cmd_complete_status(0x00);
1374 }
1375 
le_read_buffer_size(struct net_buf * buf,struct net_buf ** evt)1376 static void le_read_buffer_size(struct net_buf *buf, struct net_buf **evt)
1377 {
1378 	struct bt_hci_rp_le_read_buffer_size *rp;
1379 
1380 	rp = hci_cmd_complete(evt, sizeof(*rp));
1381 
1382 	rp->status = 0x00;
1383 
1384 	rp->le_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1385 	rp->le_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1386 }
1387 
1388 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_read_buffer_size_v2(struct net_buf * buf,struct net_buf ** evt)1389 static void le_read_buffer_size_v2(struct net_buf *buf, struct net_buf **evt)
1390 {
1391 	struct bt_hci_rp_le_read_buffer_size_v2 *rp;
1392 
1393 	rp = hci_cmd_complete(evt, sizeof(*rp));
1394 
1395 	rp->status = 0x00;
1396 
1397 	rp->acl_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1398 	rp->acl_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1399 	rp->iso_max_len = sys_cpu_to_le16(CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE);
1400 	rp->iso_max_num = CONFIG_BT_CTLR_ISO_TX_BUFFERS;
1401 }
1402 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1403 
le_read_local_features(struct net_buf * buf,struct net_buf ** evt)1404 static void le_read_local_features(struct net_buf *buf, struct net_buf **evt)
1405 {
1406 	struct bt_hci_rp_le_read_local_features *rp;
1407 
1408 	rp = hci_cmd_complete(evt, sizeof(*rp));
1409 
1410 	rp->status = 0x00;
1411 
1412 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1413 	sys_put_le64(ll_feat_get(), rp->features);
1414 }
1415 
le_set_random_address(struct net_buf * buf,struct net_buf ** evt)1416 static void le_set_random_address(struct net_buf *buf, struct net_buf **evt)
1417 {
1418 	struct bt_hci_cp_le_set_random_address *cmd = (void *)buf->data;
1419 	uint8_t status;
1420 
1421 	status = ll_addr_set(1, &cmd->bdaddr.val[0]);
1422 
1423 	*evt = cmd_complete_status(status);
1424 }
1425 
1426 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
le_read_fal_size(struct net_buf * buf,struct net_buf ** evt)1427 static void le_read_fal_size(struct net_buf *buf, struct net_buf **evt)
1428 {
1429 	struct bt_hci_rp_le_read_fal_size *rp;
1430 
1431 	rp = hci_cmd_complete(evt, sizeof(*rp));
1432 	rp->status = 0x00;
1433 
1434 	rp->fal_size = ll_fal_size_get();
1435 }
1436 
le_clear_fal(struct net_buf * buf,struct net_buf ** evt)1437 static void le_clear_fal(struct net_buf *buf, struct net_buf **evt)
1438 {
1439 	uint8_t status;
1440 
1441 	status = ll_fal_clear();
1442 
1443 	*evt = cmd_complete_status(status);
1444 }
1445 
le_add_dev_to_fal(struct net_buf * buf,struct net_buf ** evt)1446 static void le_add_dev_to_fal(struct net_buf *buf, struct net_buf **evt)
1447 {
1448 	struct bt_hci_cp_le_add_dev_to_fal *cmd = (void *)buf->data;
1449 	uint8_t status;
1450 
1451 	status = ll_fal_add(&cmd->addr);
1452 
1453 	*evt = cmd_complete_status(status);
1454 }
1455 
le_rem_dev_from_fal(struct net_buf * buf,struct net_buf ** evt)1456 static void le_rem_dev_from_fal(struct net_buf *buf, struct net_buf **evt)
1457 {
1458 	struct bt_hci_cp_le_rem_dev_from_fal *cmd = (void *)buf->data;
1459 	uint8_t status;
1460 
1461 	status = ll_fal_remove(&cmd->addr);
1462 
1463 	*evt = cmd_complete_status(status);
1464 }
1465 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1466 
le_encrypt(struct net_buf * buf,struct net_buf ** evt)1467 static void le_encrypt(struct net_buf *buf, struct net_buf **evt)
1468 {
1469 	struct bt_hci_cp_le_encrypt *cmd = (void *)buf->data;
1470 	struct bt_hci_rp_le_encrypt *rp;
1471 	uint8_t enc_data[16];
1472 
1473 	ecb_encrypt(cmd->key, cmd->plaintext, enc_data, NULL);
1474 
1475 	rp = hci_cmd_complete(evt, sizeof(*rp));
1476 
1477 	rp->status = 0x00;
1478 	memcpy(rp->enc_data, enc_data, 16);
1479 }
1480 
le_rand(struct net_buf * buf,struct net_buf ** evt)1481 static void le_rand(struct net_buf *buf, struct net_buf **evt)
1482 {
1483 	struct bt_hci_rp_le_rand *rp;
1484 	uint8_t count = sizeof(rp->rand);
1485 
1486 	rp = hci_cmd_complete(evt, sizeof(*rp));
1487 	rp->status = 0x00;
1488 
1489 	lll_csrand_get(rp->rand, count);
1490 }
1491 
le_read_supp_states(struct net_buf * buf,struct net_buf ** evt)1492 static void le_read_supp_states(struct net_buf *buf, struct net_buf **evt)
1493 {
1494 	struct bt_hci_rp_le_read_supp_states *rp;
1495 	uint64_t states = 0U;
1496 
1497 	rp = hci_cmd_complete(evt, sizeof(*rp));
1498 	rp->status = 0x00;
1499 
1500 #define ST_ADV (BIT64(0)  | BIT64(1)  | BIT64(8)  | BIT64(9)  | BIT64(12) | \
1501 		BIT64(13) | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
1502 		BIT64(20) | BIT64(21))
1503 
1504 #define ST_SCA (BIT64(4)  | BIT64(5)  | BIT64(8)  | BIT64(9)  | BIT64(10) | \
1505 		BIT64(11) | BIT64(12) | BIT64(13) | BIT64(14) | BIT64(15) | \
1506 		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(26) | \
1507 		BIT64(27) | BIT64(30) | BIT64(31))
1508 
1509 #define ST_PER (BIT64(2)  | BIT64(3)  | BIT64(7)  | BIT64(10) | BIT64(11) | \
1510 		BIT64(14) | BIT64(15) | BIT64(20) | BIT64(21) | BIT64(26) | \
1511 		BIT64(27) | BIT64(29) | BIT64(30) | BIT64(31) | BIT64(32) | \
1512 		BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | BIT64(37) | \
1513 		BIT64(38) | BIT64(39) | BIT64(40) | BIT64(41))
1514 
1515 #define ST_CEN (BIT64(6)  | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
1516 		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(28) | \
1517 		BIT64(32) | BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | \
1518 		BIT64(37) | BIT64(41))
1519 
1520 #if defined(CONFIG_BT_BROADCASTER)
1521 	states |= ST_ADV;
1522 #else
1523 	states &= ~ST_ADV;
1524 #endif
1525 #if defined(CONFIG_BT_OBSERVER)
1526 	states |= ST_SCA;
1527 #else
1528 	states &= ~ST_SCA;
1529 #endif
1530 #if defined(CONFIG_BT_PERIPHERAL)
1531 	states |= ST_PER;
1532 #else
1533 	states &= ~ST_PER;
1534 #endif
1535 #if defined(CONFIG_BT_CENTRAL)
1536 	states |= ST_CEN;
1537 #else
1538 	states &= ~ST_CEN;
1539 #endif
1540 	/* All states and combinations supported except:
1541 	 * Initiating State + Passive Scanning
1542 	 * Initiating State + Active Scanning
1543 	 */
1544 	states &= ~(BIT64(22) | BIT64(23));
1545 	LOG_DBG("states: 0x%08x%08x", (uint32_t)(states >> 32), (uint32_t)(states & 0xffffffff));
1546 	sys_put_le64(states, rp->le_states);
1547 }
1548 
1549 #if defined(CONFIG_BT_BROADCASTER)
le_set_adv_param(struct net_buf * buf,struct net_buf ** evt)1550 static void le_set_adv_param(struct net_buf *buf, struct net_buf **evt)
1551 {
1552 	struct bt_hci_cp_le_set_adv_param *cmd = (void *)buf->data;
1553 	uint16_t min_interval;
1554 	uint8_t status;
1555 
1556 	if (adv_cmds_legacy_check(evt)) {
1557 		return;
1558 	}
1559 
1560 	min_interval = sys_le16_to_cpu(cmd->min_interval);
1561 
1562 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
1563 	    (cmd->type != BT_HCI_ADV_DIRECT_IND)) {
1564 		uint16_t max_interval = sys_le16_to_cpu(cmd->max_interval);
1565 
1566 		if ((min_interval > max_interval) ||
1567 		    (min_interval < 0x0020) ||
1568 		    (max_interval > 0x4000)) {
1569 			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
1570 			return;
1571 		}
1572 	}
1573 
1574 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1575 	status = ll_adv_params_set(0, 0, min_interval, cmd->type,
1576 				   cmd->own_addr_type, cmd->direct_addr.type,
1577 				   &cmd->direct_addr.a.val[0], cmd->channel_map,
1578 				   cmd->filter_policy, 0, 0, 0, 0, 0, 0);
1579 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1580 	status = ll_adv_params_set(min_interval, cmd->type,
1581 				   cmd->own_addr_type, cmd->direct_addr.type,
1582 				   &cmd->direct_addr.a.val[0], cmd->channel_map,
1583 				   cmd->filter_policy);
1584 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1585 
1586 	*evt = cmd_complete_status(status);
1587 }
1588 
le_read_adv_chan_tx_power(struct net_buf * buf,struct net_buf ** evt)1589 static void le_read_adv_chan_tx_power(struct net_buf *buf, struct net_buf **evt)
1590 {
1591 	struct bt_hci_rp_le_read_chan_tx_power *rp;
1592 
1593 	if (adv_cmds_legacy_check(evt)) {
1594 		return;
1595 	}
1596 
1597 	rp = hci_cmd_complete(evt, sizeof(*rp));
1598 
1599 	rp->status = 0x00;
1600 
1601 	rp->tx_power_level = 0;
1602 }
1603 
le_set_adv_data(struct net_buf * buf,struct net_buf ** evt)1604 static void le_set_adv_data(struct net_buf *buf, struct net_buf **evt)
1605 {
1606 	struct bt_hci_cp_le_set_adv_data *cmd = (void *)buf->data;
1607 	uint8_t status;
1608 
1609 	if (adv_cmds_legacy_check(evt)) {
1610 		return;
1611 	}
1612 
1613 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1614 	status = ll_adv_data_set(0, cmd->len, &cmd->data[0]);
1615 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1616 	status = ll_adv_data_set(cmd->len, &cmd->data[0]);
1617 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1618 
1619 	*evt = cmd_complete_status(status);
1620 }
1621 
le_set_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)1622 static void le_set_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
1623 {
1624 	struct bt_hci_cp_le_set_scan_rsp_data *cmd = (void *)buf->data;
1625 	uint8_t status;
1626 
1627 	if (adv_cmds_legacy_check(evt)) {
1628 		return;
1629 	}
1630 
1631 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1632 	status = ll_adv_scan_rsp_set(0, cmd->len, &cmd->data[0]);
1633 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1634 	status = ll_adv_scan_rsp_set(cmd->len, &cmd->data[0]);
1635 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1636 
1637 	*evt = cmd_complete_status(status);
1638 }
1639 
le_set_adv_enable(struct net_buf * buf,struct net_buf ** evt)1640 static void le_set_adv_enable(struct net_buf *buf, struct net_buf **evt)
1641 {
1642 	struct bt_hci_cp_le_set_adv_enable *cmd = (void *)buf->data;
1643 	uint8_t status;
1644 
1645 	if (adv_cmds_legacy_check(evt)) {
1646 		return;
1647 	}
1648 
1649 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
1650 #if defined(CONFIG_BT_HCI_MESH_EXT)
1651 	status = ll_adv_enable(0, cmd->enable, 0, 0, 0, 0, 0);
1652 #else /* !CONFIG_BT_HCI_MESH_EXT */
1653 	status = ll_adv_enable(0, cmd->enable, 0, 0);
1654 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1655 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
1656 	status = ll_adv_enable(cmd->enable);
1657 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
1658 
1659 	*evt = cmd_complete_status(status);
1660 }
1661 
1662 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1663 static void le_create_big(struct net_buf *buf, struct net_buf **evt)
1664 {
1665 	struct bt_hci_cp_le_create_big *cmd = (void *)buf->data;
1666 	uint32_t sdu_interval;
1667 	uint16_t max_latency;
1668 	uint8_t big_handle;
1669 	uint8_t adv_handle;
1670 	uint16_t max_sdu;
1671 	uint8_t status;
1672 
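	/* Map the HCI BIG handle and advertising set handle onto the
	 * controller's internal handles before creating the BIG.
	 */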
1673 	status = ll_adv_iso_by_hci_handle_new(cmd->big_handle, &big_handle);
1674 	if (status) {
1675 		*evt = cmd_status(status);
1676 		return;
1677 	}
1678 
1679 	status = ll_adv_set_by_hci_handle_get(cmd->adv_handle, &adv_handle);
1680 	if (status) {
1681 		*evt = cmd_status(status);
1682 		return;
1683 	}
1684 
1685 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1686 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1687 	max_latency = sys_le16_to_cpu(cmd->max_latency);
1688 
1689 	status = ll_big_create(big_handle, adv_handle, cmd->num_bis,
1690 			       sdu_interval, max_sdu, max_latency, cmd->rtn,
1691 			       cmd->phy, cmd->packing, cmd->framing,
1692 			       cmd->encryption, cmd->bcode);
1693 
1694 	*evt = cmd_status(status);
1695 }
1696 
1697 static void le_create_big_test(struct net_buf *buf, struct net_buf **evt)
1698 {
1699 	struct bt_hci_cp_le_create_big_test *cmd = (void *)buf->data;
1700 	uint32_t sdu_interval;
1701 	uint16_t iso_interval;
1702 	uint16_t max_sdu;
1703 	uint16_t max_pdu;
1704 	uint8_t status;
1705 
1706 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1707 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
1708 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1709 	max_pdu = sys_le16_to_cpu(cmd->max_pdu);
1710 
1711 	status = ll_big_test_create(cmd->big_handle, cmd->adv_handle,
1712 				    cmd->num_bis, sdu_interval, iso_interval,
1713 				    cmd->nse, max_sdu, max_pdu, cmd->phy,
1714 				    cmd->packing, cmd->framing, cmd->bn,
1715 				    cmd->irc, cmd->pto, cmd->encryption,
1716 				    cmd->bcode);
1717 
1718 	*evt = cmd_status(status);
1719 }
1720 
1721 static void le_terminate_big(struct net_buf *buf, struct net_buf **evt)
1722 {
1723 	struct bt_hci_cp_le_terminate_big *cmd = (void *)buf->data;
1724 	uint8_t status;
1725 
1726 	status = ll_big_terminate(cmd->big_handle, cmd->reason);
1727 
1728 	*evt = cmd_status(status);
1729 }
1730 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1731 #endif /* CONFIG_BT_BROADCASTER */
1732 
1733 #if defined(CONFIG_BT_OBSERVER)
1734 static void le_set_scan_param(struct net_buf *buf, struct net_buf **evt)
1735 {
1736 	struct bt_hci_cp_le_set_scan_param *cmd = (void *)buf->data;
1737 	uint16_t interval;
1738 	uint16_t window;
1739 	uint8_t status;
1740 
1741 	if (adv_cmds_legacy_check(evt)) {
1742 		return;
1743 	}
1744 
1745 	interval = sys_le16_to_cpu(cmd->interval);
1746 	window = sys_le16_to_cpu(cmd->window);
1747 
1748 	status = ll_scan_params_set(cmd->scan_type, interval, window,
1749 				    cmd->addr_type, cmd->filter_policy);
1750 
1751 	*evt = cmd_complete_status(status);
1752 }
1753 
1754 static void le_set_scan_enable(struct net_buf *buf, struct net_buf **evt)
1755 {
1756 	struct bt_hci_cp_le_set_scan_enable *cmd = (void *)buf->data;
1757 	uint8_t status;
1758 
1759 	if (adv_cmds_legacy_check(evt)) {
1760 		return;
1761 	}
1762 
1763 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
1764 	/* Initialize duplicate filtering */
1765 	if (cmd->enable && cmd->filter_dup) {
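		/* The empty if (0) lets each conditionally compiled branch
		 * below attach as an else-if, keeping the chain valid for any
		 * Kconfig combination.
		 */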
1766 		if (0) {
1767 
1768 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1769 		} else if (dup_count == DUP_FILTER_DISABLED) {
1770 			dup_scan = true;
1771 
1772 			/* All entries reset */
1773 			dup_count = 0;
1774 			dup_curr = 0U;
1775 		} else if (!dup_scan) {
1776 			dup_scan = true;
1777 			dup_ext_adv_reset();
1778 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1779 
1780 		} else {
1781 			/* All entries reset */
1782 			dup_count = 0;
1783 			dup_curr = 0U;
1784 		}
1785 	} else {
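		/* Scanning disabled or duplicate filtering not requested:
		 * mark duplicate filtering as disabled.
		 */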
1786 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1787 		dup_scan = false;
1788 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1789 		dup_count = DUP_FILTER_DISABLED;
1790 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1791 	}
1792 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
1793 
1794 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1795 	status = ll_scan_enable(cmd->enable, 0, 0);
1796 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1797 	status = ll_scan_enable(cmd->enable);
1798 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1799 
1800 	/* NOTE: As duplicate filtering is implemented here in the HCI source
1801 	 *       code, enabling scanning that is already enabled shall succeed
1802 	 *       once the duplicate filter updates in the statements above have
1803 	 *       been handled. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
1804 	 */
1805 	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
1806 	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
1807 		status = BT_HCI_ERR_SUCCESS;
1808 	}
1809 
1810 	*evt = cmd_complete_status(status);
1811 }
1812 
1813 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1814 static void le_big_create_sync(struct net_buf *buf, struct net_buf **evt)
1815 {
1816 	struct bt_hci_cp_le_big_create_sync *cmd = (void *)buf->data;
1817 	uint8_t status;
1818 	uint16_t sync_handle;
1819 	uint16_t sync_timeout;
1820 
1821 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
1822 	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
1823 
1824 	status = ll_big_sync_create(cmd->big_handle, sync_handle,
1825 				    cmd->encryption, cmd->bcode, cmd->mse,
1826 				    sync_timeout, cmd->num_bis, cmd->bis);
1827 
1828 	*evt = cmd_status(status);
1829 }
1830 
1831 
1832 static void le_big_terminate_sync(struct net_buf *buf, struct net_buf **evt,
1833 				  void **node_rx)
1834 {
1835 	struct bt_hci_cp_le_big_terminate_sync *cmd = (void *)buf->data;
1836 	struct bt_hci_rp_le_big_terminate_sync *rp;
1837 	uint8_t big_handle;
1838 	uint8_t status;
1839 
1840 	big_handle = cmd->big_handle;
1841 	status = ll_big_sync_terminate(big_handle, node_rx);
1842 
1843 	rp = hci_cmd_complete(evt, sizeof(*rp));
1844 	rp->status = status;
1845 	rp->big_handle = big_handle;
1846 }
1847 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1848 #endif /* CONFIG_BT_OBSERVER */
1849 
1850 #if defined(CONFIG_BT_CENTRAL)
1851 static uint8_t check_cconn_params(bool ext, uint16_t scan_interval,
1852 				  uint16_t scan_window,
1853 				  uint16_t conn_interval_max,
1854 				  uint16_t conn_latency,
1855 				  uint16_t supervision_timeout)
1856 {
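	/* Values are in HCI units: scan interval and window in 0.625 ms units,
	 * connection interval in 1.25 ms units, latency in connection events,
	 * and supervision timeout in 10 ms units.
	 */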
1857 	if (scan_interval < 0x0004 || scan_window < 0x0004 ||
1858 	    (!ext && (scan_interval > 0x4000 || scan_window > 0x4000))) {
1859 		return BT_HCI_ERR_INVALID_PARAM;
1860 	}
1861 
1862 	if (conn_interval_max < 0x0006 || conn_interval_max > 0x0C80) {
1863 		return BT_HCI_ERR_INVALID_PARAM;
1864 	}
1865 
1866 	if (conn_latency > 0x01F3) {
1867 		return BT_HCI_ERR_INVALID_PARAM;
1868 	}
1869 
1870 	if (supervision_timeout < 0x000A || supervision_timeout > 0x0C80) {
1871 		return BT_HCI_ERR_INVALID_PARAM;
1872 	}
1873 
1874 	/* sto * 10ms > (1 + lat) * ci * 1.25ms * 2
1875 	 * sto * 10 > (1 + lat) * ci * 2.5
1876 	 * sto * 2 > (1 + lat) * ci * 0.5
1877 	 * sto * 4 > (1 + lat) * ci
1878 	 */
1879 	if ((supervision_timeout << 2) <= ((1 + conn_latency) *
1880 					   conn_interval_max)) {
1881 		return BT_HCI_ERR_INVALID_PARAM;
1882 	}
1883 
1884 	return 0;
1885 }
1886 
1887 static void le_create_connection(struct net_buf *buf, struct net_buf **evt)
1888 {
1889 	struct bt_hci_cp_le_create_conn *cmd = (void *)buf->data;
1890 	uint16_t supervision_timeout;
1891 	uint16_t conn_interval_max;
1892 	uint16_t scan_interval;
1893 	uint16_t conn_latency;
1894 	uint16_t scan_window;
1895 	uint8_t status;
1896 
1897 	if (adv_cmds_legacy_check(NULL)) {
1898 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
1899 		return;
1900 	}
1901 
1902 	scan_interval = sys_le16_to_cpu(cmd->scan_interval);
1903 	scan_window = sys_le16_to_cpu(cmd->scan_window);
1904 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
1905 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
1906 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
1907 
1908 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
1909 		status = check_cconn_params(false, scan_interval,
1910 					    scan_window,
1911 					    conn_interval_max,
1912 					    conn_latency,
1913 					    supervision_timeout);
1914 		if (status) {
1915 			*evt = cmd_status(status);
1916 			return;
1917 		}
1918 	}
1919 
1920 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1921 	status = ll_create_connection(scan_interval, scan_window,
1922 				      cmd->filter_policy,
1923 				      cmd->peer_addr.type,
1924 				      &cmd->peer_addr.a.val[0],
1925 				      cmd->own_addr_type, conn_interval_max,
1926 				      conn_latency, supervision_timeout,
1927 				      PHY_LEGACY);
1928 	if (status) {
1929 		*evt = cmd_status(status);
1930 		return;
1931 	}
1932 
1933 	status = ll_connect_enable(0U);
1934 
1935 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1936 	status = ll_create_connection(scan_interval, scan_window,
1937 				      cmd->filter_policy,
1938 				      cmd->peer_addr.type,
1939 				      &cmd->peer_addr.a.val[0],
1940 				      cmd->own_addr_type, conn_interval_max,
1941 				      conn_latency, supervision_timeout);
1942 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1943 
1944 	*evt = cmd_status(status);
1945 }
1946 
1947 static void le_create_conn_cancel(struct net_buf *buf, struct net_buf **evt,
1948 				  void **node_rx)
1949 {
1950 	uint8_t status;
1951 
1952 	status = ll_connect_disable(node_rx);
1953 
1954 	*evt = cmd_complete_status(status);
1955 }
1956 
1957 static void le_set_host_chan_classif(struct net_buf *buf, struct net_buf **evt)
1958 {
1959 	struct bt_hci_cp_le_set_host_chan_classif *cmd = (void *)buf->data;
1960 	uint8_t status;
1961 
1962 	status = ll_chm_update(&cmd->ch_map[0]);
1963 
1964 	*evt = cmd_complete_status(status);
1965 }
1966 
1967 #if defined(CONFIG_BT_CTLR_LE_ENC)
1968 static void le_start_encryption(struct net_buf *buf, struct net_buf **evt)
1969 {
1970 	struct bt_hci_cp_le_start_encryption *cmd = (void *)buf->data;
1971 	uint16_t handle;
1972 	uint8_t status;
1973 
1974 	handle = sys_le16_to_cpu(cmd->handle);
1975 	status = ll_enc_req_send(handle,
1976 				 (uint8_t *)&cmd->rand,
1977 				 (uint8_t *)&cmd->ediv,
1978 				 &cmd->ltk[0]);
1979 
1980 	*evt = cmd_status(status);
1981 }
1982 #endif /* CONFIG_BT_CTLR_LE_ENC */
1983 
1984 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1985 static void le_set_cig_parameters(struct net_buf *buf, struct net_buf **evt)
1986 {
1987 	struct bt_hci_cp_le_set_cig_params *cmd = (void *)buf->data;
1988 	struct bt_hci_rp_le_set_cig_params *rp;
1989 	uint32_t c_interval;
1990 	uint32_t p_interval;
1991 	uint16_t c_latency;
1992 	uint16_t p_latency;
1993 	uint8_t cis_count;
1994 	uint8_t cig_id;
1995 	uint8_t status;
1996 	uint8_t i;
1997 
1998 	c_interval = sys_get_le24(cmd->c_interval);
1999 	p_interval = sys_get_le24(cmd->p_interval);
2000 	c_latency = sys_le16_to_cpu(cmd->c_latency);
2001 	p_latency = sys_le16_to_cpu(cmd->p_latency);
2002 
2003 	cig_id = cmd->cig_id;
2004 	cis_count = cmd->num_cis;
2005 
2006 	/* Create CIG or start modifying existing CIG */
2007 	status = ll_cig_parameters_open(cig_id, c_interval, p_interval,
2008 					cmd->sca, cmd->packing, cmd->framing,
2009 					c_latency, p_latency, cis_count);
2010 
2011 	/* Configure individual CISes */
2012 	for (i = 0; !status && i < cis_count; i++) {
2013 		struct bt_hci_cis_params *params = &cmd->cis[i];
2014 		uint16_t c_sdu;
2015 		uint16_t p_sdu;
2016 
2017 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2018 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2019 
2020 		status = ll_cis_parameters_set(params->cis_id, c_sdu, p_sdu,
2021 					       params->c_phy, params->p_phy,
2022 					       params->c_rtn, params->p_rtn);
2023 	}
2024 
2025 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2026 	rp->cig_id = cig_id;
2027 
2028 	/* Only apply parameters if all went well */
2029 	if (!status) {
2030 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2031 
2032 		status = ll_cig_parameters_commit(cig_id, handles);
2033 
2034 		if (status == BT_HCI_ERR_SUCCESS) {
2035 			for (uint8_t i = 0; i < cis_count; i++) {
2036 				rp->handle[i] = sys_cpu_to_le16(handles[i]);
2037 			}
2038 		}
2039 	}
2040 
2041 	rp->num_handles = status ? 0U : cis_count;
2042 	rp->status = status;
2043 }
2044 
2045 static void le_set_cig_params_test(struct net_buf *buf, struct net_buf **evt)
2046 {
2047 	struct bt_hci_cp_le_set_cig_params_test *cmd = (void *)buf->data;
2048 	struct bt_hci_rp_le_set_cig_params_test *rp;
2049 
2050 	uint32_t c_interval;
2051 	uint32_t p_interval;
2052 	uint16_t iso_interval;
2053 	uint8_t cis_count;
2054 	uint8_t cig_id;
2055 	uint8_t status;
2056 	uint8_t i;
2057 
2058 	c_interval = sys_get_le24(cmd->c_interval);
2059 	p_interval = sys_get_le24(cmd->p_interval);
2060 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
2061 
2062 	cig_id = cmd->cig_id;
2063 	cis_count = cmd->num_cis;
2064 
2065 	/* Create CIG or start modifying existing CIG */
2066 	status = ll_cig_parameters_test_open(cig_id, c_interval,
2067 					     p_interval, cmd->c_ft,
2068 					     cmd->p_ft, iso_interval,
2069 					     cmd->sca, cmd->packing,
2070 					     cmd->framing,
2071 					     cis_count);
2072 
2073 	/* Configure individual CISes */
2074 	for (i = 0; !status && i < cis_count; i++) {
2075 		struct bt_hci_cis_params_test *params = &cmd->cis[i];
2076 		uint16_t c_sdu;
2077 		uint16_t p_sdu;
2078 		uint16_t c_pdu;
2079 		uint16_t p_pdu;
2080 		uint8_t  nse;
2081 
2082 		nse   = params->nse;
2083 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2084 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2085 		c_pdu = sys_le16_to_cpu(params->c_pdu);
2086 		p_pdu = sys_le16_to_cpu(params->p_pdu);
2087 
2088 		status = ll_cis_parameters_test_set(params->cis_id, nse,
2089 						    c_sdu, p_sdu,
2090 						    c_pdu, p_pdu,
2091 						    params->c_phy,
2092 						    params->p_phy,
2093 						    params->c_bn,
2094 						    params->p_bn);
2095 	}
2096 
2097 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2098 	rp->cig_id = cig_id;
2099 
2100 	/* Only apply parameters if all went well */
2101 	if (!status) {
2102 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2103 
2104 		status = ll_cig_parameters_commit(cig_id, handles);
2105 
2106 		if (status == BT_HCI_ERR_SUCCESS) {
2107 			for (uint8_t i = 0; i < cis_count; i++) {
2108 				rp->handle[i] = sys_cpu_to_le16(handles[i]);
2109 			}
2110 		}
2111 	}
2112 
2113 	rp->num_handles = status ? 0U : cis_count;
2114 	rp->status = status;
2115 }
2116 
2117 static void le_create_cis(struct net_buf *buf, struct net_buf **evt)
2118 {
2119 	uint16_t handle_used[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP] = {0};
2120 	struct bt_hci_cp_le_create_cis *cmd = (void *)buf->data;
2121 	uint8_t status;
2122 	uint8_t i;
2123 
2124 	/*
2125 	 * Creating new CISes is disallowed until all previous CIS
2126 	 * established events have been generated
2127 	 */
2128 	if (cis_pending_count) {
2129 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2130 		return;
2131 	}
2132 
2133 	/* Check all handles before actually starting to create CISes */
2134 	status = 0x00;
2135 	for (i = 0; !status && i < cmd->num_cis; i++) {
2136 		uint16_t cis_handle;
2137 		uint16_t acl_handle;
2138 		uint8_t cis_idx;
2139 
2140 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2141 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2142 
2143 		cis_idx = LL_CIS_IDX_FROM_HANDLE(cis_handle);
2144 		if (handle_used[cis_idx]) {
2145 			/* Handle must be unique in request */
2146 			status = BT_HCI_ERR_INVALID_PARAM;
2147 			break;
2148 		}
2149 
2150 		handle_used[cis_idx]++;
2151 		status = ll_cis_create_check(cis_handle, acl_handle);
2152 	}
2153 
2154 	if (status) {
2155 		*evt = cmd_status(status);
2156 		return;
2157 	}
2158 
2159 	/*
2160 	 * Actually create the CISes; any errors are to be reported
2161 	 * through CIS established events
2162 	 */
2163 	cis_pending_count = cmd->num_cis;
2164 	for (i = 0; i < cmd->num_cis; i++) {
2165 		uint16_t cis_handle;
2166 		uint16_t acl_handle;
2167 
2168 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2169 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2170 		ll_cis_create(cis_handle, acl_handle);
2171 	}
2172 
2173 	*evt = cmd_status(status);
2174 }
2175 
2176 static void le_remove_cig(struct net_buf *buf, struct net_buf **evt)
2177 {
2178 	struct bt_hci_cp_le_remove_cig *cmd = (void *)buf->data;
2179 	struct bt_hci_rp_le_remove_cig *rp;
2180 	uint8_t status;
2181 
2182 	status = ll_cig_remove(cmd->cig_id);
2183 
2184 	rp = hci_cmd_complete(evt, sizeof(*rp));
2185 	rp->status = status;
2186 	rp->cig_id = cmd->cig_id;
2187 }
2188 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
2189 
2190 #endif /* CONFIG_BT_CENTRAL */
2191 
2192 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
2193 static void le_iso_transmit_test(struct net_buf *buf, struct net_buf **evt)
2194 {
2195 	struct bt_hci_cp_le_iso_transmit_test *cmd = (void *)buf->data;
2196 	struct bt_hci_rp_le_iso_transmit_test *rp;
2197 	uint16_t handle;
2198 	uint8_t status;
2199 
2200 	handle = sys_le16_to_cpu(cmd->handle);
2201 
2202 	status = ll_iso_transmit_test(handle, cmd->payload_type);
2203 
2204 	rp = hci_cmd_complete(evt, sizeof(*rp));
2205 	rp->status = status;
2206 	rp->handle = sys_cpu_to_le16(handle);
2207 }
2208 
2209 static void le_read_iso_tx_sync(struct net_buf *buf, struct net_buf **evt)
2210 {
2211 	struct bt_hci_cp_le_read_iso_tx_sync *cmd = (void *)buf->data;
2212 	struct bt_hci_rp_le_read_iso_tx_sync *rp;
2213 	uint16_t handle_le16;
2214 	uint32_t timestamp;
2215 	uint32_t offset;
2216 	uint16_t handle;
2217 	uint8_t status;
2218 	uint16_t seq;
2219 
2220 	handle_le16 = cmd->handle;
2221 	handle = sys_le16_to_cpu(handle_le16);
2222 
2223 	status = ll_read_iso_tx_sync(handle, &seq, &timestamp, &offset);
2224 
2225 	rp = hci_cmd_complete(evt, sizeof(*rp));
2226 	rp->status = status;
2227 	rp->handle = handle_le16;
2228 	rp->seq       = sys_cpu_to_le16(seq);
2229 	rp->timestamp = sys_cpu_to_le32(timestamp);
2230 	sys_put_le24(offset, rp->offset);
2231 }
2232 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2233 
2234 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
2235 static void le_iso_receive_test(struct net_buf *buf, struct net_buf **evt)
2236 {
2237 	struct bt_hci_cp_le_iso_receive_test *cmd = (void *)buf->data;
2238 	struct bt_hci_rp_le_iso_receive_test *rp;
2239 	uint16_t handle;
2240 	uint8_t status;
2241 
2242 	handle = sys_le16_to_cpu(cmd->handle);
2243 
2244 	status = ll_iso_receive_test(handle, cmd->payload_type);
2245 
2246 	rp = hci_cmd_complete(evt, sizeof(*rp));
2247 	rp->status = status;
2248 	rp->handle = sys_cpu_to_le16(handle);
2249 }
2250 
2251 static void le_iso_read_test_counters(struct net_buf *buf, struct net_buf **evt)
2252 {
2253 	struct bt_hci_cp_le_read_test_counters *cmd = (void *)buf->data;
2254 	struct bt_hci_rp_le_read_test_counters *rp;
2255 	uint32_t received_cnt;
2256 	uint32_t missed_cnt;
2257 	uint32_t failed_cnt;
2258 	uint16_t handle;
2259 	uint8_t status;
2260 
2261 	handle = sys_le16_to_cpu(cmd->handle);
2262 	status = ll_iso_read_test_counters(handle, &received_cnt,
2263 					   &missed_cnt, &failed_cnt);
2264 
2265 	rp = hci_cmd_complete(evt, sizeof(*rp));
2266 	rp->status = status;
2267 	rp->handle = sys_cpu_to_le16(handle);
2268 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2269 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2270 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2271 }
2272 
2273 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
2274 static void le_read_iso_link_quality(struct net_buf *buf, struct net_buf **evt)
2275 {
2276 	struct bt_hci_cp_le_read_iso_link_quality *cmd = (void *)buf->data;
2277 	struct bt_hci_rp_le_read_iso_link_quality *rp;
2278 	uint32_t tx_last_subevent_packets;
2279 	uint32_t retransmitted_packets;
2280 	uint32_t rx_unreceived_packets;
2281 	uint32_t tx_unacked_packets;
2282 	uint32_t tx_flushed_packets;
2283 	uint32_t crc_error_packets;
2284 	uint32_t duplicate_packets;
2285 	uint16_t handle_le16;
2286 	uint16_t handle;
2287 	uint8_t status;
2288 
2289 	handle_le16 = cmd->handle;
2290 	handle = sys_le16_to_cpu(handle_le16);
2291 	status = ll_read_iso_link_quality(handle, &tx_unacked_packets,
2292 					  &tx_flushed_packets,
2293 					  &tx_last_subevent_packets,
2294 					  &retransmitted_packets,
2295 					  &crc_error_packets,
2296 					  &rx_unreceived_packets,
2297 					  &duplicate_packets);
2298 
2299 	rp = hci_cmd_complete(evt, sizeof(*rp));
2300 	rp->status = status;
2301 	rp->handle = handle_le16;
2302 	rp->tx_unacked_packets = sys_cpu_to_le32(tx_unacked_packets);
2303 	rp->tx_flushed_packets = sys_cpu_to_le32(tx_flushed_packets);
2304 	rp->tx_last_subevent_packets =
2305 		sys_cpu_to_le32(tx_last_subevent_packets);
2306 	rp->retransmitted_packets = sys_cpu_to_le32(retransmitted_packets);
2307 	rp->crc_error_packets     = sys_cpu_to_le32(crc_error_packets);
2308 	rp->rx_unreceived_packets = sys_cpu_to_le32(rx_unreceived_packets);
2309 	rp->duplicate_packets     = sys_cpu_to_le32(duplicate_packets);
2310 }
2311 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
2312 
2313 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
2314 
2315 #if defined(CONFIG_BT_CTLR_ISO)
2316 static void le_setup_iso_path(struct net_buf *buf, struct net_buf **evt)
2317 {
2318 	struct bt_hci_cp_le_setup_iso_path *cmd = (void *)buf->data;
2319 	struct bt_hci_rp_le_setup_iso_path *rp;
2320 	uint32_t controller_delay;
2321 	uint8_t *codec_config;
2322 	uint8_t coding_format;
2323 	uint16_t vs_codec_id;
2324 	uint16_t company_id;
2325 	uint16_t handle;
2326 	uint8_t status;
2327 
2328 	handle = sys_le16_to_cpu(cmd->handle);
2329 	coding_format = cmd->codec_id.coding_format;
2330 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
2331 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
2332 	controller_delay = sys_get_le24(cmd->controller_delay);
2333 	codec_config = &cmd->codec_config[0];
2334 
2335 	status = ll_setup_iso_path(handle, cmd->path_dir, cmd->path_id,
2336 				   coding_format, company_id, vs_codec_id,
2337 				   controller_delay, cmd->codec_config_len,
2338 				   codec_config);
2339 
2340 	rp = hci_cmd_complete(evt, sizeof(*rp));
2341 	rp->status = status;
2342 	rp->handle = sys_cpu_to_le16(handle);
2343 }
2344 
2345 static void le_remove_iso_path(struct net_buf *buf, struct net_buf **evt)
2346 {
2347 	struct bt_hci_cp_le_remove_iso_path *cmd = (void *)buf->data;
2348 	struct bt_hci_rp_le_remove_iso_path *rp;
2349 	uint16_t handle;
2350 	uint8_t status;
2351 
2352 	handle = sys_le16_to_cpu(cmd->handle);
2353 
2354 	status = ll_remove_iso_path(handle, cmd->path_dir);
2355 
2356 	rp = hci_cmd_complete(evt, sizeof(*rp));
2357 	rp->status = status;
2358 	rp->handle = sys_cpu_to_le16(handle);
2359 }
2360 
2361 static void le_iso_test_end(struct net_buf *buf, struct net_buf **evt)
2362 {
2363 	struct bt_hci_cp_le_iso_test_end *cmd = (void *)buf->data;
2364 	struct bt_hci_rp_le_iso_test_end *rp;
2365 	uint32_t received_cnt;
2366 	uint32_t missed_cnt;
2367 	uint32_t failed_cnt;
2368 	uint16_t handle;
2369 	uint8_t status;
2370 
2371 	handle = sys_le16_to_cpu(cmd->handle);
2372 	status = ll_iso_test_end(handle, &received_cnt, &missed_cnt,
2373 				 &failed_cnt);
2374 
2375 	rp = hci_cmd_complete(evt, sizeof(*rp));
2376 	rp->status = status;
2377 	rp->handle = sys_cpu_to_le16(handle);
2378 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2379 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2380 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2381 }
2382 #endif /* CONFIG_BT_CTLR_ISO */
2383 
2384 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
2385 static void le_set_host_feature(struct net_buf *buf, struct net_buf **evt)
2386 {
2387 	struct bt_hci_cp_le_set_host_feature *cmd = (void *)buf->data;
2388 	struct bt_hci_rp_le_set_host_feature *rp;
2389 	uint8_t status;
2390 
2391 	status = ll_set_host_feature(cmd->bit_number, cmd->bit_value);
2392 
2393 	rp = hci_cmd_complete(evt, sizeof(*rp));
2394 	rp->status = status;
2395 }
2396 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
2397 
2398 #if defined(CONFIG_BT_PERIPHERAL)
2399 #if defined(CONFIG_BT_CTLR_LE_ENC)
2400 static void le_ltk_req_reply(struct net_buf *buf, struct net_buf **evt)
2401 {
2402 	struct bt_hci_cp_le_ltk_req_reply *cmd = (void *)buf->data;
2403 	struct bt_hci_rp_le_ltk_req_reply *rp;
2404 	uint16_t handle;
2405 	uint8_t status;
2406 
2407 	handle = sys_le16_to_cpu(cmd->handle);
2408 	status = ll_start_enc_req_send(handle, 0x00, &cmd->ltk[0]);
2409 
2410 	rp = hci_cmd_complete(evt, sizeof(*rp));
2411 	rp->status = status;
2412 	rp->handle = sys_cpu_to_le16(handle);
2413 }
2414 
2415 static void le_ltk_req_neg_reply(struct net_buf *buf, struct net_buf **evt)
2416 {
2417 	struct bt_hci_cp_le_ltk_req_neg_reply *cmd = (void *)buf->data;
2418 	struct bt_hci_rp_le_ltk_req_neg_reply *rp;
2419 	uint16_t handle;
2420 	uint8_t status;
2421 
2422 	handle = sys_le16_to_cpu(cmd->handle);
2423 	status = ll_start_enc_req_send(handle, BT_HCI_ERR_PIN_OR_KEY_MISSING,
2424 				       NULL);
2425 
2426 	rp = hci_cmd_complete(evt, sizeof(*rp));
2427 	rp->status = status;
2428 	rp->handle = sys_cpu_to_le16(handle);
2429 }
2430 #endif /* CONFIG_BT_CTLR_LE_ENC */
2431 
2432 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
2433 static void le_accept_cis(struct net_buf *buf, struct net_buf **evt)
2434 {
2435 	struct bt_hci_cp_le_accept_cis *cmd = (void *)buf->data;
2436 	uint16_t handle;
2437 	uint8_t status;
2438 
2439 	handle = sys_le16_to_cpu(cmd->handle);
2440 	status = ll_cis_accept(handle);
2441 	*evt = cmd_status(status);
2442 }
2443 
2444 static void le_reject_cis(struct net_buf *buf, struct net_buf **evt)
2445 {
2446 	struct bt_hci_cp_le_reject_cis *cmd = (void *)buf->data;
2447 	struct bt_hci_rp_le_reject_cis *rp;
2448 	uint16_t handle;
2449 	uint8_t status;
2450 
2451 	handle = sys_le16_to_cpu(cmd->handle);
2452 	status = ll_cis_reject(handle, cmd->reason);
2453 
2454 	rp = hci_cmd_complete(evt, sizeof(*rp));
2455 	rp->status = status;
2456 	rp->handle = sys_cpu_to_le16(handle);
2457 }
2458 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
2459 
2460 #endif /* CONFIG_BT_PERIPHERAL */
2461 
2462 #if defined(CONFIG_BT_CONN)
2463 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
2464 static void le_req_peer_sca(struct net_buf *buf, struct net_buf **evt)
2465 {
2466 	struct bt_hci_cp_le_req_peer_sca *cmd = (void *)buf->data;
2467 	uint16_t handle;
2468 	uint8_t status;
2469 
2470 	handle = sys_le16_to_cpu(cmd->handle);
2471 	status = ll_req_peer_sca(handle);
2472 
2473 	*evt = cmd_status(status);
2474 }
2475 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
2476 
2477 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
2478 static void le_read_remote_features(struct net_buf *buf, struct net_buf **evt)
2479 {
2480 	struct bt_hci_cp_le_read_remote_features *cmd = (void *)buf->data;
2481 	uint16_t handle;
2482 	uint8_t status;
2483 
2484 	handle = sys_le16_to_cpu(cmd->handle);
2485 	status = ll_feature_req_send(handle);
2486 
2487 	*evt = cmd_status(status);
2488 }
2489 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
2490 
2491 static void le_read_chan_map(struct net_buf *buf, struct net_buf **evt)
2492 {
2493 	struct bt_hci_cp_le_read_chan_map *cmd = (void *)buf->data;
2494 	struct bt_hci_rp_le_read_chan_map *rp;
2495 	uint16_t handle;
2496 	uint8_t status;
2497 
2498 	handle = sys_le16_to_cpu(cmd->handle);
2499 
2500 	rp = hci_cmd_complete(evt, sizeof(*rp));
2501 
2502 	status = ll_chm_get(handle, rp->ch_map);
2503 
2504 	rp->status = status;
2505 	rp->handle = sys_cpu_to_le16(handle);
2506 }
2507 
2508 static void le_conn_update(struct net_buf *buf, struct net_buf **evt)
2509 {
2510 	struct hci_cp_le_conn_update *cmd = (void *)buf->data;
2511 	uint16_t supervision_timeout;
2512 	uint16_t conn_interval_min;
2513 	uint16_t conn_interval_max;
2514 	uint16_t conn_latency;
2515 	uint16_t handle;
2516 	uint8_t status;
2517 
2518 	handle = sys_le16_to_cpu(cmd->handle);
2519 	conn_interval_min = sys_le16_to_cpu(cmd->conn_interval_min);
2520 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
2521 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
2522 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
2523 
2524 	status = ll_conn_update(handle, 0, 0, conn_interval_min,
2525 				conn_interval_max, conn_latency,
2526 				supervision_timeout, NULL);
2527 
2528 	*evt = cmd_status(status);
2529 }
2530 
2531 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2532 static void le_conn_param_req_reply(struct net_buf *buf, struct net_buf **evt)
2533 {
2534 	struct bt_hci_cp_le_conn_param_req_reply *cmd = (void *)buf->data;
2535 	struct bt_hci_rp_le_conn_param_req_reply *rp;
2536 	uint16_t interval_min;
2537 	uint16_t interval_max;
2538 	uint16_t latency;
2539 	uint16_t timeout;
2540 	uint16_t handle;
2541 	uint8_t status;
2542 
2543 	handle = sys_le16_to_cpu(cmd->handle);
2544 	interval_min = sys_le16_to_cpu(cmd->interval_min);
2545 	interval_max = sys_le16_to_cpu(cmd->interval_max);
2546 	latency = sys_le16_to_cpu(cmd->latency);
2547 	timeout = sys_le16_to_cpu(cmd->timeout);
2548 
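	/* The second argument (2), in contrast to the 0 used in
	 * le_conn_update(), marks this as a reply to the peer's Connection
	 * Parameter Request; the negative reply below additionally passes the
	 * rejection reason.
	 */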
2549 	status = ll_conn_update(handle, 2, 0, interval_min, interval_max,
2550 				latency, timeout, NULL);
2551 
2552 	rp = hci_cmd_complete(evt, sizeof(*rp));
2553 	rp->status = status;
2554 	rp->handle = sys_cpu_to_le16(handle);
2555 }
2556 
2557 static void le_conn_param_req_neg_reply(struct net_buf *buf,
2558 					struct net_buf **evt)
2559 {
2560 	struct bt_hci_cp_le_conn_param_req_neg_reply *cmd = (void *)buf->data;
2561 	struct bt_hci_rp_le_conn_param_req_neg_reply *rp;
2562 	uint16_t handle;
2563 	uint8_t status;
2564 
2565 	handle = sys_le16_to_cpu(cmd->handle);
2566 	status = ll_conn_update(handle, 2, cmd->reason, 0, 0, 0, 0, NULL);
2567 
2568 	rp = hci_cmd_complete(evt, sizeof(*rp));
2569 	rp->status = status;
2570 	rp->handle = sys_cpu_to_le16(handle);
2571 }
2572 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2573 
2574 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2575 static void le_set_data_len(struct net_buf *buf, struct net_buf **evt)
2576 {
2577 	struct bt_hci_cp_le_set_data_len *cmd = (void *)buf->data;
2578 	struct bt_hci_rp_le_set_data_len *rp;
2579 	uint16_t tx_octets;
2580 	uint16_t tx_time;
2581 	uint16_t handle;
2582 	uint8_t status;
2583 
2584 	handle = sys_le16_to_cpu(cmd->handle);
2585 	tx_octets = sys_le16_to_cpu(cmd->tx_octets);
2586 	tx_time = sys_le16_to_cpu(cmd->tx_time);
2587 	status = ll_length_req_send(handle, tx_octets, tx_time);
2588 
2589 	rp = hci_cmd_complete(evt, sizeof(*rp));
2590 	rp->status = status;
2591 	rp->handle = sys_cpu_to_le16(handle);
2592 }
2593 
2594 static void le_read_default_data_len(struct net_buf *buf, struct net_buf **evt)
2595 {
2596 	struct bt_hci_rp_le_read_default_data_len *rp;
2597 	uint16_t max_tx_octets;
2598 	uint16_t max_tx_time;
2599 
2600 	rp = hci_cmd_complete(evt, sizeof(*rp));
2601 
2602 	ll_length_default_get(&max_tx_octets, &max_tx_time);
2603 
2604 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2605 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2606 	rp->status = 0x00;
2607 }
2608 
2609 static void le_write_default_data_len(struct net_buf *buf,
2610 				      struct net_buf **evt)
2611 {
2612 	struct bt_hci_cp_le_write_default_data_len *cmd = (void *)buf->data;
2613 	uint16_t max_tx_octets;
2614 	uint16_t max_tx_time;
2615 	uint8_t status;
2616 
2617 	max_tx_octets = sys_le16_to_cpu(cmd->max_tx_octets);
2618 	max_tx_time = sys_le16_to_cpu(cmd->max_tx_time);
2619 	status = ll_length_default_set(max_tx_octets, max_tx_time);
2620 
2621 	*evt = cmd_complete_status(status);
2622 }
2623 
2624 static void le_read_max_data_len(struct net_buf *buf, struct net_buf **evt)
2625 {
2626 	struct bt_hci_rp_le_read_max_data_len *rp;
2627 	uint16_t max_tx_octets;
2628 	uint16_t max_tx_time;
2629 	uint16_t max_rx_octets;
2630 	uint16_t max_rx_time;
2631 
2632 	rp = hci_cmd_complete(evt, sizeof(*rp));
2633 
2634 	ll_length_max_get(&max_tx_octets, &max_tx_time,
2635 			  &max_rx_octets, &max_rx_time);
2636 
2637 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2638 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2639 	rp->max_rx_octets = sys_cpu_to_le16(max_rx_octets);
2640 	rp->max_rx_time = sys_cpu_to_le16(max_rx_time);
2641 	rp->status = 0x00;
2642 }
2643 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2644 
2645 #if defined(CONFIG_BT_CTLR_PHY)
2646 static void le_read_phy(struct net_buf *buf, struct net_buf **evt)
2647 {
2648 	struct bt_hci_cp_le_read_phy *cmd = (void *)buf->data;
2649 	struct bt_hci_rp_le_read_phy *rp;
2650 	uint16_t handle;
2651 	uint8_t status;
2652 
2653 	handle = sys_le16_to_cpu(cmd->handle);
2654 
2655 	rp = hci_cmd_complete(evt, sizeof(*rp));
2656 
2657 	status = ll_phy_get(handle, &rp->tx_phy, &rp->rx_phy);
2658 
2659 	rp->status = status;
2660 	rp->handle = sys_cpu_to_le16(handle);
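	/* ll_phy_get() returns PHY bit masks; convert to the enumerated HCI
	 * values (0x01 LE 1M, 0x02 LE 2M, 0x03 LE Coded) expected in the
	 * Command Complete parameters.
	 */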
2661 	rp->tx_phy = find_lsb_set(rp->tx_phy);
2662 	rp->rx_phy = find_lsb_set(rp->rx_phy);
2663 }
2664 
2665 static void le_set_default_phy(struct net_buf *buf, struct net_buf **evt)
2666 {
2667 	struct bt_hci_cp_le_set_default_phy *cmd = (void *)buf->data;
2668 	uint8_t status;
2669 
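	/* ALL_PHYS bits set by the Host mean no preference for that direction;
	 * substitute 0x07 to allow all of 1M, 2M and Coded.
	 */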
2670 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2671 		cmd->tx_phys = 0x07;
2672 	}
2673 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2674 		cmd->rx_phys = 0x07;
2675 	}
2676 
2677 	status = ll_phy_default_set(cmd->tx_phys, cmd->rx_phys);
2678 
2679 	*evt = cmd_complete_status(status);
2680 }
2681 
2682 static void le_set_phy(struct net_buf *buf, struct net_buf **evt)
2683 {
2684 	struct bt_hci_cp_le_set_phy *cmd = (void *)buf->data;
2685 	uint16_t phy_opts;
2686 	uint8_t mask_phys;
2687 	uint16_t handle;
2688 	uint8_t status;
2689 
2690 	handle = sys_le16_to_cpu(cmd->handle);
2691 	phy_opts = sys_le16_to_cpu(cmd->phy_opts);
2692 
2693 	mask_phys = BT_HCI_LE_PHY_PREFER_1M;
2694 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
2695 		mask_phys |= BT_HCI_LE_PHY_PREFER_2M;
2696 	}
2697 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
2698 		mask_phys |= BT_HCI_LE_PHY_PREFER_CODED;
2699 	}
2700 
2701 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2702 		cmd->tx_phys |= mask_phys;
2703 	}
2704 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2705 		cmd->rx_phys |= mask_phys;
2706 	}
2707 
2708 	if ((cmd->tx_phys | cmd->rx_phys) & ~mask_phys) {
2709 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
2710 
2711 		return;
2712 	}
2713 
2714 	if (!(cmd->tx_phys & 0x07) ||
2715 	    !(cmd->rx_phys & 0x07)) {
2716 		*evt = cmd_status(BT_HCI_ERR_INVALID_PARAM);
2717 
2718 		return;
2719 	}
2720 
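	/* Normalize the HCI Coded PHY options field (0: no preference,
	 * 1: prefer S=2 coding, 2: prefer S=8 coding) into the controller's
	 * zero-based preference value.
	 */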
2721 	if (phy_opts & 0x03) {
2722 		phy_opts -= 1U;
2723 		phy_opts &= 1;
2724 	} else {
2725 		phy_opts = 0U;
2726 	}
2727 
2728 	status = ll_phy_req_send(handle, cmd->tx_phys, phy_opts,
2729 				 cmd->rx_phys);
2730 
2731 	*evt = cmd_status(status);
2732 }
2733 #endif /* CONFIG_BT_CTLR_PHY */
2734 #endif /* CONFIG_BT_CONN */
2735 
2736 #if defined(CONFIG_BT_CTLR_PRIVACY)
2737 static void le_add_dev_to_rl(struct net_buf *buf, struct net_buf **evt)
2738 {
2739 	struct bt_hci_cp_le_add_dev_to_rl *cmd = (void *)buf->data;
2740 	uint8_t status;
2741 
2742 	status = ll_rl_add(&cmd->peer_id_addr, cmd->peer_irk, cmd->local_irk);
2743 
2744 	*evt = cmd_complete_status(status);
2745 }
2746 
2747 static void le_rem_dev_from_rl(struct net_buf *buf, struct net_buf **evt)
2748 {
2749 	struct bt_hci_cp_le_rem_dev_from_rl *cmd = (void *)buf->data;
2750 	uint8_t status;
2751 
2752 	status = ll_rl_remove(&cmd->peer_id_addr);
2753 
2754 	*evt = cmd_complete_status(status);
2755 }
2756 
2757 static void le_clear_rl(struct net_buf *buf, struct net_buf **evt)
2758 {
2759 	uint8_t status;
2760 
2761 	status = ll_rl_clear();
2762 
2763 	*evt = cmd_complete_status(status);
2764 }
2765 
2766 static void le_read_rl_size(struct net_buf *buf, struct net_buf **evt)
2767 {
2768 	struct bt_hci_rp_le_read_rl_size *rp;
2769 
2770 	rp = hci_cmd_complete(evt, sizeof(*rp));
2771 
2772 	rp->rl_size = ll_rl_size_get();
2773 	rp->status = 0x00;
2774 }
2775 
2776 static void le_read_peer_rpa(struct net_buf *buf, struct net_buf **evt)
2777 {
2778 	struct bt_hci_cp_le_read_peer_rpa *cmd = (void *)buf->data;
2779 	struct bt_hci_rp_le_read_peer_rpa *rp;
2780 	bt_addr_le_t peer_id_addr;
2781 
2782 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2783 	rp = hci_cmd_complete(evt, sizeof(*rp));
2784 
2785 	rp->status = ll_rl_crpa_get(&peer_id_addr, &rp->peer_rpa);
2786 }
2787 
2788 static void le_read_local_rpa(struct net_buf *buf, struct net_buf **evt)
2789 {
2790 	struct bt_hci_cp_le_read_local_rpa *cmd = (void *)buf->data;
2791 	struct bt_hci_rp_le_read_local_rpa *rp;
2792 	bt_addr_le_t peer_id_addr;
2793 
2794 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2795 	rp = hci_cmd_complete(evt, sizeof(*rp));
2796 
2797 	rp->status = ll_rl_lrpa_get(&peer_id_addr, &rp->local_rpa);
2798 }
2799 
2800 static void le_set_addr_res_enable(struct net_buf *buf, struct net_buf **evt)
2801 {
2802 	struct bt_hci_cp_le_set_addr_res_enable *cmd = (void *)buf->data;
2803 	uint8_t status;
2804 
2805 	status = ll_rl_enable(cmd->enable);
2806 
2807 	*evt = cmd_complete_status(status);
2808 }
2809 
2810 static void le_set_rpa_timeout(struct net_buf *buf, struct net_buf **evt)
2811 {
2812 	struct bt_hci_cp_le_set_rpa_timeout *cmd = (void *)buf->data;
2813 	uint16_t timeout = sys_le16_to_cpu(cmd->rpa_timeout);
2814 
2815 	ll_rl_timeout_set(timeout);
2816 
2817 	*evt = cmd_complete_status(0x00);
2818 }
2819 
2820 static void le_set_privacy_mode(struct net_buf *buf, struct net_buf **evt)
2821 {
2822 	struct bt_hci_cp_le_set_privacy_mode *cmd = (void *)buf->data;
2823 	uint8_t status;
2824 
2825 	status = ll_priv_mode_set(&cmd->id_addr, cmd->mode);
2826 
2827 	*evt = cmd_complete_status(status);
2828 }
2829 #endif /* CONFIG_BT_CTLR_PRIVACY */
2830 
2831 static void le_read_tx_power(struct net_buf *buf, struct net_buf **evt)
2832 {
2833 	struct bt_hci_rp_le_read_tx_power *rp;
2834 
2835 	rp = hci_cmd_complete(evt, sizeof(*rp));
2836 	rp->status = 0x00;
2837 	ll_tx_pwr_get(&rp->min_tx_power, &rp->max_tx_power);
2838 }
2839 
2840 #if defined(CONFIG_BT_CTLR_DF)
2841 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
2842 static void le_df_set_cl_cte_tx_params(struct net_buf *buf,
2843 				       struct net_buf **evt)
2844 {
2845 	struct bt_hci_cp_le_set_cl_cte_tx_params *cmd = (void *)buf->data;
2846 	uint8_t adv_handle;
2847 	uint8_t status;
2848 
2849 	if (adv_cmds_ext_check(evt)) {
2850 		return;
2851 	}
2852 
2853 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &adv_handle);
2854 	if (status) {
2855 		*evt = cmd_complete_status(status);
2856 		return;
2857 	}
2858 
2859 	status = ll_df_set_cl_cte_tx_params(adv_handle, cmd->cte_len,
2860 					    cmd->cte_type, cmd->cte_count,
2861 					    cmd->switch_pattern_len,
2862 					    cmd->ant_ids);
2863 
2864 	*evt = cmd_complete_status(status);
2865 }
2866 
2867 static void le_df_set_cl_cte_enable(struct net_buf *buf, struct net_buf **evt)
2868 {
2869 	struct bt_hci_cp_le_set_cl_cte_tx_enable *cmd = (void *)buf->data;
2870 	uint8_t status;
2871 	uint8_t handle;
2872 
2873 	if (adv_cmds_ext_check(evt)) {
2874 		return;
2875 	}
2876 
2877 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
2878 	if (status) {
2879 		*evt = cmd_complete_status(status);
2880 		return;
2881 	}
2882 
2883 	status = ll_df_set_cl_cte_tx_enable(handle, cmd->cte_enable);
2884 
2885 	*evt = cmd_complete_status(status);
2886 }
2887 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2888 
2889 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2890 static void le_df_set_cl_iq_sampling_enable(struct net_buf *buf, struct net_buf **evt)
2891 {
2892 	struct bt_hci_cp_le_set_cl_cte_sampling_enable *cmd = (void *)buf->data;
2893 	struct bt_hci_rp_le_set_cl_cte_sampling_enable *rp;
2894 	uint16_t sync_handle;
2895 	uint8_t status;
2896 
2897 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
2898 
2899 	status = ll_df_set_cl_iq_sampling_enable(sync_handle,
2900 						 cmd->sampling_enable,
2901 						 cmd->slot_durations,
2902 						 cmd->max_sampled_cte,
2903 						 cmd->switch_pattern_len,
2904 						 cmd->ant_ids);
2905 
2906 	rp = hci_cmd_complete(evt, sizeof(*rp));
2907 
2908 	rp->status = status;
2909 	rp->sync_handle = sys_cpu_to_le16(sync_handle);
2910 }
2911 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2912 
2913 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) ||      \
2914 	defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
2915 static int8_t iq_convert_12_to_8_bits(int16_t data)
2916 {
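	/* Report a saturated 16-bit sample as the 8-bit saturation marker;
	 * otherwise convert the 12-bit sample to 8 bits, saturating any value
	 * that does not fit.
	 */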
2917 	if (data == IQ_SAMPLE_SATURATED_16_BIT) {
2918 		return IQ_SAMPLE_SATURATED_8_BIT;
2919 	}
2920 
2921 #if defined(CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB)
2922 	return (data > INT8_MAX || data < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2923 						    : IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2924 #else  /* !CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2925 	int16_t data_conv = IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2926 
2927 	return (data_conv > INT8_MAX || data_conv < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2928 							      : (int8_t)data_conv;
2929 #endif /* CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2930 }
2931 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
2932 	* || CONFIG_BT_CTLR_DF_CONN_CTE_RX
2933 	*/
2934 
2935 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
2936 static void le_df_connectionless_iq_report(struct pdu_data *pdu_rx,
2937 					   struct node_rx_pdu *node_rx,
2938 					   struct net_buf *buf)
2939 {
2940 	struct bt_hci_evt_le_connectionless_iq_report *sep;
2941 	struct node_rx_iq_report *iq_report;
2942 	struct lll_sync *lll;
2943 	uint8_t samples_cnt;
2944 	int16_t rssi;
2945 	uint16_t sync_handle;
2946 	uint16_t per_evt_counter;
2947 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2948 	struct ll_sync_set *sync = NULL;
2949 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2950 
2951 	iq_report =  (struct node_rx_iq_report *)node_rx;
2952 
2953 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
2954 	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT)) {
2955 		return;
2956 	}
2957 
2958 	lll = iq_report->hdr.rx_ftr.param;
2959 
2960 	/* If there is no LLL context and CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT is enabled,
2961 	 * the controller is in Direct Test Mode and may generate
2962 	 * the Connectionless IQ Report.
2963 	 */
2964 	if (!lll && IS_ENABLED(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)) {
2965 		/* Set sync_handle to 0x0FFF according to the BT Core 5.3 specification
2966 		 * Vol 4 7.7.65.21
2967 		 */
2968 		sync_handle = 0x0FFF;
2969 		/* Set the periodic event counter to 0 since there is no periodic advertising train. */
2970 		per_evt_counter = 0;
2971 	}
2972 
2973 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2974 	else {
2975 		sync = HDR_LLL2ULL(lll);
2976 
2977 		/* The TX LL thread has higher priority than the RX thread. It may
2978 		 * happen that the host successfully disables CTE sampling in the
2979 		 * meantime. It should be verified here, to avoid reporting IQ samples
2980 		 * after the functionality was disabled or the sync was lost.
2981 		 */
2982 		if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) ||
2983 		    !sync->timeout_reload) {
2984 			/* Drop further processing of the event. */
2985 			return;
2986 		}
2987 
2988 		/* Get the sync handle corresponding to the LLL context passed in the
2989 		 * node rx footer field.
2990 		 */
2991 		sync_handle = ull_sync_handle_get(sync);
2992 		per_evt_counter = iq_report->event_counter;
2993 	}
2994 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2995 
2996 	/* If the packet status does not indicate insufficient resources for IQ samples and for
2997 	 * some reason sample_count is zero, inform the Host about the lack of valid IQ samples by
2998 	 * storing a single I_sample and Q_sample with the BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
2999 	 */
3000 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3001 		samples_cnt = 0U;
3002 	} else {
3003 		samples_cnt = MAX(1, iq_report->sample_count);
3004 	}
3005 
3006 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT,
3007 		       (sizeof(*sep) +
3008 			(samples_cnt * sizeof(struct bt_hci_le_iq_sample))));
3009 
3010 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);
3011 
3012 
3013 	sep->sync_handle = sys_cpu_to_le16(sync_handle);
3014 	sep->rssi = sys_cpu_to_le16(rssi);
3015 	sep->rssi_ant_id = iq_report->rssi_ant_id;
3016 	sep->cte_type = iq_report->cte_info.type;
3017 
3018 	sep->chan_idx = iq_report->chan_idx;
3019 	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);
3020 
3021 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
3022 		sep->slot_durations = iq_report->local_slot_durations;
3023 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
3024 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
3025 	} else {
3026 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
3027 	}
3028 
3029 	sep->packet_status = iq_report->packet_status;
3030 
3031 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3032 		if (iq_report->sample_count == 0U) {
3033 			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3034 			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3035 		} else {
3036 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
3037 				sep->sample[idx].i =
3038 					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
3039 				sep->sample[idx].q =
3040 					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
3041 			}
3042 		}
3043 	}
3044 
3045 	sep->sample_count = samples_cnt;
3046 }
3047 #endif /* defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) */
3048 
3049 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
3050 static void le_df_set_conn_cte_tx_params(struct net_buf *buf,
3051 					 struct net_buf **evt)
3052 {
3053 	struct bt_hci_cp_le_set_conn_cte_tx_params *cmd = (void *)buf->data;
3054 	struct bt_hci_rp_le_set_conn_cte_tx_params *rp;
3055 	uint16_t handle, handle_le16;
3056 	uint8_t status;
3057 
3058 	handle_le16 = cmd->handle;
3059 	handle = sys_le16_to_cpu(handle_le16);
3060 
3061 	status = ll_df_set_conn_cte_tx_params(handle, cmd->cte_types,
3062 					      cmd->switch_pattern_len,
3063 					      cmd->ant_ids);
3064 
3065 	rp = hci_cmd_complete(evt, sizeof(*rp));
3066 
3067 	rp->status = status;
3068 	rp->handle = handle_le16;
3069 }
3070 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
3071 
3072 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
3073 static void le_df_set_conn_cte_rx_params(struct net_buf *buf, struct net_buf **evt)
3074 {
3075 	struct bt_hci_cp_le_set_conn_cte_rx_params *cmd = (void *)buf->data;
3076 	struct bt_hci_rp_le_set_conn_cte_rx_params *rp;
3077 	uint16_t handle, handle_le16;
3078 	uint8_t status;
3079 
3080 	handle_le16 = cmd->handle;
3081 	handle = sys_le16_to_cpu(handle_le16);
3082 
3083 	status = ll_df_set_conn_cte_rx_params(handle, cmd->sampling_enable, cmd->slot_durations,
3084 					      cmd->switch_pattern_len, cmd->ant_ids);
3085 
3086 	rp = hci_cmd_complete(evt, sizeof(*rp));
3087 
3088 	rp->status = status;
3089 	rp->handle = handle_le16;
3090 }
3091 
3092 static void le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
3093 {
3094 	struct bt_hci_evt_le_connection_iq_report *sep;
3095 	struct node_rx_iq_report *iq_report;
3096 	struct lll_conn *lll;
3097 	uint8_t samples_cnt;
3098 	uint8_t phy_rx;
3099 	int16_t rssi;
3100 
3101 	iq_report = (struct node_rx_iq_report *)node_rx;
3102 
3103 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
3104 	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTION_IQ_REPORT)) {
3105 		return;
3106 	}
3107 
3108 	lll = iq_report->hdr.rx_ftr.param;
3109 
3110 #if defined(CONFIG_BT_CTLR_PHY)
3111 	phy_rx = lll->phy_rx;
3112 
3113 	/* Make sure the report is generated for a connection on an uncoded PHY */
3114 	LL_ASSERT(phy_rx != PHY_CODED);
3115 #else
3116 	phy_rx = PHY_1M;
3117 #endif /* CONFIG_BT_CTLR_PHY */
3118 
3119 	/* The TX LL thread has higher priority than the RX thread. It may happen that the host
3120 	 * successfully disables CTE sampling in the meantime. It should be verified here, to avoid
3121 	 * reporting IQ samples after the functionality was disabled.
3122 	 */
3123 	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
3124 		/* Drop further processing of the event. */
3125 		return;
3126 	}
3127 
3128 	/* If the packet status does not indicate insufficient resources for IQ samples and for
3129 	 * some reason sample_count is zero, inform the Host about the lack of valid IQ samples by
3130 	 * storing a single I_sample and Q_sample with the BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
3131 	 */
3132 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3133 		samples_cnt = 0;
3134 	} else {
3135 		samples_cnt = MAX(1, iq_report->sample_count);
3136 	}
3137 
3138 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTION_IQ_REPORT,
3139 		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample))));
3140 
3141 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);
3142 
3143 	sep->conn_handle = sys_cpu_to_le16(iq_report->hdr.handle);
3144 	sep->rx_phy = phy_rx;
3145 	sep->rssi = sys_cpu_to_le16(rssi);
3146 	sep->rssi_ant_id = iq_report->rssi_ant_id;
3147 	sep->cte_type = iq_report->cte_info.type;
3148 
3149 	sep->data_chan_idx = iq_report->chan_idx;
3150 	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);
3151 
3152 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
3153 		sep->slot_durations = iq_report->local_slot_durations;
3154 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
3155 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
3156 	} else {
3157 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
3158 	}
3159 
3160 	sep->packet_status = iq_report->packet_status;
3161 
3162 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3163 		if (iq_report->sample_count == 0U) {
3164 			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3165 			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3166 		} else {
3167 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
3168 				sep->sample[idx].i =
3169 					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
3170 				sep->sample[idx].q =
3171 					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
3172 			}
3173 		}
3174 	}
3175 
3176 	sep->sample_count = samples_cnt;
3177 }
3178 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
3179 
3180 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
3181 static void le_df_set_conn_cte_req_enable(struct net_buf *buf, struct net_buf **evt)
3182 {
3183 	struct bt_hci_cp_le_conn_cte_req_enable *cmd = (void *)buf->data;
3184 	struct bt_hci_rp_le_conn_cte_req_enable *rp;
3185 	uint16_t handle, handle_le16;
3186 	uint8_t status;
3187 
3188 	handle_le16 = cmd->handle;
3189 	handle = sys_le16_to_cpu(handle_le16);
3190 
3191 	status = ll_df_set_conn_cte_req_enable(handle, cmd->enable,
3192 					       sys_le16_to_cpu(cmd->cte_request_interval),
3193 					       cmd->requested_cte_length, cmd->requested_cte_type);
3194 	rp = hci_cmd_complete(evt, sizeof(*rp));
3195 
3196 	rp->status = status;
3197 	rp->handle = handle_le16;
3198 }
3199 
3200 static void le_df_cte_req_failed(uint8_t error_code, uint16_t handle, struct net_buf *buf)
3201 {
3202 	struct bt_hci_evt_le_cte_req_failed *sep;
3203 
3204 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
3205 	    !(le_event_mask & BT_EVT_MASK_LE_CTE_REQUEST_FAILED)) {
3206 		return;
3207 	}
3208 
3209 	sep = meta_evt(buf, BT_HCI_EVT_LE_CTE_REQUEST_FAILED, sizeof(*sep));
3210 
3211 	sep->status = error_code;
3212 	sep->conn_handle = sys_cpu_to_le16(handle);
3213 }
3214 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
3215 
3216 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
3217 static void le_df_set_conn_cte_rsp_enable(struct net_buf *buf, struct net_buf **evt)
3218 {
3219 	struct bt_hci_cp_le_conn_cte_rsp_enable *cmd = (void *)buf->data;
3220 	struct bt_hci_rp_le_conn_cte_rsp_enable *rp;
3221 	uint16_t handle, handle_le16;
3222 	uint8_t status;
3223 
3224 	handle_le16 = cmd->handle;
3225 	handle = sys_le16_to_cpu(handle_le16);
3226 
3227 	status = ll_df_set_conn_cte_rsp_enable(handle, cmd->enable);
3228 	rp = hci_cmd_complete(evt, sizeof(*rp));
3229 
3230 	rp->status = status;
3231 	rp->handle = handle_le16;
3232 }
3233 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
3234 
3235 static void le_df_read_ant_inf(struct net_buf *buf, struct net_buf **evt)
3236 {
3237 	struct bt_hci_rp_le_read_ant_info *rp;
3238 	uint8_t max_switch_pattern_len;
3239 	uint8_t switch_sample_rates;
3240 	uint8_t max_cte_len;
3241 	uint8_t num_ant;
3242 
3243 	ll_df_read_ant_inf(&switch_sample_rates, &num_ant,
3244 			   &max_switch_pattern_len, &max_cte_len);
3245 
3246 	rp = hci_cmd_complete(evt, sizeof(*rp));
3247 
3248 	rp->max_switch_pattern_len = max_switch_pattern_len;
3249 	rp->switch_sample_rates = switch_sample_rates;
3250 	rp->max_cte_len = max_cte_len;
3251 	rp->num_ant = num_ant;
3252 	rp->status = 0x00;
3253 }
3254 #endif /* CONFIG_BT_CTLR_DF */
3255 
3256 #if defined(CONFIG_BT_CTLR_DTM_HCI)
3257 static void le_rx_test(struct net_buf *buf, struct net_buf **evt)
3258 {
3259 	struct bt_hci_cp_le_rx_test *cmd = (void *)buf->data;
3260 	uint8_t status;
3261 
3262 	status = ll_test_rx(cmd->rx_ch, BT_HCI_LE_RX_PHY_1M, BT_HCI_LE_MOD_INDEX_STANDARD,
3263 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3264 			    BT_HCI_LE_TEST_SLOT_DURATION_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3265 			    NULL);
3266 
3267 	*evt = cmd_complete_status(status);
3268 }
3269 
3270 static void le_tx_test(struct net_buf *buf, struct net_buf **evt)
3271 {
3272 	struct bt_hci_cp_le_tx_test *cmd = (void *)buf->data;
3273 	uint8_t status;
3274 
3275 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload,
3276 			    BT_HCI_LE_TX_PHY_1M, BT_HCI_LE_TEST_CTE_DISABLED,
3277 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3278 			    NULL, BT_HCI_TX_TEST_POWER_MAX_SET);
3279 
3280 	*evt = cmd_complete_status(status);
3281 }
3282 
3283 static void le_test_end(struct net_buf *buf, struct net_buf **evt)
3284 {
3285 	struct bt_hci_rp_le_test_end *rp;
3286 	uint16_t rx_pkt_count;
3287 	uint8_t status;
3288 
3289 	status = ll_test_end(&rx_pkt_count);
3290 
3291 	rp = hci_cmd_complete(evt, sizeof(*rp));
3292 	rp->status = status;
3293 	rp->rx_pkt_count = sys_cpu_to_le16(rx_pkt_count);
3294 }
3295 
3296 static void le_enh_rx_test(struct net_buf *buf, struct net_buf **evt)
3297 {
3298 	struct bt_hci_cp_le_enh_rx_test *cmd = (void *)buf->data;
3299 	uint8_t status;
3300 
3301 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, BT_HCI_LE_TEST_CTE_DISABLED,
3302 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SLOT_DURATION_ANY,
3303 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL);
3304 
3305 	*evt = cmd_complete_status(status);
3306 }
3307 
3308 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
3309 static void le_rx_test_v3(struct net_buf *buf, struct net_buf **evt)
3310 {
3311 	struct bt_hci_cp_le_rx_test_v3 *cmd = (void *)buf->data;
3312 	uint8_t status;
3313 
3314 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, cmd->expected_cte_len,
3315 			    cmd->expected_cte_type, cmd->slot_durations, cmd->switch_pattern_len,
3316 			    cmd->ant_ids);
3317 
3318 	*evt = cmd_complete_status(status);
3319 }
3320 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
3321 
3322 static void le_enh_tx_test(struct net_buf *buf, struct net_buf **evt)
3323 {
3324 	struct bt_hci_cp_le_enh_tx_test *cmd = (void *)buf->data;
3325 	uint8_t status;
3326 
3327 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3328 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3329 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL,
3330 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3331 
3332 	*evt = cmd_complete_status(status);
3333 }
3334 
3335 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
3336 static void le_tx_test_v3(struct net_buf *buf, struct net_buf **evt)
3337 {
3338 	struct bt_hci_cp_le_tx_test_v3 *cmd = (void *)buf->data;
3339 	uint8_t status;
3340 
3341 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3342 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3343 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3344 
3345 	*evt = cmd_complete_status(status);
3346 }
3347 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
3348 
3349 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
3350 static void le_tx_test_v4(struct net_buf *buf, struct net_buf **evt)
3351 {
3352 	struct bt_hci_cp_le_tx_test_v4 *cmd = (void *)buf->data;
3353 	struct bt_hci_cp_le_tx_test_v4_tx_power *tx_power = (void *)(buf->data +
3354 			sizeof(struct bt_hci_cp_le_tx_test_v4) + cmd->switch_pattern_len);
3355 	uint8_t status;
3356 
3357 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3358 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3359 			    tx_power->tx_power);
3360 
3361 	*evt = cmd_complete_status(status);
3362 }
3363 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
3364 #endif /* CONFIG_BT_CTLR_DTM_HCI */
3365 
3366 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3367 #if defined(CONFIG_BT_BROADCASTER)
3368 
3369 static void le_set_adv_set_random_addr(struct net_buf *buf,
3370 				       struct net_buf **evt)
3371 {
3372 	struct bt_hci_cp_le_set_adv_set_random_addr *cmd = (void *)buf->data;
3373 	uint8_t status;
3374 	uint8_t handle;
3375 
3376 	if (adv_cmds_ext_check(evt)) {
3377 		return;
3378 	}
3379 
3380 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3381 	if (status) {
3382 		*evt = cmd_complete_status(status);
3383 		return;
3384 	}
3385 
3386 	status = ll_adv_aux_random_addr_set(handle, &cmd->bdaddr.val[0]);
3387 
3388 	*evt = cmd_complete_status(status);
3389 }
3390 
3391 static void le_set_ext_adv_param(struct net_buf *buf, struct net_buf **evt)
3392 {
3393 	struct bt_hci_cp_le_set_ext_adv_param *cmd = (void *)buf->data;
3394 	struct bt_hci_rp_le_set_ext_adv_param *rp;
3395 	uint32_t min_interval;
3396 	uint16_t evt_prop;
3397 	uint8_t tx_pwr;
3398 	uint8_t status;
3399 	uint8_t phy_p;
3400 	uint8_t phy_s;
3401 	uint8_t handle;
3402 
3403 	if (adv_cmds_ext_check(evt)) {
3404 		return;
3405 	}
3406 
3407 	if (cmd->handle > BT_HCI_LE_ADV_HANDLE_MAX) {
3408 		*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3409 		return;
3410 	}
3411 
3412 	min_interval = sys_get_le24(cmd->prim_min_interval);
3413 
3414 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
3415 		const uint32_t max_interval =
3416 					sys_get_le24(cmd->prim_max_interval);
3417 
3418 		/* Compare advertising interval maximum with implementation
3419 		 * supported advertising interval maximum value defined in the
3420 		 * Kconfig CONFIG_BT_CTLR_ADV_INTERVAL_MAX.
3421 		 */
3422 		if ((min_interval > max_interval) ||
3423 		    (min_interval < BT_HCI_LE_PRIM_ADV_INTERVAL_MIN) ||
3424 		    (max_interval > CONFIG_BT_CTLR_ADV_INTERVAL_MAX)) {
3425 			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3426 			return;
3427 		}
3428 	}
3429 
3430 	status = ll_adv_set_by_hci_handle_get_or_new(cmd->handle, &handle);
3431 	if (status) {
3432 		*evt = cmd_complete_status(status);
3433 		return;
3434 	}
3435 
3436 	evt_prop = sys_le16_to_cpu(cmd->props);
3437 	tx_pwr = cmd->tx_power;
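	/* Convert the enumerated PHY values from the command (0x01: 1M,
	 * 0x02: 2M, 0x03: Coded) into the bitmask form used by the Link Layer.
	 */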
3438 	phy_p = BIT(cmd->prim_adv_phy - 1);
3439 	phy_s = BIT(cmd->sec_adv_phy - 1);
3440 
3441 	status = ll_adv_params_set(handle, evt_prop, min_interval,
3442 				   PDU_ADV_TYPE_EXT_IND, cmd->own_addr_type,
3443 				   cmd->peer_addr.type, cmd->peer_addr.a.val,
3444 				   cmd->prim_channel_map, cmd->filter_policy,
3445 				   &tx_pwr, phy_p, cmd->sec_adv_max_skip, phy_s,
3446 				   cmd->sid, cmd->scan_req_notify_enable);
3447 
3448 	rp = hci_cmd_complete(evt, sizeof(*rp));
3449 	rp->status = status;
3450 	rp->tx_power = tx_pwr;
3451 }
3452 
3453 static void le_set_ext_adv_data(struct net_buf *buf, struct net_buf **evt)
3454 {
3455 	struct bt_hci_cp_le_set_ext_adv_data *cmd = (void *)buf->data;
3456 	uint8_t status;
3457 	uint8_t handle;
3458 
3459 	if (adv_cmds_ext_check(evt)) {
3460 		return;
3461 	}
3462 
3463 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3464 	if (status) {
3465 		*evt = cmd_complete_status(status);
3466 		return;
3467 	}
3468 
3469 	status = ll_adv_aux_ad_data_set(handle, cmd->op, cmd->frag_pref,
3470 					cmd->len, cmd->data);
3471 
3472 	*evt = cmd_complete_status(status);
3473 }
3474 
3475 static void le_set_ext_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
3476 {
3477 	struct bt_hci_cp_le_set_ext_scan_rsp_data *cmd = (void *)buf->data;
3478 	uint8_t status;
3479 	uint8_t handle;
3480 
3481 	if (adv_cmds_ext_check(evt)) {
3482 		return;
3483 	}
3484 
3485 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3486 	if (status) {
3487 		*evt = cmd_complete_status(status);
3488 		return;
3489 	}
3490 
3491 	status = ll_adv_aux_sr_data_set(handle, cmd->op, cmd->frag_pref,
3492 					cmd->len, cmd->data);
3493 
3494 	*evt = cmd_complete_status(status);
3495 }
3496 
3497 static void le_set_ext_adv_enable(struct net_buf *buf, struct net_buf **evt)
3498 {
3499 	struct bt_hci_cp_le_set_ext_adv_enable *cmd = (void *)buf->data;
3500 	struct bt_hci_ext_adv_set *s;
3501 	uint8_t set_num;
3502 	uint8_t enable;
3503 	uint8_t status;
3504 	uint8_t handle;
3505 
3506 	if (adv_cmds_ext_check(evt)) {
3507 		return;
3508 	}
3509 
3510 	set_num = cmd->set_num;
3511 	if (!set_num) {
3512 		if (cmd->enable) {
3513 			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3514 			return;
3515 		}
3516 
3517 		status = ll_adv_disable_all();
3518 
3519 		*evt = cmd_complete_status(status);
3520 
3521 		return;
3522 	}
3523 
3524 	s = (void *) cmd->s;
3525 	enable = cmd->enable;
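	/* Enable or disable each advertising set referenced in the command's
	 * array of sets, one at a time.
	 */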
3526 	do {
3527 		status = ll_adv_set_by_hci_handle_get(s->handle, &handle);
3528 		if (status) {
3529 			break;
3530 		}
3531 
3532 		/* TODO: duration and events parameter use. */
3533 #if defined(CONFIG_BT_HCI_MESH_EXT)
3534 		status = ll_adv_enable(handle, cmd->enable, 0, 0, 0, 0, 0);
3535 #else /* !CONFIG_BT_HCI_MESH_EXT */
3536 		status = ll_adv_enable(handle, cmd->enable,
3537 				       sys_le16_to_cpu(s->duration), s->max_ext_adv_evts);
3538 #endif /* !CONFIG_BT_HCI_MESH_EXT */
3539 		if (status) {
3540 			/* TODO: How to handle the sets already enabled before
3541 			 * this error?
3542 			 */
3543 			break;
3544 		}
3545 
3546 		s++;
3547 	} while (--set_num);
3548 
3549 	*evt = cmd_complete_status(status);
3550 }
3551 
3552 static void le_read_max_adv_data_len(struct net_buf *buf, struct net_buf **evt)
3553 {
3554 	struct bt_hci_rp_le_read_max_adv_data_len *rp;
3555 	uint16_t max_adv_data_len;
3556 
3557 	if (adv_cmds_ext_check(evt)) {
3558 		return;
3559 	}
3560 
3561 	rp = hci_cmd_complete(evt, sizeof(*rp));
3562 
3563 	max_adv_data_len = ll_adv_aux_max_data_length_get();
3564 
3565 	rp->max_adv_data_len = sys_cpu_to_le16(max_adv_data_len);
3566 	rp->status = 0x00;
3567 }
3568 
3569 static void le_read_num_adv_sets(struct net_buf *buf, struct net_buf **evt)
3570 {
3571 	struct bt_hci_rp_le_read_num_adv_sets *rp;
3572 
3573 	if (adv_cmds_ext_check(evt)) {
3574 		return;
3575 	}
3576 
3577 	rp = hci_cmd_complete(evt, sizeof(*rp));
3578 
3579 	rp->num_sets = ll_adv_aux_set_count_get();
3580 	rp->status = 0x00;
3581 }
3582 
3583 static void le_remove_adv_set(struct net_buf *buf, struct net_buf **evt)
3584 {
3585 	struct bt_hci_cp_le_remove_adv_set *cmd = (void *)buf->data;
3586 	uint8_t status;
3587 	uint8_t handle;
3588 
3589 	if (adv_cmds_ext_check(evt)) {
3590 		return;
3591 	}
3592 
3593 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3594 	if (status) {
3595 		*evt = cmd_complete_status(status);
3596 		return;
3597 	}
3598 
3599 	status = ll_adv_aux_set_remove(handle);
3600 
3601 	*evt = cmd_complete_status(status);
3602 }
3603 
3604 static void le_clear_adv_sets(struct net_buf *buf, struct net_buf **evt)
3605 {
3606 	uint8_t status;
3607 
3608 	if (adv_cmds_ext_check(evt)) {
3609 		return;
3610 	}
3611 
3612 	status = ll_adv_aux_set_clear();
3613 
3614 	*evt = cmd_complete_status(status);
3615 }
3616 
3617 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
3618 static void le_set_per_adv_param(struct net_buf *buf, struct net_buf **evt)
3619 {
3620 	struct bt_hci_cp_le_set_per_adv_param *cmd = (void *)buf->data;
3621 	uint16_t max_interval;
3622 	uint16_t flags;
3623 	uint8_t status;
3624 	uint8_t handle;
3625 
3626 	if (adv_cmds_ext_check(evt)) {
3627 		return;
3628 	}
3629 
3630 	max_interval = sys_le16_to_cpu(cmd->max_interval);
3631 
3632 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
3633 		const uint32_t min_interval =
3634 					sys_le16_to_cpu(cmd->min_interval);
3635 
3636 		/* Compare periodic advertising interval maximum with
3637 		 * implementation supported periodic advertising interval
3638 		 * maximum value defined in the Kconfig
3639 		 * CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX.
3640 		 */
3641 		if ((min_interval > max_interval) ||
3642 		    (min_interval < BT_HCI_LE_PER_ADV_INTERVAL_MIN) ||
3643 		    (max_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX)) {
3644 			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3645 			return;
3646 		}
3647 	}
3648 
3649 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3650 	if (status) {
3651 		*evt = cmd_complete_status(status);
3652 		return;
3653 	}
3654 
3655 	flags = sys_le16_to_cpu(cmd->props);
3656 
3657 	status = ll_adv_sync_param_set(handle, max_interval, flags);
3658 
3659 	*evt = cmd_complete_status(status);
3660 }
3661 
3662 static void le_set_per_adv_data(struct net_buf *buf, struct net_buf **evt)
3663 {
3664 	struct bt_hci_cp_le_set_per_adv_data *cmd = (void *)buf->data;
3665 	uint8_t status;
3666 	uint8_t handle;
3667 
3668 	if (adv_cmds_ext_check(evt)) {
3669 		return;
3670 	}
3671 
3672 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3673 	if (status) {
3674 		*evt = cmd_complete_status(status);
3675 		return;
3676 	}
3677 
3678 	status = ll_adv_sync_ad_data_set(handle, cmd->op, cmd->len,
3679 					 cmd->data);
3680 
3681 	*evt = cmd_complete_status(status);
3682 }
3683 
3684 static void le_set_per_adv_enable(struct net_buf *buf, struct net_buf **evt)
3685 {
3686 	struct bt_hci_cp_le_set_per_adv_enable *cmd = (void *)buf->data;
3687 	uint8_t status;
3688 	uint8_t handle;
3689 
3690 	if (adv_cmds_ext_check(evt)) {
3691 		return;
3692 	}
3693 
3694 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3695 	if (status) {
3696 		*evt = cmd_complete_status(status);
3697 		return;
3698 	}
3699 
3700 	status = ll_adv_sync_enable(handle, cmd->enable);
3701 
3702 	*evt = cmd_complete_status(status);
3703 }
3704 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
3705 #endif /* CONFIG_BT_BROADCASTER */
3706 
3707 #if defined(CONFIG_BT_OBSERVER)
3708 static void le_set_ext_scan_param(struct net_buf *buf, struct net_buf **evt)
3709 {
3710 	struct bt_hci_cp_le_set_ext_scan_param *cmd = (void *)buf->data;
3711 	struct bt_hci_ext_scan_phy *p;
3712 	uint8_t own_addr_type;
3713 	uint8_t filter_policy;
3714 	uint8_t phys_bitmask;
3715 	uint8_t status;
3716 	uint8_t phys;
3717 
3718 	if (adv_cmds_ext_check(evt)) {
3719 		return;
3720 	}
3721 
3722 	/* The number of bits set indicates the scan sets to be configured by
3723 	 * calling the ll_scan_params_set function.
3724 	 */
3725 	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
3726 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
3727 		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
3728 	}
3729 
3730 	phys = cmd->phys;
3731 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
3732 	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
3733 		*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3734 
3735 		return;
3736 	}
3737 
3738 	own_addr_type = cmd->own_addr_type;
3739 	filter_policy = cmd->filter_policy;
3740 	p = cmd->p;
3741 
3742 	/* Irrespective of the PHYs enabled for scanning, ll_scan_params_set
3743 	 * needs to be called to initialise the scan sets.
3744 	 * Passing interval and window as 0 disables the particular scan set
3745 	 * from being enabled.
3746 	 */
3747 	do {
3748 		uint16_t interval;
3749 		uint16_t window;
3750 		uint8_t type;
3751 		uint8_t phy;
3752 
3753 		/* Get single PHY bit from the loop bitmask */
3754 		phy = BIT(find_lsb_set(phys_bitmask) - 1);
3755 
3756 		/* Pass the PHY (1M or Coded) of scan set in MSbits of type
3757 		 * parameter
3758 		 */
3759 		type = (phy << 1);
3760 
3761 		/* If current PHY is one of the PHY in the Scanning_PHYs,
3762 		 * pick the supplied scan type, interval and window.
3763 		 */
3764 		if (phys & phy) {
3765 			type |= (p->type & 0x01);
3766 			interval = sys_le16_to_cpu(p->interval);
3767 			window = sys_le16_to_cpu(p->window);
3768 			p++;
3769 		} else {
3770 			interval = 0U;
3771 			window = 0U;
3772 		}
3773 
3774 		status = ll_scan_params_set(type, interval, window,
3775 					    own_addr_type, filter_policy);
3776 		if (status) {
3777 			break;
3778 		}
3779 
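		/* Clear the least significant PHY bit just handled, e.g.
		 * 0b011 -> 0b010 -> 0b000, and continue with the remaining
		 * selected PHYs, if any.
		 */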
3780 		phys_bitmask &= (phys_bitmask - 1);
3781 	} while (phys_bitmask);
3782 
3783 	*evt = cmd_complete_status(status);
3784 }
3785 
3786 static void le_set_ext_scan_enable(struct net_buf *buf, struct net_buf **evt)
3787 {
3788 	struct bt_hci_cp_le_set_ext_scan_enable *cmd = (void *)buf->data;
3789 	uint8_t status;
3790 
3791 	if (adv_cmds_ext_check(evt)) {
3792 		return;
3793 	}
3794 
3795 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
3796 	/* Initialize duplicate filtering */
3797 	if (cmd->enable && cmd->filter_dup) {
3798 		if (0) {
3799 
3800 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
3801 		} else if (dup_count == DUP_FILTER_DISABLED) {
3802 			dup_scan = true;
3803 
3804 			/* All entries reset */
3805 			dup_count = 0;
3806 			dup_curr = 0U;
3807 		} else if (!dup_scan) {
3808 			dup_scan = true;
3809 			dup_ext_adv_reset();
3810 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3811 
3812 		} else {
3813 			/* All entries reset */
3814 			dup_count = 0;
3815 			dup_curr = 0U;
3816 		}
3817 	} else {
3818 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
3819 		dup_scan = false;
3820 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3821 		dup_count = DUP_FILTER_DISABLED;
3822 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3823 	}
3824 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
3825 
3826 	status = ll_scan_enable(cmd->enable, cmd->duration, cmd->period);
3827 
3828 	/* NOTE: As duplicate filtering is implemented here in the HCI source
3829 	 *       code, enabling of already enabled scanning shall succeed after
3830 	 *       the duplicate filtering updates have been handled in the
3831 	 *       statements above. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
3832 	 */
3833 	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
3834 	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
3835 		status = BT_HCI_ERR_SUCCESS;
3836 	}
3837 
3838 	*evt = cmd_complete_status(status);
3839 }
3840 
3841 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
3842 static void le_per_adv_create_sync(struct net_buf *buf, struct net_buf **evt)
3843 {
3844 	struct bt_hci_cp_le_per_adv_create_sync *cmd = (void *)buf->data;
3845 	uint16_t sync_timeout;
3846 	uint8_t status;
3847 	uint16_t skip;
3848 
3849 	if (adv_cmds_ext_check(NULL)) {
3850 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
3851 		return;
3852 	}
3853 
3854 	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
3855 	    (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST)) {
3856 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3857 		return;
3858 	}
3859 
3860 	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
3861 	    (cmd->options &
3862 	     (BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED |
3863 	      BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE)) ==
3864 	    BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
3865 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3866 		return;
3867 	}
3868 
3869 	/* FIXME: Check for HCI LE Set Periodic Advertising Receive Enable
3870 	 * command support and if reporting is initially disabled then
3871 	 * return error code Connection Failed to be Established /
3872 	 * Synchronization Timeout (0x3E).
3873 	 */
3874 
3875 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
3876 	/* Initialize duplicate filtering */
3877 	if (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
3878 		if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
3879 			dup_count = 0;
3880 			dup_curr = 0U;
3881 		} else {
3882 			/* NOTE: Invalidation of dup_ext_adv_mode array entries
3883 			 *       is done when sync is established.
3884 			 */
3885 		}
3886 	} else if (!dup_scan) {
3887 		dup_count = DUP_FILTER_DISABLED;
3888 	}
3889 #endif
3890 
3891 	skip = sys_le16_to_cpu(cmd->skip);
3892 	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
3893 
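	/* Validate the requested CTE type filter: with CTE type filtering
	 * support only reserved bit values are rejected, otherwise any request
	 * other than "no filtering" is treated as an invalid parameter.
	 */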
3894 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
3895 	if ((cmd->cte_type & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_INVALID_VALUE) != 0) {
3896 		status = BT_HCI_ERR_CMD_DISALLOWED;
3897 #else
3898 	if (cmd->cte_type != BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
3899 		status = BT_HCI_ERR_INVALID_PARAM;
3900 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
3901 	} else {
3902 		status = ll_sync_create(cmd->options, cmd->sid, cmd->addr.type, cmd->addr.a.val,
3903 					skip, sync_timeout, cmd->cte_type);
3904 	}
3905 	*evt = cmd_status(status);
3906 }
3907 
3908 static void le_per_adv_create_sync_cancel(struct net_buf *buf,
3909 					  struct net_buf **evt, void **node_rx)
3910 {
3911 	struct bt_hci_evt_cc_status *ccst;
3912 	uint8_t status;
3913 
3914 	if (adv_cmds_ext_check(evt)) {
3915 		return;
3916 	}
3917 
3918 	status = ll_sync_create_cancel(node_rx);
3919 
3920 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
3921 	ccst->status = status;
3922 }
3923 
3924 static void le_per_adv_terminate_sync(struct net_buf *buf, struct net_buf **evt)
3925 {
3926 	struct bt_hci_cp_le_per_adv_terminate_sync *cmd = (void *)buf->data;
3927 	struct bt_hci_evt_cc_status *ccst;
3928 	uint16_t handle;
3929 	uint8_t status;
3930 
3931 	if (adv_cmds_ext_check(evt)) {
3932 		return;
3933 	}
3934 
3935 	handle = sys_le16_to_cpu(cmd->handle);
3936 
3937 	status = ll_sync_terminate(handle);
3938 
3939 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
3940 	ccst->status = status;
3941 }
3942 
3943 static void le_per_adv_recv_enable(struct net_buf *buf, struct net_buf **evt)
3944 {
3945 	struct bt_hci_cp_le_set_per_adv_recv_enable *cmd = (void *)buf->data;
3946 	struct bt_hci_evt_cc_status *ccst;
3947 	uint16_t handle;
3948 	uint8_t status;
3949 
3950 	if (adv_cmds_ext_check(evt)) {
3951 		return;
3952 	}
3953 
3954 	handle = sys_le16_to_cpu(cmd->handle);
3955 
3956 	status = ll_sync_recv_enable(handle, cmd->enable);
3957 
3958 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
3959 	if (!status) {
3960 		if (cmd->enable &
3961 		    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) {
3962 			if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
3963 				dup_count = 0;
3964 				dup_curr = 0U;
3965 			} else {
3966 				/* NOTE: Invalidation of dup_ext_adv_mode array
3967 				 *       entries is done when sync is
3968 				 *       established.
3969 				 */
3970 			}
3971 		} else if (!dup_scan) {
3972 			dup_count = DUP_FILTER_DISABLED;
3973 		}
3974 	}
3975 #endif
3976 
3977 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
3978 	ccst->status = status;
3979 }
3980 
3981 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
3982 static void le_add_dev_to_pal(struct net_buf *buf, struct net_buf **evt)
3983 {
3984 	struct bt_hci_cp_le_add_dev_to_per_adv_list *cmd = (void *)buf->data;
3985 	uint8_t status;
3986 
3987 	if (adv_cmds_ext_check(evt)) {
3988 		return;
3989 	}
3990 
3991 	status = ll_pal_add(&cmd->addr, cmd->sid);
3992 
3993 	*evt = cmd_complete_status(status);
3994 }
3995 
3996 static void le_rem_dev_from_pal(struct net_buf *buf, struct net_buf **evt)
3997 {
3998 	struct bt_hci_cp_le_rem_dev_from_per_adv_list *cmd = (void *)buf->data;
3999 	uint8_t status;
4000 
4001 	if (adv_cmds_ext_check(evt)) {
4002 		return;
4003 	}
4004 
4005 	status = ll_pal_remove(&cmd->addr, cmd->sid);
4006 
4007 	*evt = cmd_complete_status(status);
4008 }
4009 
4010 static void le_clear_pal(struct net_buf *buf, struct net_buf **evt)
4011 {
4012 	uint8_t status;
4013 
4014 	if (adv_cmds_ext_check(evt)) {
4015 		return;
4016 	}
4017 
4018 	status = ll_pal_clear();
4019 
4020 	*evt = cmd_complete_status(status);
4021 }
4022 
4023 static void le_read_pal_size(struct net_buf *buf, struct net_buf **evt)
4024 {
4025 	struct bt_hci_rp_le_read_per_adv_list_size *rp;
4026 
4027 	if (adv_cmds_ext_check(evt)) {
4028 		return;
4029 	}
4030 
4031 	rp = hci_cmd_complete(evt, sizeof(*rp));
4032 	rp->status = 0x00;
4033 
4034 	rp->list_size = ll_pal_size_get();
4035 }
4036 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4037 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4038 #endif /* CONFIG_BT_OBSERVER */
4039 
4040 #if defined(CONFIG_BT_CENTRAL)
4041 static void le_ext_create_connection(struct net_buf *buf, struct net_buf **evt)
4042 {
4043 	struct bt_hci_cp_le_ext_create_conn *cmd = (void *)buf->data;
4044 	struct bt_hci_ext_conn_phy *p;
4045 	uint8_t peer_addr_type;
4046 	uint8_t own_addr_type;
4047 	uint8_t filter_policy;
4048 	uint8_t phys_bitmask;
4049 	uint8_t *peer_addr;
4050 	uint8_t status;
4051 	uint8_t phys;
4052 
4053 	if (adv_cmds_ext_check(NULL)) {
4054 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
4055 		return;
4056 	}
4057 
4058 	/* The number of bits set indicates the scan sets to be configured by
4059 	 * calling the ll_create_connection function.
4060 	 */
4061 	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
4062 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
4063 		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
4064 	}
4065 
4066 	phys = cmd->phys;
4067 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
4068 	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
4069 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
4070 
4071 		return;
4072 	}
4073 
4074 	filter_policy = cmd->filter_policy;
4075 	own_addr_type = cmd->own_addr_type;
4076 	peer_addr_type = cmd->peer_addr.type;
4077 	peer_addr = cmd->peer_addr.a.val;
4078 	p = cmd->p;
4079 
4080 	do {
4081 		uint16_t supervision_timeout;
4082 		uint16_t conn_interval_max;
4083 		uint16_t scan_interval;
4084 		uint16_t conn_latency;
4085 		uint16_t scan_window;
4086 		uint8_t phy;
4087 
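		/* Get single PHY bit from the loop bitmask */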
4088 		phy = BIT(find_lsb_set(phys_bitmask) - 1);
4089 
4090 		if (phys & phy) {
4091 			scan_interval = sys_le16_to_cpu(p->scan_interval);
4092 			scan_window = sys_le16_to_cpu(p->scan_window);
4093 			conn_interval_max =
4094 				sys_le16_to_cpu(p->conn_interval_max);
4095 			conn_latency = sys_le16_to_cpu(p->conn_latency);
4096 			supervision_timeout =
4097 				sys_le16_to_cpu(p->supervision_timeout);
4098 
4099 			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
4100 				status = check_cconn_params(true, scan_interval,
4101 							    scan_window,
4102 							    conn_interval_max,
4103 							    conn_latency,
4104 							    supervision_timeout);
4105 				if (status) {
4106 					*evt = cmd_status(status);
4107 					return;
4108 				}
4109 			}
4110 
4111 			status = ll_create_connection(scan_interval,
4112 						      scan_window,
4113 						      filter_policy,
4114 						      peer_addr_type,
4115 						      peer_addr,
4116 						      own_addr_type,
4117 						      conn_interval_max,
4118 						      conn_latency,
4119 						      supervision_timeout,
4120 						      phy);
4121 			p++;
4122 		} else {
4123 			uint8_t type;
4124 
4125 			type = (phy << 1);
4126 			/* NOTE: Pass an invalid interval value to reset the
4127 			 *       PHY value in the scan instance so as not to
4128 			 *       start scanning on the unselected PHY.
4129 			 */
4130 			status = ll_scan_params_set(type, 0, 0, 0, 0);
4131 		}
4132 
4133 		if (status) {
4134 			*evt = cmd_status(status);
4135 			return;
4136 		}
4137 
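		/* Drop the PHY bit just processed and loop until all selected
		 * PHYs have been handled.
		 */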
4138 		phys_bitmask &= (phys_bitmask - 1);
4139 	} while (phys_bitmask);
4140 
4141 	status = ll_connect_enable(phys & BT_HCI_LE_EXT_SCAN_PHY_CODED);
4142 
4143 	*evt = cmd_status(status);
4144 }
4145 #endif /* CONFIG_BT_CENTRAL */
4146 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4147 
4148 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4149 static void le_cis_request(struct pdu_data *pdu_data,
4150 			   struct node_rx_pdu *node_rx,
4151 			   struct net_buf *buf)
4152 {
4153 	struct bt_hci_evt_le_cis_req *sep;
4154 	struct node_rx_conn_iso_req *req;
4155 	void *node;
4156 
4157 	/* Check for pdu field being aligned before accessing CIS established
4158 	 * event.
4159 	 */
4160 	node = pdu_data;
4161 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4162 
4163 	req = node;
4164 	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS)) ||
4165 	    !(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4166 	    !(le_event_mask & BT_EVT_MASK_LE_CIS_REQ)) {
4167 		ll_cis_reject(req->cis_handle, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE);
4168 		return;
4169 	}
4170 
4171 	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_REQ, sizeof(*sep));
4172 	sep->acl_handle = sys_cpu_to_le16(node_rx->hdr.handle);
4173 	sep->cis_handle = sys_cpu_to_le16(req->cis_handle);
4174 	sep->cig_id = req->cig_id;
4175 	sep->cis_id = req->cis_id;
4176 }
4177 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4178 
4179 #if defined(CONFIG_BT_CTLR_CONN_ISO)
4180 static void le_cis_established(struct pdu_data *pdu_data,
4181 			       struct node_rx_pdu *node_rx,
4182 			       struct net_buf *buf)
4183 {
4184 	struct lll_conn_iso_stream_rxtx *lll_cis_c;
4185 	struct lll_conn_iso_stream_rxtx *lll_cis_p;
4186 	struct bt_hci_evt_le_cis_established *sep;
4187 	struct lll_conn_iso_stream *lll_cis;
4188 	struct node_rx_conn_iso_estab *est;
4189 	struct ll_conn_iso_stream *cis;
4190 	struct ll_conn_iso_group *cig;
4191 	bool is_central;
4192 	void *node;
4193 
4194 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4195 	    !(le_event_mask & BT_EVT_MASK_LE_CIS_ESTABLISHED)) {
4196 		return;
4197 	}
4198 
4199 	cis = node_rx->hdr.rx_ftr.param;
4200 	cig = cis->group;
4201 
4202 	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_ESTABLISHED, sizeof(*sep));
4203 
4204 	/* Check for pdu field being aligned before accessing CIS established
4205 	 * event.
4206 	 */
4207 	node = pdu_data;
4208 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4209 
4210 	est = node;
4211 	sep->status = est->status;
4212 	sep->conn_handle = sys_cpu_to_le16(est->cis_handle);
4213 
4214 	if (!cig) {
4215 		/* CIS was not established and instance was released */
4216 		return;
4217 	}
4218 
4219 	lll_cis = &cis->lll;
4220 	is_central = cig->lll.role == BT_CONN_ROLE_CENTRAL;
4221 	lll_cis_c = is_central ? &lll_cis->tx : &lll_cis->rx;
4222 	lll_cis_p = is_central ? &lll_cis->rx : &lll_cis->tx;
4223 
4224 	sys_put_le24(cig->sync_delay, sep->cig_sync_delay);
4225 	sys_put_le24(cis->sync_delay, sep->cis_sync_delay);
4226 	sys_put_le24(cig->c_latency, sep->c_latency);
4227 	sys_put_le24(cig->p_latency, sep->p_latency);
4228 	sep->c_phy = lll_cis_c->phy;
4229 	sep->p_phy = lll_cis_p->phy;
4230 	sep->nse = lll_cis->nse;
4231 	sep->c_bn = lll_cis_c->bn;
4232 	sep->p_bn = lll_cis_p->bn;
4233 	sep->c_ft = lll_cis_c->ft;
4234 	sep->p_ft = lll_cis_p->ft;
4235 	sep->c_max_pdu = sys_cpu_to_le16(lll_cis_c->max_pdu);
4236 	sep->p_max_pdu = sys_cpu_to_le16(lll_cis_p->max_pdu);
4237 	sep->interval = sys_cpu_to_le16(cig->iso_interval);
4238 
4239 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
4240 	if (is_central) {
4241 		cis_pending_count--;
4242 	}
4243 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
4244 }
4245 #endif /* CONFIG_BT_CTLR_CONN_ISO */
4246 
4247 static int controller_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
4248 				 struct net_buf **evt, void **node_rx)
4249 {
4250 	switch (ocf) {
4251 	case BT_OCF(BT_HCI_OP_LE_SET_EVENT_MASK):
4252 		le_set_event_mask(cmd, evt);
4253 		break;
4254 
4255 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE):
4256 		le_read_buffer_size(cmd, evt);
4257 		break;
4258 
4259 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4260 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2):
4261 		le_read_buffer_size_v2(cmd, evt);
4262 		break;
4263 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4264 
4265 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_FEATURES):
4266 		le_read_local_features(cmd, evt);
4267 		break;
4268 
4269 	case BT_OCF(BT_HCI_OP_LE_SET_RANDOM_ADDRESS):
4270 		le_set_random_address(cmd, evt);
4271 		break;
4272 
4273 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
4274 	case BT_OCF(BT_HCI_OP_LE_READ_FAL_SIZE):
4275 		le_read_fal_size(cmd, evt);
4276 		break;
4277 
4278 	case BT_OCF(BT_HCI_OP_LE_CLEAR_FAL):
4279 		le_clear_fal(cmd, evt);
4280 		break;
4281 
4282 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_FAL):
4283 		le_add_dev_to_fal(cmd, evt);
4284 		break;
4285 
4286 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_FAL):
4287 		le_rem_dev_from_fal(cmd, evt);
4288 		break;
4289 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
4290 
4291 	case BT_OCF(BT_HCI_OP_LE_ENCRYPT):
4292 		le_encrypt(cmd, evt);
4293 		break;
4294 
4295 	case BT_OCF(BT_HCI_OP_LE_RAND):
4296 		le_rand(cmd, evt);
4297 		break;
4298 
4299 	case BT_OCF(BT_HCI_OP_LE_READ_SUPP_STATES):
4300 		le_read_supp_states(cmd, evt);
4301 		break;
4302 
4303 #if defined(CONFIG_BT_BROADCASTER)
4304 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_PARAM):
4305 		le_set_adv_param(cmd, evt);
4306 		break;
4307 
4308 	case BT_OCF(BT_HCI_OP_LE_READ_ADV_CHAN_TX_POWER):
4309 		le_read_adv_chan_tx_power(cmd, evt);
4310 		break;
4311 
4312 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_DATA):
4313 		le_set_adv_data(cmd, evt);
4314 		break;
4315 
4316 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_RSP_DATA):
4317 		le_set_scan_rsp_data(cmd, evt);
4318 		break;
4319 
4320 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_ENABLE):
4321 		le_set_adv_enable(cmd, evt);
4322 		break;
4323 
4324 #if defined(CONFIG_BT_CTLR_ADV_ISO)
4325 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG):
4326 		le_create_big(cmd, evt);
4327 		break;
4328 
4329 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG_TEST):
4330 		le_create_big_test(cmd, evt);
4331 		break;
4332 
4333 	case BT_OCF(BT_HCI_OP_LE_TERMINATE_BIG):
4334 		le_terminate_big(cmd, evt);
4335 		break;
4336 #endif /* CONFIG_BT_CTLR_ADV_ISO */
4337 #endif /* CONFIG_BT_BROADCASTER */
4338 
4339 #if defined(CONFIG_BT_OBSERVER)
4340 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_PARAM):
4341 		le_set_scan_param(cmd, evt);
4342 		break;
4343 
4344 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_ENABLE):
4345 		le_set_scan_enable(cmd, evt);
4346 		break;
4347 
4348 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
4349 	case BT_OCF(BT_HCI_OP_LE_BIG_CREATE_SYNC):
4350 		le_big_create_sync(cmd, evt);
4351 		break;
4352 
4353 	case BT_OCF(BT_HCI_OP_LE_BIG_TERMINATE_SYNC):
4354 		le_big_terminate_sync(cmd, evt, node_rx);
4355 		break;
4356 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
4357 #endif /* CONFIG_BT_OBSERVER */
4358 
4359 #if defined(CONFIG_BT_CENTRAL)
4360 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN):
4361 		le_create_connection(cmd, evt);
4362 		break;
4363 
4364 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN_CANCEL):
4365 		le_create_conn_cancel(cmd, evt, node_rx);
4366 		break;
4367 
4368 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF):
4369 		le_set_host_chan_classif(cmd, evt);
4370 		break;
4371 
4372 #if defined(CONFIG_BT_CTLR_LE_ENC)
4373 	case BT_OCF(BT_HCI_OP_LE_START_ENCRYPTION):
4374 		le_start_encryption(cmd, evt);
4375 		break;
4376 #endif /* CONFIG_BT_CTLR_LE_ENC */
4377 
4378 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
4379 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS):
4380 		le_set_cig_parameters(cmd, evt);
4381 		break;
4382 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS_TEST):
4383 		le_set_cig_params_test(cmd, evt);
4384 		break;
4385 	case BT_OCF(BT_HCI_OP_LE_CREATE_CIS):
4386 		le_create_cis(cmd, evt);
4387 		break;
4388 	case BT_OCF(BT_HCI_OP_LE_REMOVE_CIG):
4389 		le_remove_cig(cmd, evt);
4390 		break;
4391 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
4392 #endif /* CONFIG_BT_CENTRAL */
4393 
4394 #if defined(CONFIG_BT_PERIPHERAL)
4395 #if defined(CONFIG_BT_CTLR_LE_ENC)
4396 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_REPLY):
4397 		le_ltk_req_reply(cmd, evt);
4398 		break;
4399 
4400 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY):
4401 		le_ltk_req_neg_reply(cmd, evt);
4402 		break;
4403 #endif /* CONFIG_BT_CTLR_LE_ENC */
4404 
4405 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4406 	case BT_OCF(BT_HCI_OP_LE_ACCEPT_CIS):
4407 		le_accept_cis(cmd, evt);
4408 		break;
4409 	case BT_OCF(BT_HCI_OP_LE_REJECT_CIS):
4410 		le_reject_cis(cmd, evt);
4411 		break;
4412 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4413 #endif /* CONFIG_BT_PERIPHERAL */
4414 
4415 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
4416 	case BT_OCF(BT_HCI_OP_LE_REQ_PEER_SC):
4417 		le_req_peer_sca(cmd, evt);
4418 		break;
4419 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
4420 
4421 #if defined(CONFIG_BT_CTLR_ISO)
4422 	case BT_OCF(BT_HCI_OP_LE_SETUP_ISO_PATH):
4423 		le_setup_iso_path(cmd, evt);
4424 		break;
4425 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ISO_PATH):
4426 		le_remove_iso_path(cmd, evt);
4427 		break;
4428 	case BT_OCF(BT_HCI_OP_LE_ISO_TEST_END):
4429 		le_iso_test_end(cmd, evt);
4430 		break;
4431 #endif /* CONFIG_BT_CTLR_ISO */
4432 
4433 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4434 	case BT_OCF(BT_HCI_OP_LE_ISO_TRANSMIT_TEST):
4435 		le_iso_transmit_test(cmd, evt);
4436 		break;
4437 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_TX_SYNC):
4438 		le_read_iso_tx_sync(cmd, evt);
4439 		break;
4440 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4441 
4442 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4443 	case BT_OCF(BT_HCI_OP_LE_ISO_RECEIVE_TEST):
4444 		le_iso_receive_test(cmd, evt);
4445 		break;
4446 	case BT_OCF(BT_HCI_OP_LE_ISO_READ_TEST_COUNTERS):
4447 		le_iso_read_test_counters(cmd, evt);
4448 		break;
4449 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
4450 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_LINK_QUALITY):
4451 		le_read_iso_link_quality(cmd, evt);
4452 		break;
4453 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
4454 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
4455 
4456 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
4457 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_FEATURE):
4458 		le_set_host_feature(cmd, evt);
4459 		break;
4460 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
4461 
4462 #if defined(CONFIG_BT_CONN)
4463 	case BT_OCF(BT_HCI_OP_LE_READ_CHAN_MAP):
4464 		le_read_chan_map(cmd, evt);
4465 		break;
4466 
4467 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
4468 	case BT_OCF(BT_HCI_OP_LE_READ_REMOTE_FEATURES):
4469 		le_read_remote_features(cmd, evt);
4470 		break;
4471 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
4472 
4473 	case BT_OCF(BT_HCI_OP_LE_CONN_UPDATE):
4474 		le_conn_update(cmd, evt);
4475 		break;
4476 
4477 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
4478 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY):
4479 		le_conn_param_req_reply(cmd, evt);
4480 		break;
4481 
4482 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY):
4483 		le_conn_param_req_neg_reply(cmd, evt);
4484 		break;
4485 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4486 
4487 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4488 	case BT_OCF(BT_HCI_OP_LE_SET_DATA_LEN):
4489 		le_set_data_len(cmd, evt);
4490 		break;
4491 
4492 	case BT_OCF(BT_HCI_OP_LE_READ_DEFAULT_DATA_LEN):
4493 		le_read_default_data_len(cmd, evt);
4494 		break;
4495 
4496 	case BT_OCF(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN):
4497 		le_write_default_data_len(cmd, evt);
4498 		break;
4499 
4500 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_DATA_LEN):
4501 		le_read_max_data_len(cmd, evt);
4502 		break;
4503 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4504 
4505 #if defined(CONFIG_BT_CTLR_PHY)
4506 	case BT_OCF(BT_HCI_OP_LE_READ_PHY):
4507 		le_read_phy(cmd, evt);
4508 		break;
4509 
4510 	case BT_OCF(BT_HCI_OP_LE_SET_DEFAULT_PHY):
4511 		le_set_default_phy(cmd, evt);
4512 		break;
4513 
4514 	case BT_OCF(BT_HCI_OP_LE_SET_PHY):
4515 		le_set_phy(cmd, evt);
4516 		break;
4517 #endif /* CONFIG_BT_CTLR_PHY */
4518 #endif /* CONFIG_BT_CONN */
4519 
4520 #if defined(CONFIG_BT_CTLR_ADV_EXT)
4521 #if defined(CONFIG_BT_BROADCASTER)
4522 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR):
4523 		le_set_adv_set_random_addr(cmd, evt);
4524 		break;
4525 
4526 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_PARAM):
4527 		le_set_ext_adv_param(cmd, evt);
4528 		break;
4529 
4530 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_DATA):
4531 		le_set_ext_adv_data(cmd, evt);
4532 		break;
4533 
4534 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_RSP_DATA):
4535 		le_set_ext_scan_rsp_data(cmd, evt);
4536 		break;
4537 
4538 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE):
4539 		le_set_ext_adv_enable(cmd, evt);
4540 		break;
4541 
4542 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN):
4543 		le_read_max_adv_data_len(cmd, evt);
4544 		break;
4545 
4546 	case BT_OCF(BT_HCI_OP_LE_READ_NUM_ADV_SETS):
4547 		le_read_num_adv_sets(cmd, evt);
4548 		break;
4549 
4550 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ADV_SET):
4551 		le_remove_adv_set(cmd, evt);
4552 		break;
4553 
4554 	case BT_OCF(BT_HCI_OP_CLEAR_ADV_SETS):
4555 		le_clear_adv_sets(cmd, evt);
4556 		break;
4557 
4558 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
4559 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_PARAM):
4560 		le_set_per_adv_param(cmd, evt);
4561 		break;
4562 
4563 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_DATA):
4564 		le_set_per_adv_data(cmd, evt);
4565 		break;
4566 
4567 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_ENABLE):
4568 		le_set_per_adv_enable(cmd, evt);
4569 		break;
4570 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
4571 #endif /* CONFIG_BT_BROADCASTER */
4572 
4573 #if defined(CONFIG_BT_OBSERVER)
4574 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM):
4575 		le_set_ext_scan_param(cmd, evt);
4576 		break;
4577 
4578 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE):
4579 		le_set_ext_scan_enable(cmd, evt);
4580 		break;
4581 
4582 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
4583 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC):
4584 		le_per_adv_create_sync(cmd, evt);
4585 		break;
4586 
4587 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL):
4588 		le_per_adv_create_sync_cancel(cmd, evt, node_rx);
4589 		break;
4590 
4591 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC):
4592 		le_per_adv_terminate_sync(cmd, evt);
4593 		break;
4594 
4595 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE):
4596 		le_per_adv_recv_enable(cmd, evt);
4597 		break;
4598 
4599 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4600 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST):
4601 		le_add_dev_to_pal(cmd, evt);
4602 		break;
4603 
4604 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST):
4605 		le_rem_dev_from_pal(cmd, evt);
4606 		break;
4607 
4608 	case BT_OCF(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST):
4609 		le_clear_pal(cmd, evt);
4610 		break;
4611 
4612 	case BT_OCF(BT_HCI_OP_LE_READ_PER_ADV_LIST_SIZE):
4613 		le_read_pal_size(cmd, evt);
4614 		break;
4615 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4616 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4617 #endif /* CONFIG_BT_OBSERVER */
4618 
4619 #if defined(CONFIG_BT_CONN)
4620 #if defined(CONFIG_BT_CENTRAL)
4621 	case BT_OCF(BT_HCI_OP_LE_EXT_CREATE_CONN):
4622 		le_ext_create_connection(cmd, evt);
4623 		break;
4624 #endif /* CONFIG_BT_CENTRAL */
4625 #endif /* CONFIG_BT_CONN */
4626 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4627 
4628 #if defined(CONFIG_BT_CTLR_PRIVACY)
4629 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_RL):
4630 		le_add_dev_to_rl(cmd, evt);
4631 		break;
4632 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_RL):
4633 		le_rem_dev_from_rl(cmd, evt);
4634 		break;
4635 	case BT_OCF(BT_HCI_OP_LE_CLEAR_RL):
4636 		le_clear_rl(cmd, evt);
4637 		break;
4638 	case BT_OCF(BT_HCI_OP_LE_READ_RL_SIZE):
4639 		le_read_rl_size(cmd, evt);
4640 		break;
4641 	case BT_OCF(BT_HCI_OP_LE_READ_PEER_RPA):
4642 		le_read_peer_rpa(cmd, evt);
4643 		break;
4644 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_RPA):
4645 		le_read_local_rpa(cmd, evt);
4646 		break;
4647 	case BT_OCF(BT_HCI_OP_LE_SET_ADDR_RES_ENABLE):
4648 		le_set_addr_res_enable(cmd, evt);
4649 		break;
4650 	case BT_OCF(BT_HCI_OP_LE_SET_RPA_TIMEOUT):
4651 		le_set_rpa_timeout(cmd, evt);
4652 		break;
4653 	case BT_OCF(BT_HCI_OP_LE_SET_PRIVACY_MODE):
4654 		le_set_privacy_mode(cmd, evt);
4655 		break;
4656 #endif /* CONFIG_BT_CTLR_PRIVACY */
4657 
4658 	case BT_OCF(BT_HCI_OP_LE_READ_TX_POWER):
4659 		le_read_tx_power(cmd, evt);
4660 		break;
4661 
4662 #if defined(CONFIG_BT_CTLR_DF)
4663 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
4664 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_PARAMS):
4665 		le_df_set_cl_cte_tx_params(cmd, evt);
4666 		break;
4667 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_ENABLE):
4668 		le_df_set_cl_cte_enable(cmd, evt);
4669 		break;
4670 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
4671 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
4672 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_SAMPLING_ENABLE):
4673 		le_df_set_cl_iq_sampling_enable(cmd, evt);
4674 		break;
4675 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
4676 	case BT_OCF(BT_HCI_OP_LE_READ_ANT_INFO):
4677 		le_df_read_ant_inf(cmd, evt);
4678 		break;
4679 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
4680 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_TX_PARAMS):
4681 		le_df_set_conn_cte_tx_params(cmd, evt);
4682 		break;
4683 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
4684 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
4685 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_RX_PARAMS):
4686 		le_df_set_conn_cte_rx_params(cmd, evt);
4687 		break;
4688 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
4689 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
4690 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_REQ_ENABLE):
4691 		le_df_set_conn_cte_req_enable(cmd, evt);
4692 		break;
4693 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
4694 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
4695 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_RSP_ENABLE):
4696 		le_df_set_conn_cte_rsp_enable(cmd, evt);
4697 		break;
4698 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
4699 #endif /* CONFIG_BT_CTLR_DF */
4700 
4701 #if defined(CONFIG_BT_CTLR_DTM_HCI)
4702 	case BT_OCF(BT_HCI_OP_LE_RX_TEST):
4703 		le_rx_test(cmd, evt);
4704 		break;
4705 	case BT_OCF(BT_HCI_OP_LE_TX_TEST):
4706 		le_tx_test(cmd, evt);
4707 		break;
4708 	case BT_OCF(BT_HCI_OP_LE_TEST_END):
4709 		le_test_end(cmd, evt);
4710 		break;
4711 	case BT_OCF(BT_HCI_OP_LE_ENH_RX_TEST):
4712 		le_enh_rx_test(cmd, evt);
4713 		break;
4714 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
4715 	case BT_OCF(BT_HCI_OP_LE_RX_TEST_V3):
4716 		le_rx_test_v3(cmd, evt);
4717 		break;
4718 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
4719 	case BT_OCF(BT_HCI_OP_LE_ENH_TX_TEST):
4720 		le_enh_tx_test(cmd, evt);
4721 		break;
4722 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
4723 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V3):
4724 		le_tx_test_v3(cmd, evt);
4725 		break;
4726 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
4727 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
4728 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V4):
4729 		le_tx_test_v4(cmd, evt);
4730 		break;
4731 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
4732 #endif /* CONFIG_BT_CTLR_DTM_HCI */
4733 
4734 	default:
4735 		return -EINVAL;
4736 	}
4737 
4738 	return 0;
4739 }
4740 
4741 /* If Zephyr VS HCI commands are not enabled, provide this functionality directly.
4742  */
4743 #if !defined(CONFIG_BT_HCI_VS_EXT)
4744 uint8_t bt_read_static_addr(struct bt_hci_vs_static_addr addrs[], uint8_t size)
4745 {
4746 	return hci_vendor_read_static_addr(addrs, size);
4747 }
4748 #endif /* !defined(CONFIG_BT_HCI_VS_EXT) */
4749 
4750 
4751 #if defined(CONFIG_BT_HCI_VS)
4752 static void vs_read_version_info(struct net_buf *buf, struct net_buf **evt)
4753 {
4754 	struct bt_hci_rp_vs_read_version_info *rp;
4755 
4756 	rp = hci_cmd_complete(evt, sizeof(*rp));
4757 
4758 	rp->status = 0x00;
4759 	rp->hw_platform = sys_cpu_to_le16(BT_HCI_VS_HW_PLAT);
4760 	rp->hw_variant = sys_cpu_to_le16(BT_HCI_VS_HW_VAR);
4761 
4762 	rp->fw_variant = 0U;
4763 	rp->fw_version = (KERNEL_VERSION_MAJOR & 0xff);
4764 	rp->fw_revision = sys_cpu_to_le16(KERNEL_VERSION_MINOR);
4765 	rp->fw_build = sys_cpu_to_le32(KERNEL_PATCHLEVEL & 0xffff);
4766 }
4767 
4768 static void vs_read_supported_commands(struct net_buf *buf,
4769 				       struct net_buf **evt)
4770 {
4771 	struct bt_hci_rp_vs_read_supported_commands *rp;
4772 
4773 	rp = hci_cmd_complete(evt, sizeof(*rp));
4774 
4775 	rp->status = 0x00;
4776 	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));
4777 
4778 	/* Set Version Information, Supported Commands, Supported Features. */
4779 	rp->commands[0] |= BIT(0) | BIT(1) | BIT(2);
4780 #if defined(CONFIG_BT_HCI_VS_EXT)
4781 	/* Write BD_ADDR, Read Build Info */
4782 	rp->commands[0] |= BIT(5) | BIT(7);
4783 	/* Read Static Addresses, Read Key Hierarchy Roots */
4784 	rp->commands[1] |= BIT(0) | BIT(1);
4785 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
4786 	/* Write Tx Power, Read Tx Power */
4787 	rp->commands[1] |= BIT(5) | BIT(6);
4788 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
4789 #if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
4790 	/* Read Supported USB Transport Modes */
4791 	rp->commands[1] |= BIT(7);
4792 	/* Set USB Transport Mode */
4793 	rp->commands[2] |= BIT(0);
4794 #endif /* USB_DEVICE_BLUETOOTH_VS_H4 */
4795 #endif /* CONFIG_BT_HCI_VS_EXT */
4796 }
4797 
4798 static void vs_read_supported_features(struct net_buf *buf,
4799 				       struct net_buf **evt)
4800 {
4801 	struct bt_hci_rp_vs_read_supported_features *rp;
4802 
4803 	rp = hci_cmd_complete(evt, sizeof(*rp));
4804 
4805 	rp->status = 0x00;
4806 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
4807 }
4808 
4809 uint8_t __weak hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[],
4810 					uint8_t size)
4811 {
4812 	ARG_UNUSED(addrs);
4813 	ARG_UNUSED(size);
4814 
4815 	return 0;
4816 }
4817 
4818 #if defined(CONFIG_BT_HCI_VS_EXT)
4819 static void vs_write_bd_addr(struct net_buf *buf, struct net_buf **evt)
4820 {
4821 	struct bt_hci_cp_vs_write_bd_addr *cmd = (void *)buf->data;
4822 
4823 	ll_addr_set(0, &cmd->bdaddr.val[0]);
4824 
4825 	*evt = cmd_complete_status(0x00);
4826 }
4827 
4828 static void vs_read_build_info(struct net_buf *buf, struct net_buf **evt)
4829 {
4830 	struct bt_hci_rp_vs_read_build_info *rp;
4831 
4832 #define HCI_VS_BUILD_INFO "Zephyr OS v" \
4833 	KERNEL_VERSION_STRING CONFIG_BT_CTLR_HCI_VS_BUILD_INFO
4834 
4835 	const char build_info[] = HCI_VS_BUILD_INFO;
4836 
4837 #define BUILD_INFO_EVT_LEN (sizeof(struct bt_hci_evt_hdr) + \
4838 			    sizeof(struct bt_hci_evt_cmd_complete) + \
4839 			    sizeof(struct bt_hci_rp_vs_read_build_info) + \
4840 			    sizeof(build_info))
4841 
4842 	BUILD_ASSERT(CONFIG_BT_BUF_EVT_RX_SIZE >= BUILD_INFO_EVT_LEN);
4843 
4844 	rp = hci_cmd_complete(evt, sizeof(*rp) + sizeof(build_info));
4845 	rp->status = 0x00;
4846 	memcpy(rp->info, build_info, sizeof(build_info));
4847 }
4848 
4849 void __weak hci_vendor_read_key_hierarchy_roots(uint8_t ir[16], uint8_t er[16])
4850 {
4851 	/* Mark IR as invalid */
4852 	(void)memset(ir, 0x00, 16);
4853 
4854 	/* Mark ER as invalid */
4855 	(void)memset(er, 0x00, 16);
4856 }
4857 
4858 static void vs_read_static_addrs(struct net_buf *buf, struct net_buf **evt)
4859 {
4860 	struct bt_hci_rp_vs_read_static_addrs *rp;
4861 
4862 	rp = hci_cmd_complete(evt, sizeof(*rp) +
4863 				   sizeof(struct bt_hci_vs_static_addr));
4864 	rp->status = 0x00;
4865 	rp->num_addrs = hci_vendor_read_static_addr(rp->a, 1);
4866 }
4867 
4868 static void vs_read_key_hierarchy_roots(struct net_buf *buf,
4869 					struct net_buf **evt)
4870 {
4871 	struct bt_hci_rp_vs_read_key_hierarchy_roots *rp;
4872 
4873 	rp = hci_cmd_complete(evt, sizeof(*rp));
4874 	rp->status = 0x00;
4875 	hci_vendor_read_key_hierarchy_roots(rp->ir, rp->er);
4876 }
4877 
4878 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
4879 static void vs_set_min_used_chans(struct net_buf *buf, struct net_buf **evt)
4880 {
4881 	struct bt_hci_cp_vs_set_min_num_used_chans *cmd = (void *)buf->data;
4882 	uint16_t handle = sys_le16_to_cpu(cmd->handle);
4883 	uint8_t status;
4884 
4885 	status = ll_set_min_used_chans(handle, cmd->phys, cmd->min_used_chans);
4886 
4887 	*evt = cmd_complete_status(status);
4888 }
4889 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
4890 
4891 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
4892 static void vs_write_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4893 {
4894 	struct bt_hci_cp_vs_write_tx_power_level *cmd = (void *)buf->data;
4895 	struct bt_hci_rp_vs_write_tx_power_level *rp;
4896 	uint8_t handle_type;
4897 	uint16_t handle;
4898 	uint8_t status;
4899 
4900 	handle_type = cmd->handle_type;
4901 	handle = sys_le16_to_cpu(cmd->handle);
4902 
4903 	rp = hci_cmd_complete(evt, sizeof(*rp));
4904 	rp->selected_tx_power = cmd->tx_power_level;
4905 
4906 	status = ll_tx_pwr_lvl_set(handle_type, handle, &rp->selected_tx_power);
4907 
4908 	rp->status = status;
4909 	rp->handle_type = handle_type;
4910 	rp->handle = sys_cpu_to_le16(handle);
4911 }
4912 
4913 static void vs_read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
4914 {
4915 	struct bt_hci_cp_vs_read_tx_power_level *cmd = (void *)buf->data;
4916 	struct bt_hci_rp_vs_read_tx_power_level *rp;
4917 	uint8_t handle_type;
4918 	uint16_t handle;
4919 	uint8_t status;
4920 
4921 	handle_type = cmd->handle_type;
4922 	handle = sys_le16_to_cpu(cmd->handle);
4923 
4924 	rp = hci_cmd_complete(evt, sizeof(*rp));
4925 
4926 	status = ll_tx_pwr_lvl_get(handle_type, handle, 0, &rp->tx_power_level);
4927 
4928 	rp->status = status;
4929 	rp->handle_type = handle_type;
4930 	rp->handle = sys_cpu_to_le16(handle);
4931 }
4932 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
4933 
4934 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
4935 /* A memory pool for vendor specific events used for fatal error reporting purposes. */
4936 NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE, 8, NULL);
4937 
4938 /* An alias for the convenience of the Controller HCI implementation. The Controller is
4939  * built for a particular architecture, hence the alias allows conditional compilation to
4940  * be avoided. The Host may not be aware of the hardware architecture the Controller is
4941  * running on, hence the CPU data types for all supported architectures should be
4942  * available during build, and the alias is therefore defined here.
4943  */
4944 #if defined(CONFIG_CPU_CORTEX_M)
4945 typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;
4946 
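/* Fill the vendor specific fatal error event with the basic Cortex-M
 * exception stack frame registers, converted to little-endian.
 */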
4947 static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
4948 				       const z_arch_esf_t *esf)
4949 {
4950 	cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
4951 	cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
4952 	cpu_data->a3 = sys_cpu_to_le32(esf->basic.a3);
4953 	cpu_data->a4 = sys_cpu_to_le32(esf->basic.a4);
4954 	cpu_data->ip = sys_cpu_to_le32(esf->basic.ip);
4955 	cpu_data->lr = sys_cpu_to_le32(esf->basic.lr);
4956 	cpu_data->xpsr = sys_cpu_to_le32(esf->basic.xpsr);
4957 }
4958 #endif /* CONFIG_CPU_CORTEX_M */
4959 
4960 static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
4961 {
4962 	struct net_buf *buf;
4963 
4964 	buf = net_buf_alloc(&vs_err_tx_pool, K_FOREVER);
4965 	if (buf) {
4966 		struct bt_hci_evt_le_meta_event *me;
4967 		struct bt_hci_evt_hdr *hdr;
4968 
4969 		net_buf_reserve(buf, BT_BUF_RESERVE);
4970 		bt_buf_set_type(buf, BT_BUF_EVT);
4971 
4972 		hdr = net_buf_add(buf, sizeof(*hdr));
4973 		hdr->evt = BT_HCI_EVT_VENDOR;
4974 		hdr->len = len + sizeof(*me);
4975 
4976 		me = net_buf_add(buf, sizeof(*me));
4977 		me->subevent = subevt;
4978 	}
4979 
4980 	return buf;
4981 }
4982 
4983 struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf)
4984 {
4985 	/* Prepare vendor specific HCI Fatal Error event */
4986 	struct bt_hci_vs_fatal_error_stack_frame *sf;
4987 	bt_hci_vs_fatal_error_cpu_data *cpu_data;
4988 	struct net_buf *buf;
4989 
4990 	buf = vs_err_evt_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_STACK_FRAME,
4991 				sizeof(*sf) + sizeof(*cpu_data));
4992 	if (buf != NULL) {
4993 		sf = net_buf_add(buf, (sizeof(*sf) + sizeof(*cpu_data)));
4994 		sf->reason = sys_cpu_to_le32(reason);
4995 		sf->cpu_type = BT_HCI_EVT_VS_ERROR_CPU_TYPE_CORTEX_M;
4996 
4997 		vs_err_fatal_cpu_data_fill(
4998 			(bt_hci_vs_fatal_error_cpu_data *)sf->cpu_data, esf);
4999 	} else {
5000 		LOG_ERR("Can't create HCI Fatal Error event");
5001 	}
5002 
5003 	return buf;
5004 }
5005 
5006 static struct net_buf *hci_vs_err_trace_create(uint8_t data_type,
5007 					       const char *file_path,
5008 					       uint32_t line, uint64_t pc)
5009 {
5010 	uint32_t file_name_len = 0U, pos = 0U;
5011 	struct net_buf *buf = NULL;
5012 
5013 	if (file_path) {
5014 		/* Extract file name from a path */
5015 		while (file_path[file_name_len] != '\0') {
5016 			if (file_path[file_name_len] == '/') {
5017 				pos = file_name_len + 1;
5018 			}
5019 			file_name_len++;
5020 		}
5021 		file_path += pos;
5022 		file_name_len -= pos;
5023 
		/* Proceed only if a file name was found in file_path, in other words:
		 * file_path is not an empty string and does not end with a path
		 * separator, e.g. `foo/bar/`.
		 */
5027 		if (file_name_len) {
5028 			/* Total data length: len = file name strlen + \0 + sizeof(line number)
5029 			 * Maximum length of an HCI event data is BT_BUF_EVT_RX_SIZE. If total data
5030 			 * length exceeds this maximum, truncate file name.
5031 			 */
5032 			uint32_t data_len = 1 + sizeof(line);
5033 
5034 			/* If a buffer is created for a TRACE data, include sizeof(pc) in total
5035 			 * length.
5036 			 */
5037 			if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5038 				data_len += sizeof(pc);
5039 			}
5040 
5041 			if (data_len + file_name_len > BT_BUF_EVT_RX_SIZE) {
5042 				uint32_t overflow_len =
5043 					file_name_len + data_len - BT_BUF_EVT_RX_SIZE;
5044 
5045 				/* Truncate the file name length by number of overflow bytes */
5046 				file_name_len -= overflow_len;
5047 			}
5048 
5049 			/* Get total event data length including file name length */
5050 			data_len += file_name_len;
5051 
5052 			/* Prepare vendor specific HCI Fatal Error event */
5053 			buf = vs_err_evt_create(data_type, data_len);
5054 			if (buf != NULL) {
5055 				if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5056 					net_buf_add_le64(buf, pc);
5057 				}
5058 				net_buf_add_mem(buf, file_path, file_name_len);
5059 				net_buf_add_u8(buf, STR_NULL_TERMINATOR);
5060 				net_buf_add_le32(buf, line);
5061 			} else {
5062 				LOG_ERR("Can't create HCI Fatal Error event");
5063 			}
5064 		}
5065 	}
5066 
5067 	return buf;
5068 }
5069 
5070 struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc)
5071 {
5072 	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE, file, line, pc);
5073 }
5074 
5075 struct net_buf *hci_vs_err_assert(const char *file, uint32_t line)
5076 {
	/* ASSERT data does not contain a program counter value, hence a zero constant is used */
5078 	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_CTRL_ASSERT, file, line, 0U);
5079 }
5080 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
5081 
5082 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
5083 static void vs_le_df_connectionless_iq_report(struct pdu_data *pdu_rx, struct node_rx_pdu *node_rx,
5084 					      struct net_buf *buf)
5085 {
5086 	struct bt_hci_evt_vs_le_connectionless_iq_report *sep;
5087 	struct node_rx_iq_report *iq_report;
5088 	struct lll_sync *lll;
5089 	uint8_t samples_cnt;
5090 	int16_t rssi;
5091 	uint16_t sync_handle;
5092 	uint16_t per_evt_counter;
5093 	struct ll_sync_set *sync = NULL;
5094 
5095 	iq_report = (struct node_rx_iq_report *)node_rx;
5096 
5097 	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTIONLESS_IQ_REPORT)) {
5098 		return;
5099 	}
5100 
5101 	lll = iq_report->hdr.rx_ftr.param;
5102 
5103 	sync = HDR_LLL2ULL(lll);
5104 
5105 	/* TX LL thread has higher priority than RX thread. It may happen that
5106 	 * host successfully disables CTE sampling in the meantime.
5107 	 * It should be verified here, to avoid reporting IQ samples after
5108 	 * the functionality was disabled or if sync was lost.
5109 	 */
5110 	if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) || !sync->timeout_reload) {
5111 		/* Drop further processing of the event. */
5112 		return;
5113 	}
5114 
5115 	/* Get the sync handle corresponding to the LLL context passed in the
5116 	 * node rx footer field.
5117 	 */
5118 	sync_handle = ull_sync_handle_get(sync);
5119 	per_evt_counter = iq_report->event_counter;
5120 
5121 	/* If packet status does not indicate insufficient resources for IQ samples and for
5122 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
5123 	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE
5124 	 * value.
5125 	 */
5126 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5127 		samples_cnt = 0U;
5128 	} else {
5129 		samples_cnt = MAX(1, iq_report->sample_count);
5130 	}
5131 
5132 	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
5133 		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));
5134 
5135 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);
5136 
5137 	sep->sync_handle = sys_cpu_to_le16(sync_handle);
5138 	sep->rssi = sys_cpu_to_le16(rssi);
5139 	sep->rssi_ant_id = iq_report->rssi_ant_id;
5140 	sep->cte_type = iq_report->cte_info.type;
5141 
5142 	sep->chan_idx = iq_report->chan_idx;
5143 	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);
5144 
5145 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
5146 		sep->slot_durations = iq_report->local_slot_durations;
5147 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
5148 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
5149 	} else {
5150 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
5151 	}
5152 
5153 	sep->packet_status = iq_report->packet_status;
5154 
5155 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5156 		if (iq_report->sample_count == 0U) {
5157 			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5158 			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5159 		} else {
5160 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
5161 				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
5162 				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
5163 			}
5164 		}
5165 	}
5166 
5167 	sep->sample_count = samples_cnt;
5168 }
5169 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
5170 
5171 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
5172 static void vs_le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
5173 {
5174 	struct bt_hci_evt_vs_le_connection_iq_report *sep;
5175 	struct node_rx_iq_report *iq_report;
5176 	struct lll_conn *lll;
5177 	uint8_t samples_cnt;
5178 	uint8_t phy_rx;
5179 	int16_t rssi;
5180 
5181 	iq_report = (struct node_rx_iq_report *)node_rx;
5182 
5183 	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTION_IQ_REPORT)) {
5184 		return;
5185 	}
5186 
5187 	lll = iq_report->hdr.rx_ftr.param;
5188 
5189 #if defined(CONFIG_BT_CTLR_PHY)
5190 	phy_rx = lll->phy_rx;
5191 
	/* Make sure the report is generated only for a connection on an uncoded PHY */
5193 	LL_ASSERT(phy_rx != PHY_CODED);
5194 #else
5195 	phy_rx = PHY_1M;
5196 #endif /* CONFIG_BT_CTLR_PHY */
5197 
	/* TX LL thread has higher priority than the RX thread. It may happen that the host
	 * successfully disables CTE sampling in the meantime. It should be verified here, to
	 * avoid reporting IQ samples after the functionality was disabled.
	 */
5202 	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
5204 		return;
5205 	}
5206 
5207 	/* If packet status does not indicate insufficient resources for IQ samples and for
5208 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
5209 	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE value.
5210 	 */
5211 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5212 		samples_cnt = 0U;
5213 	} else {
5214 		samples_cnt = MAX(1, iq_report->sample_count);
5215 	}
5216 
5217 	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT,
5218 			(sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));
5219 
5220 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->hdr.rx_ftr.rssi);
5221 
5222 	sep->conn_handle = sys_cpu_to_le16(iq_report->hdr.handle);
5223 	sep->rx_phy = phy_rx;
5224 	sep->rssi = sys_cpu_to_le16(rssi);
5225 	sep->rssi_ant_id = iq_report->rssi_ant_id;
5226 	sep->cte_type = iq_report->cte_info.type;
5227 
5228 	sep->data_chan_idx = iq_report->chan_idx;
5229 	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);
5230 
5231 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
5232 		sep->slot_durations = iq_report->local_slot_durations;
5233 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
5234 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
5235 	} else {
5236 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
5237 	}
5238 
5239 	sep->packet_status = iq_report->packet_status;
5240 
5241 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5242 		if (iq_report->sample_count == 0U) {
5243 			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5244 			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5245 		} else {
5246 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
5247 				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
5248 				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
5249 			}
5250 		}
5251 	}
5252 
5253 	sep->sample_count = samples_cnt;
5254 }
5255 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
5256 
5257 #endif /* CONFIG_BT_HCI_VS_EXT */
5258 
5259 #if defined(CONFIG_BT_HCI_MESH_EXT)
5260 static void mesh_get_opts(struct net_buf *buf, struct net_buf **evt)
5261 {
5262 	struct bt_hci_rp_mesh_get_opts *rp;
5263 
5264 	rp = hci_cmd_complete(evt, sizeof(*rp));
5265 
5266 	rp->status = 0x00;
5267 	rp->opcode = BT_HCI_OC_MESH_GET_OPTS;
5268 
5269 	rp->revision = BT_HCI_MESH_REVISION;
5270 	rp->ch_map = 0x7;
5271 	/*@todo: nRF51 only */
5272 	rp->min_tx_power = -30;
5273 	/*@todo: nRF51 only */
5274 	rp->max_tx_power = 4;
5275 	rp->max_scan_filter = CONFIG_BT_CTLR_MESH_SCAN_FILTERS;
5276 	rp->max_filter_pattern = CONFIG_BT_CTLR_MESH_SF_PATTERNS;
5277 	rp->max_adv_slot = 1U;
5278 	rp->evt_prefix_len = 0x01;
5279 	rp->evt_prefix = BT_HCI_MESH_EVT_PREFIX;
5280 }
5281 
5282 static void mesh_set_scan_filter(struct net_buf *buf, struct net_buf **evt)
5283 {
5284 	struct bt_hci_cp_mesh_set_scan_filter *cmd = (void *)buf->data;
5285 	struct bt_hci_rp_mesh_set_scan_filter *rp;
5286 	uint8_t filter = cmd->scan_filter - 1;
5287 	struct scan_filter *f;
5288 	uint8_t status = 0x00;
5289 	uint8_t i;
5290 
	if (filter >= ARRAY_SIZE(scan_filters) ||
5292 	    cmd->num_patterns > CONFIG_BT_CTLR_MESH_SF_PATTERNS) {
5293 		status = BT_HCI_ERR_INVALID_PARAM;
5294 		goto exit;
5295 	}
5296 
5297 	if (filter == sf_curr) {
5298 		status = BT_HCI_ERR_CMD_DISALLOWED;
5299 		goto exit;
5300 	}
5301 
5302 	/* duplicate filtering not supported yet */
5303 	if (cmd->filter_dup) {
5304 		status = BT_HCI_ERR_INVALID_PARAM;
5305 		goto exit;
5306 	}
5307 
5308 	f = &scan_filters[filter];
5309 	for (i = 0U; i < cmd->num_patterns; i++) {
5310 		if (!cmd->patterns[i].pattern_len ||
5311 		    cmd->patterns[i].pattern_len >
5312 		    BT_HCI_MESH_PATTERN_LEN_MAX) {
5313 			status = BT_HCI_ERR_INVALID_PARAM;
5314 			goto exit;
5315 		}
5316 		f->lengths[i] = cmd->patterns[i].pattern_len;
5317 		memcpy(f->patterns[i], cmd->patterns[i].pattern, f->lengths[i]);
5318 	}
5319 
5320 	f->count = cmd->num_patterns;
5321 
5322 exit:
5323 	rp = hci_cmd_complete(evt, sizeof(*rp));
5324 	rp->status = status;
5325 	rp->opcode = BT_HCI_OC_MESH_SET_SCAN_FILTER;
5326 	rp->scan_filter = filter + 1;
5327 }
5328 
5329 static void mesh_advertise(struct net_buf *buf, struct net_buf **evt)
5330 {
5331 	struct bt_hci_cp_mesh_advertise *cmd = (void *)buf->data;
5332 	struct bt_hci_rp_mesh_advertise *rp;
5333 	uint8_t adv_slot = cmd->adv_slot;
5334 	uint8_t status;
5335 
5336 	status = ll_mesh_advertise(adv_slot,
5337 				   cmd->own_addr_type, cmd->random_addr.val,
5338 				   cmd->ch_map, cmd->tx_power,
5339 				   cmd->min_tx_delay, cmd->max_tx_delay,
5340 				   cmd->retx_count, cmd->retx_interval,
5341 				   cmd->scan_duration, cmd->scan_delay,
5342 				   cmd->scan_filter, cmd->data_len, cmd->data);
5343 	if (!status) {
5344 		/* Yields 0xFF if no scan filter selected */
5345 		sf_curr = cmd->scan_filter - 1;
5346 	}
5347 
5348 	rp = hci_cmd_complete(evt, sizeof(*rp));
5349 	rp->status = status;
5350 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE;
5351 	rp->adv_slot = adv_slot;
5352 }
5353 
5354 static void mesh_advertise_cancel(struct net_buf *buf, struct net_buf **evt)
5355 {
5356 	struct bt_hci_cp_mesh_advertise_cancel *cmd = (void *)buf->data;
5357 	struct bt_hci_rp_mesh_advertise_cancel *rp;
5358 	uint8_t adv_slot = cmd->adv_slot;
5359 	uint8_t status;
5360 
5361 	status = ll_mesh_advertise_cancel(adv_slot);
5362 	if (!status) {
		/* Mark that no scan filter is selected */
5364 		sf_curr = 0xFF;
5365 	}
5366 
5367 	rp = hci_cmd_complete(evt, sizeof(*rp));
5368 	rp->status = status;
5369 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE_CANCEL;
5370 	rp->adv_slot = adv_slot;
5371 }
5372 
5373 static int mesh_cmd_handle(struct net_buf *cmd, struct net_buf **evt)
5374 {
5375 	struct bt_hci_cp_mesh *cp_mesh;
5376 	uint8_t mesh_op;
5377 
5378 	if (cmd->len < sizeof(*cp_mesh)) {
5379 		LOG_ERR("No HCI VSD Command header");
5380 		return -EINVAL;
5381 	}
5382 
5383 	cp_mesh = net_buf_pull_mem(cmd, sizeof(*cp_mesh));
5384 	mesh_op = cp_mesh->opcode;
5385 
5386 	switch (mesh_op) {
5387 	case BT_HCI_OC_MESH_GET_OPTS:
5388 		mesh_get_opts(cmd, evt);
5389 		break;
5390 
5391 	case BT_HCI_OC_MESH_SET_SCAN_FILTER:
5392 		mesh_set_scan_filter(cmd, evt);
5393 		break;
5394 
5395 	case BT_HCI_OC_MESH_ADVERTISE:
5396 		mesh_advertise(cmd, evt);
5397 		break;
5398 
5399 	case BT_HCI_OC_MESH_ADVERTISE_CANCEL:
5400 		mesh_advertise_cancel(cmd, evt);
5401 		break;
5402 
5403 	default:
5404 		return -EINVAL;
5405 	}
5406 
5407 	return 0;
5408 }
5409 #endif /* CONFIG_BT_HCI_MESH_EXT */
5410 
5411 int hci_vendor_cmd_handle_common(uint16_t ocf, struct net_buf *cmd,
5412 				 struct net_buf **evt)
5413 {
5414 	switch (ocf) {
5415 	case BT_OCF(BT_HCI_OP_VS_READ_VERSION_INFO):
5416 		vs_read_version_info(cmd, evt);
5417 		break;
5418 
5419 	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS):
5420 		vs_read_supported_commands(cmd, evt);
5421 		break;
5422 
5423 	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES):
5424 		vs_read_supported_features(cmd, evt);
5425 		break;
5426 
5427 #if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
5428 	case BT_OCF(BT_HCI_OP_VS_READ_USB_TRANSPORT_MODE):
5429 		break;
5430 	case BT_OCF(BT_HCI_OP_VS_SET_USB_TRANSPORT_MODE):
5431 		reset(cmd, evt);
5432 		break;
5433 #endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */
5434 
5435 #if defined(CONFIG_BT_HCI_VS_EXT)
5436 	case BT_OCF(BT_HCI_OP_VS_READ_BUILD_INFO):
5437 		vs_read_build_info(cmd, evt);
5438 		break;
5439 
5440 	case BT_OCF(BT_HCI_OP_VS_WRITE_BD_ADDR):
5441 		vs_write_bd_addr(cmd, evt);
5442 		break;
5443 
5444 	case BT_OCF(BT_HCI_OP_VS_READ_STATIC_ADDRS):
5445 		vs_read_static_addrs(cmd, evt);
5446 		break;
5447 
5448 	case BT_OCF(BT_HCI_OP_VS_READ_KEY_HIERARCHY_ROOTS):
5449 		vs_read_key_hierarchy_roots(cmd, evt);
5450 		break;
5451 
5452 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
5453 	case BT_OCF(BT_HCI_OP_VS_WRITE_TX_POWER_LEVEL):
5454 		vs_write_tx_power_level(cmd, evt);
5455 		break;
5456 
5457 	case BT_OCF(BT_HCI_OP_VS_READ_TX_POWER_LEVEL):
5458 		vs_read_tx_power_level(cmd, evt);
5459 		break;
5460 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
5461 #endif /* CONFIG_BT_HCI_VS_EXT */
5462 
5463 #if defined(CONFIG_BT_HCI_MESH_EXT)
5464 	case BT_OCF(BT_HCI_OP_VS_MESH):
5465 		mesh_cmd_handle(cmd, evt);
5466 		break;
5467 #endif /* CONFIG_BT_HCI_MESH_EXT */
5468 
5469 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
5470 	case BT_OCF(BT_HCI_OP_VS_SET_MIN_NUM_USED_CHANS):
5471 		vs_set_min_used_chans(cmd, evt);
5472 		break;
5473 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
5474 
5475 	default:
5476 		return -EINVAL;
5477 	}
5478 
5479 	return 0;
5480 }
#endif /* CONFIG_BT_HCI_VS */
5482 
5483 struct net_buf *hci_cmd_handle(struct net_buf *cmd, void **node_rx)
5484 {
5485 	struct bt_hci_cmd_hdr *chdr;
5486 	struct net_buf *evt = NULL;
5487 	uint16_t ocf;
5488 	int err;
5489 
5490 	if (cmd->len < sizeof(*chdr)) {
5491 		LOG_ERR("No HCI Command header");
5492 		return NULL;
5493 	}
5494 
5495 	chdr = net_buf_pull_mem(cmd, sizeof(*chdr));
5496 	if (cmd->len < chdr->param_len) {
5497 		LOG_ERR("Invalid HCI CMD packet length");
5498 		return NULL;
5499 	}
5500 
5501 	/* store in a global for later CC/CS event creation */
5502 	_opcode = sys_le16_to_cpu(chdr->opcode);
5503 
5504 	ocf = BT_OCF(_opcode);
5505 
5506 	switch (BT_OGF(_opcode)) {
5507 	case BT_OGF_LINK_CTRL:
5508 		err = link_control_cmd_handle(ocf, cmd, &evt);
5509 		break;
5510 	case BT_OGF_BASEBAND:
5511 		err = ctrl_bb_cmd_handle(ocf, cmd, &evt);
5512 		break;
5513 	case BT_OGF_INFO:
5514 		err = info_cmd_handle(ocf, cmd, &evt);
5515 		break;
5516 	case BT_OGF_STATUS:
5517 		err = status_cmd_handle(ocf, cmd, &evt);
5518 		break;
5519 	case BT_OGF_LE:
5520 		err = controller_cmd_handle(ocf, cmd, &evt, node_rx);
5521 		break;
5522 #if defined(CONFIG_BT_HCI_VS)
5523 	case BT_OGF_VS:
5524 		err = hci_vendor_cmd_handle(ocf, cmd, &evt);
5525 		break;
5526 #endif
5527 	default:
5528 		err = -EINVAL;
5529 		break;
5530 	}
5531 
5532 	if (err == -EINVAL) {
5533 		evt = cmd_status(BT_HCI_ERR_UNKNOWN_CMD);
5534 	}
5535 
5536 	return evt;
5537 }
5538 
5539 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
5540 	defined(CONFIG_BT_CTLR_CONN_ISO)
5541 static void data_buf_overflow(struct net_buf **buf, uint8_t link_type)
5542 {
5543 	struct bt_hci_evt_data_buf_overflow *ep;
5544 
5545 	if (!(event_mask & BT_EVT_MASK_DATA_BUFFER_OVERFLOW)) {
5546 		return;
5547 	}
5548 
5549 	*buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
5550 	hci_evt_create(*buf, BT_HCI_EVT_DATA_BUF_OVERFLOW, sizeof(*ep));
5551 	ep = net_buf_add(*buf, sizeof(*ep));
5552 
5553 	ep->link_type = link_type;
5554 }
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO ||
	* CONFIG_BT_CTLR_CONN_ISO
	*/
5558 
5559 #if defined(CONFIG_BT_CONN)
5560 int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
5561 {
5562 	struct node_tx *node_tx;
5563 	struct bt_hci_acl_hdr *acl;
5564 	struct pdu_data *pdu_data;
5565 	uint16_t handle;
5566 	uint8_t flags;
5567 	uint16_t len;
5568 
5569 	*evt = NULL;
5570 
5571 	if (buf->len < sizeof(*acl)) {
5572 		LOG_ERR("No HCI ACL header");
5573 		return -EINVAL;
5574 	}
5575 
5576 	acl = net_buf_pull_mem(buf, sizeof(*acl));
5577 	len = sys_le16_to_cpu(acl->len);
5578 	handle = sys_le16_to_cpu(acl->handle);
5579 
5580 	if (buf->len < len) {
5581 		LOG_ERR("Invalid HCI ACL packet length");
5582 		return -EINVAL;
5583 	}
5584 
5585 	if (len > LL_LENGTH_OCTETS_TX_MAX) {
5586 		LOG_ERR("Invalid HCI ACL Data length");
5587 		return -EINVAL;
5588 	}
5589 
5590 	/* assigning flags first because handle will be overwritten */
5591 	flags = bt_acl_flags(handle);
5592 	handle = bt_acl_handle(handle);
5593 
5594 	node_tx = ll_tx_mem_acquire();
5595 	if (!node_tx) {
5596 		LOG_ERR("Tx Buffer Overflow");
5597 		data_buf_overflow(evt, BT_OVERFLOW_LINK_ACL);
5598 		return -ENOBUFS;
5599 	}
5600 
5601 	pdu_data = (void *)node_tx->pdu;
5602 
5603 	if (bt_acl_flags_bc(flags) != BT_ACL_POINT_TO_POINT) {
5604 		return -EINVAL;
5605 	}
5606 
5607 	switch (bt_acl_flags_pb(flags)) {
5608 	case BT_ACL_START_NO_FLUSH:
5609 		pdu_data->ll_id = PDU_DATA_LLID_DATA_START;
5610 		break;
5611 	case BT_ACL_CONT:
5612 		pdu_data->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
5613 		break;
5614 	default:
5615 		/* BT_ACL_START and BT_ACL_COMPLETE not allowed on LE-U
5616 		 * from Host to Controller
5617 		 */
5618 		return -EINVAL;
5619 	}
5620 
5621 	pdu_data->len = len;
5622 	memcpy(&pdu_data->lldata[0], buf->data, len);
5623 
5624 	if (ll_tx_mem_enqueue(handle, node_tx)) {
5625 		LOG_ERR("Invalid Tx Enqueue");
5626 		ll_tx_mem_release(node_tx);
5627 		return -EINVAL;
5628 	}
5629 
5630 	return 0;
5631 }
5632 #endif /* CONFIG_BT_CONN */
5633 
5634 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
5635 int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
5636 {
5637 	struct bt_hci_iso_data_hdr *iso_data_hdr;
5638 	struct isoal_sdu_tx sdu_frag_tx;
5639 	struct bt_hci_iso_hdr *iso_hdr;
5640 	struct ll_iso_datapath *dp_in;
5641 	struct ll_iso_stream_hdr *hdr;
5642 	uint32_t *time_stamp;
5643 	uint16_t handle;
5644 	uint8_t pb_flag;
5645 	uint8_t ts_flag;
5646 	uint8_t flags;
5647 	uint16_t len;
5648 
5649 	iso_data_hdr = NULL;
5650 	*evt  = NULL;
5651 	hdr   = NULL;
5652 	dp_in = NULL;
5653 
5654 	if (buf->len < sizeof(*iso_hdr)) {
5655 		LOG_ERR("No HCI ISO header");
5656 		return -EINVAL;
5657 	}
5658 
5659 	iso_hdr = net_buf_pull_mem(buf, sizeof(*iso_hdr));
5660 	handle = sys_le16_to_cpu(iso_hdr->handle);
5661 	len = sys_le16_to_cpu(iso_hdr->len);
5662 
5663 	if (buf->len < len) {
5664 		LOG_ERR("Invalid HCI ISO packet length");
5665 		return -EINVAL;
5666 	}
5667 
5668 	/* Assigning flags first because handle will be overwritten */
5669 	flags = bt_iso_flags(handle);
5670 	pb_flag = bt_iso_flags_pb(flags);
5671 	ts_flag = bt_iso_flags_ts(flags);
5672 	handle = bt_iso_handle(handle);
5673 
5674 	/* Extract time stamp */
5675 	/* Set default to current time
	 * BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
	 * 3.1 Time_Offset in framed PDUs :
	 * The Controller transmitting an SDU may use any of the following
5679 	 * methods to determine the value of the SDU reference time:
5680 	 * -- A captured time stamp of the SDU
5681 	 * -- A time stamp provided by the higher layer
5682 	 * -- A computed time stamp based on a sequence counter provided by the
5683 	 *    higher layer (Not implemented)
5684 	 * -- Any other method of determining Time_Offset (Not implemented)
5685 	 */
5686 	if (ts_flag) {
5687 		/* Overwrite time stamp with HCI provided time stamp */
5688 		time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
5689 		len -= sizeof(*time_stamp);
5690 		sdu_frag_tx.time_stamp = sys_le32_to_cpu(*time_stamp);
5691 	} else {
5692 		sdu_frag_tx.time_stamp =
5693 			HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
5694 	}
5695 
5696 	/* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
5697 	if ((pb_flag & 0x01) == 0) {
5698 		iso_data_hdr = net_buf_pull_mem(buf, sizeof(*iso_data_hdr));
5699 		len -= sizeof(*iso_data_hdr);
5700 		sdu_frag_tx.packet_sn = sys_le16_to_cpu(iso_data_hdr->sn);
5701 		sdu_frag_tx.iso_sdu_length = sys_le16_to_cpu(iso_data_hdr->slen);
5702 	} else {
5703 		sdu_frag_tx.packet_sn = 0;
5704 		sdu_frag_tx.iso_sdu_length = 0;
5705 	}
5706 
	/* Packet boundary flags should be bitwise identical to the SDU state
5708 	 * 0b00 BT_ISO_START
5709 	 * 0b01 BT_ISO_CONT
5710 	 * 0b10 BT_ISO_SINGLE
5711 	 * 0b11 BT_ISO_END
5712 	 */
5713 	sdu_frag_tx.sdu_state = pb_flag;
5714 	/* Fill in SDU buffer fields */
5715 	sdu_frag_tx.dbuf = buf->data;
5716 	sdu_frag_tx.size = len;
5717 
5718 	if (false) {
5719 
5720 #if defined(CONFIG_BT_CTLR_CONN_ISO)
5721 	/* Extract source handle from CIS or BIS handle by way of header and
5722 	 * data path
5723 	 */
5724 	} else if (IS_CIS_HANDLE(handle)) {
5725 		struct ll_conn_iso_stream *cis =
5726 			ll_iso_stream_connected_get(handle);
5727 		if (!cis) {
5728 			return -EINVAL;
5729 		}
5730 
5731 		struct ll_conn_iso_group *cig = cis->group;
5732 		uint8_t event_offset;
5733 
5734 		hdr = &(cis->hdr);
5735 
5736 		/* We must ensure sufficient time for ISO-AL to fragment SDU and
5737 		 * deliver PDUs to the TX queue. By checking ull_ref_get, we
5738 		 * know if we are within the subevents of an ISO event. If so,
5739 		 * we can assume that we have enough time to deliver in the next
5740 		 * ISO event. If we're not active within the ISO event, we don't
5741 		 * know if there is enough time to deliver in the next event,
5742 		 * and for safety we set the target to current event + 2.
5743 		 *
5744 		 * For FT > 1, we have the opportunity to retransmit in later
5745 		 * event(s), in which case we have the option to target an
5746 		 * earlier event (this or next) because being late does not
5747 		 * instantly flush the payload.
5748 		 */
5749 
5750 		event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
5751 
5752 		if (cis->lll.tx.ft > 1) {
5753 			/* FT > 1, target an earlier event */
5754 			event_offset -= 1;
5755 		}
5756 
5757 		sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
5758 		sdu_frag_tx.grp_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
5759 						(event_offset * cig->iso_interval *
5760 							ISO_INT_UNIT_US));
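		/* Illustrative (hypothetical numbers): with FT = 1, event_count = 100
		 * and the CIG currently not referenced (event_offset = 2), the SDU
		 * targets event 102 and grp_ref_point is cig_ref_point advanced by
		 * two ISO intervals (wrapped by isoal_get_wrapped_time_us()).
		 */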
5761 
5762 		/* Get controller's input data path for CIS */
5763 		dp_in = hdr->datapath_in;
5764 		if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
5765 			LOG_ERR("Input data path not set for HCI");
5766 			return -EINVAL;
5767 		}
5768 
5769 		/* Get input data path's source handle */
5770 		isoal_source_handle_t source = dp_in->source_hdl;
5771 
5772 		/* Start Fragmentation */
5773 		isoal_status_t isoal_status =
5774 			isoal_tx_sdu_fragment(source, &sdu_frag_tx);
5775 
5776 		if (isoal_status) {
5777 			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
5778 				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
5779 				return -ENOBUFS;
5780 			}
5781 
5782 			return -EINVAL;
5783 		}
5784 
5785 		/* TODO: Assign *evt if an immediate response is required */
5786 		return 0;
5787 #endif /* CONFIG_BT_CTLR_CONN_ISO */
5788 
5789 #if defined(CONFIG_BT_CTLR_ADV_ISO)
5790 	} else if (IS_ADV_ISO_HANDLE(handle)) {
5791 		struct lll_adv_iso_stream *stream;
5792 		struct ll_adv_iso_set *adv_iso;
5793 		struct lll_adv_iso *lll_iso;
5794 		uint16_t stream_handle;
5795 		uint8_t target_event;
5796 		uint8_t event_offset;
5797 		uint16_t slen;
5798 
5799 		/* FIXME: Code only expects header present */
5800 		slen = iso_data_hdr ? iso_data_hdr->slen : 0;
5801 
5802 		/* Check invalid BIS PDU length */
5803 		if (slen > LL_BIS_OCTETS_TX_MAX) {
5804 			LOG_ERR("Invalid HCI ISO Data length");
5805 			return -EINVAL;
5806 		}
5807 
5808 		/* Get BIS stream handle and stream context */
5809 		stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
5810 		stream = ull_adv_iso_stream_get(stream_handle);
5811 		if (!stream || !stream->dp) {
5812 			LOG_ERR("Invalid BIS stream");
5813 			return -EINVAL;
5814 		}
5815 
5816 		adv_iso = ull_adv_iso_by_stream_get(stream_handle);
5817 		if (!adv_iso) {
5818 			LOG_ERR("No BIG associated with stream handle");
5819 			return -EINVAL;
5820 		}
5821 
		/* Determine the target event and the first event offset after
		 * datapath setup.
		 * The event_offset mitigates the possibility of the first SDU
		 * arriving late on the datapath, which would otherwise cause all
		 * subsequent SDUs in that SDU interval to be dropped; i.e. when
		 * the upper layer is not drifting, a dropped first SDU would make
		 * all subsequent SDUs be dropped too, and this is mitigated by
		 * offsetting the grp_ref_point.
		 *
		 * It is ok to do the below for every received ISO data; the ISO-AL
		 * ignores subsequent skewed target_event values after the first use
		 * of a target_event value.
		 *
		 * In the LLL implementation of BIG, payload_count corresponds to
		 * the next BIG event, hence grp_ref_point for the next BIG event is
		 * calculated by incrementing the previously elapsed big_ref_point
		 * by one additional ISO interval.
		 */
5839 		lll_iso = &adv_iso->lll;
5840 		target_event = lll_iso->payload_count / lll_iso->bn;
5841 		event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;
5842 		event_offset += lll_iso->latency_prepare;
5843 
5844 		sdu_frag_tx.target_event = target_event + event_offset;
5845 		sdu_frag_tx.grp_ref_point =
5846 			isoal_get_wrapped_time_us(adv_iso->big_ref_point,
5847 						  ((event_offset + 1U) *
5848 						   lll_iso->iso_interval *
5849 						   ISO_INT_UNIT_US));
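		/* Illustrative (hypothetical numbers): with BN = 2 and
		 * payload_count = 14, the next payloads belong to BIG event 7; if
		 * the BIG is not currently referenced and latency_prepare is 0,
		 * event_offset = 1, the SDU targets event 8 and grp_ref_point is
		 * big_ref_point advanced by two ISO intervals.
		 */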
5850 
5851 		/* Start Fragmentation */
5852 		/* FIXME: need to ensure ISO-AL returns proper isoal_status.
5853 		 * Currently there are cases where ISO-AL calls LL_ASSERT.
5854 		 */
5855 		isoal_status_t isoal_status =
5856 			isoal_tx_sdu_fragment(stream->dp->source_hdl, &sdu_frag_tx);
5857 
5858 		if (isoal_status) {
5859 			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
5860 				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
5861 				return -ENOBUFS;
5862 			}
5863 
5864 			return -EINVAL;
5865 		}
5866 
5867 		return 0;
5868 #endif /* CONFIG_BT_CTLR_ADV_ISO */
5869 
5870 	}
5871 
5872 	return -EINVAL;
5873 }
5874 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
5875 
5876 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
5877 #if defined(CONFIG_BT_CTLR_ADV_EXT)
5878 static void dup_ext_adv_adi_store(struct dup_ext_adv_mode *dup_mode,
5879 				  const struct pdu_adv_adi *adi,
5880 				  uint8_t data_status)
5881 {
5882 	struct dup_ext_adv_set *adv_set;
5883 
5884 	adv_set = &dup_mode->set[dup_mode->set_curr];
5885 
5886 	adv_set->data_cmplt = (data_status ==
5887 			       BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) ?
5888 			      1U : 0U;
5889 
5890 	if (adi) {
5891 		(void)memcpy(&adv_set->adi, adi, sizeof(*adi));
5892 	} else {
5893 		(void)memset(&adv_set->adi, 0U, sizeof(*adi));
5894 	}
5895 
5896 	if (dup_mode->set_count < CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
5897 		dup_mode->set_count++;
5898 		dup_mode->set_curr = dup_mode->set_count;
5899 	} else {
5900 		dup_mode->set_curr++;
5901 	}
5902 
5903 	if (dup_mode->set_curr == CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
5904 		dup_mode->set_curr = 0U;
5905 	}
5906 }
5907 
5908 static void dup_ext_adv_mode_reset(struct dup_ext_adv_mode *dup_adv_mode)
5909 {
5910 	uint8_t adv_mode;
5911 
5912 	for (adv_mode = 0U; adv_mode < DUP_EXT_ADV_MODE_COUNT;
5913 	     adv_mode++) {
5914 		struct dup_ext_adv_mode *dup_mode;
5915 
5916 		dup_mode = &dup_adv_mode[adv_mode];
5917 		dup_mode->set_count = 0U;
5918 		dup_mode->set_curr = 0U;
5919 	}
5920 }
5921 
5922 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
5923 static void dup_ext_adv_reset(void)
5924 {
5925 	for (int32_t i = 0; i < dup_count; i++) {
5926 		struct dup_entry *dup;
5927 
5928 		dup = &dup_filter[i];
5929 		dup->mask = 0U;
5930 		dup_ext_adv_mode_reset(dup->adv_mode);
5931 	}
5932 }
5933 
5934 static void dup_periodic_adv_reset(uint8_t addr_type, const uint8_t *addr,
5935 				   uint8_t sid)
5936 {
5937 	for (int32_t addr_idx = 0; addr_idx < dup_count; addr_idx++) {
5938 		struct dup_ext_adv_mode *dup_mode;
5939 		struct dup_entry *dup;
5940 
5941 		dup = &dup_filter[addr_idx];
5942 		if (memcmp(addr, dup->addr.a.val, sizeof(bt_addr_t)) ||
5943 		    (addr_type != dup->addr.type)) {
5944 			continue;
5945 		}
5946 
5947 		dup_mode = &dup->adv_mode[DUP_EXT_ADV_MODE_PERIODIC];
5948 		for (uint16_t set_idx = 0; set_idx < dup_mode->set_count;
5949 		     set_idx++) {
5950 			struct dup_ext_adv_set *adv_set;
5951 
5952 			adv_set = &dup_mode->set[set_idx];
5953 			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != sid) {
5954 				continue;
5955 			}
5956 
5957 			/* reset data complete state */
5958 			adv_set->data_cmplt = 0U;
5959 
5960 			return;
5961 		}
5962 
5963 		return;
5964 	}
5965 }
5966 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
5967 #endif /* CONFIG_BT_CTLR_ADV_EXT */
5968 
5969 static inline bool is_dup_or_update(struct dup_entry *dup, uint8_t adv_type,
5970 				    uint8_t adv_mode,
5971 				    const struct pdu_adv_adi *adi,
5972 				    uint8_t data_status)
5973 {
5974 	if (!(dup->mask & BIT(adv_type))) {
5975 		/* report different adv types */
5976 		dup->mask |= BIT(adv_type);
5977 
5978 #if defined(CONFIG_BT_CTLR_ADV_EXT)
5979 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
5980 				      data_status);
5981 
5982 		return false;
5983 	} else if (adv_type != PDU_ADV_TYPE_EXT_IND) {
5984 		/* drop duplicate legacy advertising */
5985 		return true;
5986 	} else if (dup->adv_mode[adv_mode].set_count == 0U) {
5987 		/* report different extended adv mode */
5988 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
5989 				      data_status);
5990 		return false;
5991 	} else if (adi) {
5992 		struct dup_ext_adv_mode *dup_mode;
5993 		uint8_t j;
5994 
5995 		dup_mode = &dup->adv_mode[adv_mode];
5996 		for (j = 0; j < dup_mode->set_count; j++) {
5997 			struct dup_ext_adv_set *adv_set;
5998 
5999 			adv_set = &dup_mode->set[j];
6000 			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != PDU_ADV_ADI_SID_GET(adi)) {
6001 				continue;
6002 			}
6003 
6004 			if (PDU_ADV_ADI_DID_GET(&adv_set->adi) != PDU_ADV_ADI_DID_GET(adi)) {
6005 				/* report different DID */
6006 				adv_set->adi.did_sid_packed[0] = adi->did_sid_packed[0];
6007 				adv_set->adi.did_sid_packed[1] = adi->did_sid_packed[1];
6008 				/* set new data status */
6009 				if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
6010 					adv_set->data_cmplt = 1U;
6011 				} else {
6012 					adv_set->data_cmplt = 0U;
6013 				}
6014 
6015 				return false;
6016 			} else if (!adv_set->data_cmplt &&
6017 				   (data_status ==
6018 				    BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE)) {
6019 				/* report data complete */
6020 				adv_set->data_cmplt = 1U;
6021 				return false;
6022 			} else if (!adv_set->data_cmplt) {
6023 				/* report partial and incomplete data */
6024 				return false;
6025 			}
6026 
6027 			return true;
6028 		}
6029 
6030 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6031 				      data_status);
6032 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6033 
6034 		return false;
6035 	}
6036 
6037 	return true;
6038 }
6039 
6040 static bool dup_found(uint8_t adv_type, uint8_t addr_type, const uint8_t *addr,
6041 		      uint8_t adv_mode, const struct pdu_adv_adi *adi,
6042 		      uint8_t data_status)
6043 {
6044 	/* check for duplicate filtering */
6045 	if (dup_count >= 0) {
6046 		struct dup_entry *dup;
6047 
6048 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6049 		__ASSERT((adv_mode < ARRAY_SIZE(dup_filter[0].adv_mode)),
6050 			 "adv_mode index out-of-bound");
6051 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6052 
		/* Look for an existing entry and update it if it changed */
6054 		for (int32_t i = 0; i < dup_count; i++) {
6055 			dup = &dup_filter[i];
6056 			if (memcmp(addr, &dup->addr.a.val[0],
6057 				   sizeof(bt_addr_t)) ||
6058 			    (addr_type != dup->addr.type)) {
6059 				continue;
6060 			}
6061 
6062 			/* still duplicate or update entry with change */
6063 			return is_dup_or_update(dup, adv_type, adv_mode, adi,
6064 						data_status);
6065 		}
6066 
6067 		/* insert into the duplicate filter */
6068 		dup = &dup_filter[dup_curr];
6069 		(void)memcpy(&dup->addr.a.val[0], addr, sizeof(bt_addr_t));
6070 		dup->addr.type = addr_type;
6071 		dup->mask = BIT(adv_type);
6072 
6073 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6074 		dup_ext_adv_mode_reset(dup->adv_mode);
6075 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6076 				      data_status);
6077 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6078 
6079 		if (dup_count < CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6080 			dup_count++;
6081 			dup_curr = dup_count;
6082 		} else {
6083 			dup_curr++;
6084 		}
6085 
6086 		if (dup_curr == CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6087 			dup_curr = 0U;
6088 		}
6089 	}
6090 
6091 	return false;
6092 }
6093 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6094 
6095 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6096 static inline void le_dir_adv_report(struct pdu_adv *adv, struct net_buf *buf,
6097 				     int8_t rssi, uint8_t rl_idx)
6098 {
6099 	struct bt_hci_evt_le_direct_adv_report *drp;
6100 	struct bt_hci_evt_le_direct_adv_info *dir_info;
6101 
6102 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6103 	    !(le_event_mask & BT_EVT_MASK_LE_DIRECT_ADV_REPORT)) {
6104 		return;
6105 	}
6106 
6107 	LL_ASSERT(adv->type == PDU_ADV_TYPE_DIRECT_IND);
6108 
6109 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6110 	if (dup_scan &&
6111 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6112 		return;
6113 	}
6114 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6115 
6116 	drp = meta_evt(buf, BT_HCI_EVT_LE_DIRECT_ADV_REPORT,
6117 		       sizeof(*drp) + sizeof(*dir_info));
6118 
6119 	drp->num_reports = 1U;
6120 	dir_info = (void *)(((uint8_t *)drp) + sizeof(*drp));
6121 
6122 	/* Directed Advertising */
6123 	dir_info->evt_type = BT_HCI_ADV_DIRECT_IND;
6124 
6125 #if defined(CONFIG_BT_CTLR_PRIVACY)
6126 	if (rl_idx < ll_rl_size_get()) {
6127 		/* Store identity address */
6128 		ll_rl_id_addr_get(rl_idx, &dir_info->addr.type,
6129 				  &dir_info->addr.a.val[0]);
6130 		/* Mark it as identity address from RPA (0x02, 0x03) */
6131 		dir_info->addr.type += 2U;
6132 	} else {
6133 #else
6134 	if (1) {
6135 #endif /* CONFIG_BT_CTLR_PRIVACY */
6136 		dir_info->addr.type = adv->tx_addr;
6137 		memcpy(&dir_info->addr.a.val[0], &adv->direct_ind.adv_addr[0],
6138 		       sizeof(bt_addr_t));
6139 	}
6140 
6141 	dir_info->dir_addr.type = adv->rx_addr;
6142 	memcpy(&dir_info->dir_addr.a.val[0],
6143 	       &adv->direct_ind.tgt_addr[0], sizeof(bt_addr_t));
6144 
6145 	dir_info->rssi = rssi;
6146 }
6147 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6148 
6149 #if defined(CONFIG_BT_OBSERVER)
6150 #if defined(CONFIG_BT_HCI_MESH_EXT)
6151 static inline bool scan_filter_apply(uint8_t filter, uint8_t *data, uint8_t len)
6152 {
6153 	struct scan_filter *f = &scan_filters[filter];
6154 
6155 	/* No patterns means filter out all advertising packets */
6156 	for (uint8_t i = 0; i < f->count; i++) {
6157 		/* Require at least the length of the pattern */
6158 		if (len >= f->lengths[i] &&
6159 		    !memcmp(data, f->patterns[i], f->lengths[i])) {
6160 			return true;
6161 		}
6162 	}
6163 
6164 	return false;
6165 }
6166 
6167 static inline void le_mesh_scan_report(struct pdu_adv *adv,
6168 				       struct node_rx_pdu *node_rx,
6169 				       struct net_buf *buf, int8_t rssi)
6170 {
6171 	uint8_t data_len = (adv->len - BDADDR_SIZE);
6172 	struct bt_hci_evt_mesh_scanning_report *mep;
6173 	struct bt_hci_evt_mesh_scan_report *sr;
6174 	uint32_t instant;
6175 	uint8_t chan;
6176 
6177 	LL_ASSERT(adv->type == PDU_ADV_TYPE_NONCONN_IND);
6178 
6179 	/* Filter based on currently active Scan Filter */
6180 	if (sf_curr < ARRAY_SIZE(scan_filters) &&
6181 	    !scan_filter_apply(sf_curr, &adv->adv_ind.data[0], data_len)) {
6182 		/* Drop the report */
6183 		return;
6184 	}
6185 
6186 	chan = node_rx->hdr.rx_ftr.chan;
6187 	instant = node_rx->hdr.rx_ftr.anchor_ticks;
6188 
6189 	mep = mesh_evt(buf, BT_HCI_EVT_MESH_SCANNING_REPORT,
6190 			    sizeof(*mep) + sizeof(*sr));
6191 
6192 	mep->num_reports = 1U;
6193 	sr = (void *)(((uint8_t *)mep) + sizeof(*mep));
6194 	sr->addr.type = adv->tx_addr;
6195 	memcpy(&sr->addr.a.val[0], &adv->adv_ind.addr[0], sizeof(bt_addr_t));
6196 	sr->chan = chan;
6197 	sr->rssi = rssi;
6198 	sys_put_le32(instant, (uint8_t *)&sr->instant);
6199 
6200 	sr->data_len = data_len;
6201 	memcpy(&sr->data[0], &adv->adv_ind.data[0], data_len);
6202 }
6203 #endif /* CONFIG_BT_HCI_MESH_EXT */
6204 
6205 static void le_advertising_report(struct pdu_data *pdu_data,
6206 				  struct node_rx_pdu *node_rx,
6207 				  struct net_buf *buf)
6208 {
6209 	const uint8_t c_adv_type[] = { 0x00, 0x01, 0x03, 0xff, 0x04,
6210 				    0xff, 0x02 };
6211 	struct bt_hci_evt_le_advertising_report *sep;
6212 	struct pdu_adv *adv = (void *)pdu_data;
6213 	struct bt_hci_evt_le_advertising_info *adv_info;
6214 	uint8_t data_len;
6215 	uint8_t info_len;
6216 	int8_t rssi;
6217 #if defined(CONFIG_BT_CTLR_PRIVACY)
6218 	uint8_t rl_idx;
6219 #endif /* CONFIG_BT_CTLR_PRIVACY */
6220 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6221 	uint8_t direct_report;
6222 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6223 	int8_t *prssi;
6224 
6225 	rssi = -(node_rx->hdr.rx_ftr.rssi);
6226 #if defined(CONFIG_BT_CTLR_PRIVACY)
6227 	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
6228 #endif /* CONFIG_BT_CTLR_PRIVACY */
6229 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6230 	direct_report = node_rx->hdr.rx_ftr.direct;
6231 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6232 
6233 #if defined(CONFIG_BT_CTLR_PRIVACY)
6234 	if (adv->tx_addr) {
6235 		/* Update current RPA */
6236 		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
6237 	}
6238 #endif /* CONFIG_BT_CTLR_PRIVACY */
6239 
6240 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6241 	if (direct_report) {
6242 #if defined(CONFIG_BT_CTLR_PRIVACY)
6243 		le_dir_adv_report(adv, buf, rssi, rl_idx);
6244 #else
6245 		le_dir_adv_report(adv, buf, rssi, 0xFF);
6246 #endif /* CONFIG_BT_CTLR_PRIVACY */
6247 		return;
6248 	}
6249 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6250 
6251 #if defined(CONFIG_BT_HCI_MESH_EXT)
6252 	if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) {
6253 		le_mesh_scan_report(adv, node_rx, buf, rssi);
6254 		return;
6255 	}
6256 #endif /* CONFIG_BT_HCI_MESH_EXT */
6257 
6258 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6259 	    !(le_event_mask & BT_EVT_MASK_LE_ADVERTISING_REPORT)) {
6260 		return;
6261 	}
6262 
6263 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6264 	if (dup_scan &&
6265 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6266 		return;
6267 	}
6268 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6269 
6270 	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
6271 		data_len = (adv->len - BDADDR_SIZE);
6272 	} else {
6273 		data_len = 0U;
6274 	}
6275 	info_len = sizeof(struct bt_hci_evt_le_advertising_info) + data_len +
6276 		   sizeof(*prssi);
6277 	sep = meta_evt(buf, BT_HCI_EVT_LE_ADVERTISING_REPORT,
6278 		       sizeof(*sep) + info_len);
6279 
6280 	sep->num_reports = 1U;
6281 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6282 
6283 	adv_info->evt_type = c_adv_type[adv->type];
6284 
6285 #if defined(CONFIG_BT_CTLR_PRIVACY)
6286 	if (rl_idx < ll_rl_size_get()) {
6287 		/* Store identity address */
6288 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6289 				  &adv_info->addr.a.val[0]);
6290 		/* Mark it as identity address from RPA (0x02, 0x03) */
6291 		adv_info->addr.type += 2U;
6292 	} else {
6293 #else
6294 	if (1) {
6295 #endif /* CONFIG_BT_CTLR_PRIVACY */
6296 
6297 		adv_info->addr.type = adv->tx_addr;
6298 		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
6299 		       sizeof(bt_addr_t));
6300 	}
6301 
6302 	adv_info->length = data_len;
6303 	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
6304 	/* RSSI */
6305 	prssi = &adv_info->data[0] + data_len;
6306 	*prssi = rssi;
6307 }
6308 
6309 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6310 static void le_ext_adv_legacy_report(struct pdu_data *pdu_data,
6311 				     struct node_rx_pdu *node_rx,
6312 				     struct net_buf *buf)
6313 {
6314 	/* Lookup event type based on pdu_adv_type set by LLL */
6315 	const uint8_t evt_type_lookup[] = {
6316 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
6317 		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* ADV_IND */
6318 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
6319 		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* DIRECT_IND */
6320 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY), /* NONCONN_IND */
6321 		0xff,                            /* Invalid index lookup */
6322 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6323 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
6324 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN),   /* SCAN_RSP to an ADV_SCAN_IND
6325 						  */
6326 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6327 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
6328 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
6329 		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* SCAN_RSP to an ADV_IND,
6330 						* NOTE: LLL explicitly sets
6331 						* adv_type to
6332 						* PDU_ADV_TYPE_ADV_IND_SCAN_RSP
6333 						*/
6334 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6335 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN)    /* SCAN_IND */
6336 	};
6337 	struct bt_hci_evt_le_ext_advertising_info *adv_info;
6338 	struct bt_hci_evt_le_ext_advertising_report *sep;
6339 	struct pdu_adv *adv = (void *)pdu_data;
6340 	uint8_t data_len;
6341 	uint8_t info_len;
6342 	int8_t rssi;
6343 
6344 #if defined(CONFIG_BT_CTLR_PRIVACY)
6345 	uint8_t rl_idx;
6346 #endif /* CONFIG_BT_CTLR_PRIVACY */
6347 
6348 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6349 	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
6350 		return;
6351 	}
6352 
6353 	/* The Link Layer currently returns RSSI as an absolute value */
6354 	rssi = -(node_rx->hdr.rx_ftr.rssi);
6355 
6356 #if defined(CONFIG_BT_CTLR_PRIVACY)
6357 	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
6358 #endif /* CONFIG_BT_CTLR_PRIVACY */
6359 
6360 #if defined(CONFIG_BT_CTLR_PRIVACY)
6361 	if (adv->tx_addr) {
6362 		/* Update current RPA */
6363 		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
6364 	}
6365 #endif /* CONFIG_BT_CTLR_PRIVACY */
6366 
6367 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6368 	if (dup_scan &&
6369 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6370 		return;
6371 	}
6372 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6373 
6374 	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
6375 		data_len = (adv->len - BDADDR_SIZE);
6376 	} else {
6377 		data_len = 0U;
6378 	}
6379 
6380 	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
6381 		   data_len;
6382 	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
6383 		       sizeof(*sep) + info_len);
6384 
6385 	sep->num_reports = 1U;
6386 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6387 
6388 	adv_info->evt_type = evt_type_lookup[adv->type];
6389 
6390 #if defined(CONFIG_BT_CTLR_PRIVACY)
6391 	if (rl_idx < ll_rl_size_get()) {
6392 		/* Store identity address */
6393 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6394 				  &adv_info->addr.a.val[0]);
6395 		/* Mark it as identity address from RPA (0x02, 0x03) */
6396 		adv_info->addr.type += 2U;
6397 	} else
6398 #endif /* CONFIG_BT_CTLR_PRIVACY */
6399 	{
6400 		adv_info->addr.type = adv->tx_addr;
6401 		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
6402 		       sizeof(bt_addr_t));
6403 	}
6404 
6405 	adv_info->prim_phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
6406 	adv_info->sec_phy = 0U;
6407 	adv_info->sid = 0xff;
6408 	adv_info->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6409 	adv_info->rssi = rssi;
6410 	adv_info->interval = 0U;
6411 
6412 	if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
6413 		adv_info->direct_addr.type = adv->rx_addr;
6414 		bt_addr_copy(&adv_info->direct_addr.a,
6415 			     (void *)adv->direct_ind.tgt_addr);
6416 	} else {
6417 		adv_info->direct_addr.type = 0U;
6418 		(void)memset(adv_info->direct_addr.a.val, 0U,
6419 			     sizeof(adv_info->direct_addr.a.val));
6420 	}
6421 
6422 	adv_info->length = data_len;
6423 	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
6424 }
6425 
6426 static uint8_t ext_adv_direct_addr_type(struct lll_scan *lll,
6427 					bool peer_resolved, bool direct_report,
6428 					uint8_t rx_addr_type,
6429 					const uint8_t *const rx_addr)
6430 {
	/* The directed address is a resolvable private address, but the
	 * Controller could not resolve it.
	 */
6434 	if (direct_report) {
6435 		return BT_ADDR_LE_UNRESOLVED;
6436 	}
6437 
6438 	if (0) {
6439 #if defined(CONFIG_BT_CTLR_PRIVACY)
6440 	/* Peer directed advertiser's address was resolved */
6441 	} else if (peer_resolved) {
6442 		struct ll_scan_set *scan;
6443 
6444 		scan = HDR_LLL2ULL(lll);
6445 		if ((rx_addr_type == lll->init_addr_type) &&
6446 		    !memcmp(lll->init_addr, rx_addr, BDADDR_SIZE)) {
6447 			/* Peer directed advertiser used local scanner's
6448 			 * initiator address.
6449 			 */
6450 			return scan->own_addr_type;
6451 		}
6452 
6453 		/* Peer directed advertiser used directed resolvable
6454 		 * private address generated from the local scanner's
6455 		 * Identity Resolution Key.
6456 		 */
6457 		return scan->own_addr_type | BIT(1);
6458 #endif /* CONFIG_BT_CTLR_PRIVACY */
6459 	} else {
6460 		struct ll_scan_set *scan;
6461 
6462 		scan = HDR_LLL2ULL(lll);
6463 
6464 		/* Peer directed advertiser used local scanner's
6465 		 * initiator address.
6466 		 */
6467 		return scan->own_addr_type;
6468 	}
6469 }
6470 
6471 static uint8_t ext_adv_data_get(const struct node_rx_pdu *node_rx_data,
6472 				uint8_t *const sec_phy, int8_t *const tx_pwr,
6473 				const uint8_t **const data)
6474 {
6475 	const struct pdu_adv *adv = (void *)node_rx_data->pdu;
6476 	const struct pdu_adv_com_ext_adv *p;
6477 	const struct pdu_adv_ext_hdr *h;
6478 	uint8_t hdr_buf_len;
6479 	const uint8_t *ptr;
6480 	uint8_t hdr_len;
6481 
6482 	*tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6483 
6484 	p = (void *)&adv->adv_ext_ind;
6485 	h = (void *)p->ext_hdr_adv_data;
6486 	ptr = (void *)h;
6487 
6488 	if (!p->ext_hdr_len) {
6489 		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
6490 
6491 		goto no_ext_hdr;
6492 	}
6493 
6494 	ptr = h->data;
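	/* Skip the optional extended header fields that may precede the AD data:
	 * AdvA, TargetA, ADI, AuxPtr, SyncInfo and TxPower, then any remaining
	 * ACAD bytes; whatever is left in the PDU is the advertising data.
	 */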
6495 
6496 	if (h->adv_addr) {
6497 		ptr += BDADDR_SIZE;
6498 	}
6499 
6500 	if (h->tgt_addr) {
6501 		ptr += BDADDR_SIZE;
6502 	}
6503 
6504 	if (h->adi) {
6505 		ptr += sizeof(struct pdu_adv_adi);
6506 	}
6507 
6508 	if (h->aux_ptr) {
6509 		struct pdu_adv_aux_ptr *aux_ptr;
6510 
6511 		aux_ptr = (void *)ptr;
6512 		ptr += sizeof(*aux_ptr);
6513 
6514 		*sec_phy = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
6515 	}
6516 
6517 	if (h->sync_info) {
6518 		ptr += sizeof(struct pdu_adv_sync_info);
6519 	}
6520 
6521 	if (h->tx_pwr) {
6522 		*tx_pwr = *(int8_t *)ptr;
6523 		ptr++;
6524 	}
6525 
6526 	hdr_len = ptr - (uint8_t *)p;
6527 	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
6528 	if (hdr_len < hdr_buf_len) {
6529 		uint8_t acad_len = hdr_buf_len - hdr_len;
6530 
6531 		ptr += acad_len;
6532 		hdr_len += acad_len;
6533 	}
6534 
6535 no_ext_hdr:
6536 	if (hdr_len < adv->len) {
6537 		*data = ptr;
6538 
6539 		return adv->len - hdr_len;
6540 	}
6541 
6542 	return 0;
6543 }
6544 
6545 static void node_rx_extra_list_release(struct node_rx_pdu *node_rx_extra)
6546 {
6547 	while (node_rx_extra) {
6548 		struct node_rx_pdu *node_rx_curr;
6549 
6550 		node_rx_curr = node_rx_extra;
6551 		node_rx_extra = node_rx_curr->hdr.rx_ftr.extra;
6552 
6553 		node_rx_curr->hdr.next = NULL;
6554 		ll_rx_mem_release((void **)&node_rx_curr);
6555 	}
6556 }
6557 
6558 static void ext_adv_info_fill(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
6559 			      uint8_t adv_addr_type, const uint8_t *adv_addr,
6560 			      uint8_t direct_addr_type,
6561 			      const uint8_t *direct_addr, uint8_t rl_idx,
6562 			      int8_t tx_pwr, int8_t rssi,
6563 			      uint16_t interval_le16,
6564 			      const struct pdu_adv_adi *adi, uint8_t data_len,
6565 			      const uint8_t *data, struct net_buf *buf)
6566 {
6567 	struct bt_hci_evt_le_ext_advertising_info *adv_info;
6568 	struct bt_hci_evt_le_ext_advertising_report *sep;
6569 	uint8_t info_len;
6570 
6571 	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
6572 		   data_len;
6573 	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
6574 		       sizeof(*sep) + info_len);
6575 
6576 	sep->num_reports = 1U;
6577 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6578 
6579 	adv_info->evt_type = evt_type;
6580 
6581 	if (0) {
6582 #if defined(CONFIG_BT_CTLR_PRIVACY)
6583 	} else if (rl_idx < ll_rl_size_get()) {
6584 		/* Store identity address */
6585 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6586 				  adv_info->addr.a.val);
6587 		/* Mark it as identity address from RPA (0x02, 0x03) */
6588 		adv_info->addr.type += 2U;
6589 #else /* !CONFIG_BT_CTLR_PRIVACY */
6590 		ARG_UNUSED(rl_idx);
6591 #endif /* !CONFIG_BT_CTLR_PRIVACY */
6592 	} else if (adv_addr) {
6593 		adv_info->addr.type = adv_addr_type;
6594 		(void)memcpy(adv_info->addr.a.val, adv_addr, sizeof(bt_addr_t));
6595 	} else {
6596 		adv_info->addr.type = 0U;
6597 		(void)memset(adv_info->addr.a.val, 0, sizeof(bt_addr_t));
6598 	}
6599 
6600 	adv_info->prim_phy = find_lsb_set(phy);
6601 	adv_info->sec_phy = sec_phy;
6602 	adv_info->sid = (adi) ? PDU_ADV_ADI_SID_GET(adi) : BT_HCI_LE_EXT_ADV_SID_INVALID;
6603 	adv_info->tx_power = tx_pwr;
6604 	adv_info->rssi = rssi;
6605 	adv_info->interval = interval_le16;
6606 
6607 	if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_DIRECT) {
6608 		adv_info->direct_addr.type = direct_addr_type;
6609 		(void)memcpy(adv_info->direct_addr.a.val, direct_addr,
6610 			     sizeof(bt_addr_t));
6611 	} else {
6612 		adv_info->direct_addr.type = 0U;
6613 		(void)memset(adv_info->direct_addr.a.val, 0, sizeof(bt_addr_t));
6614 	}
6615 
6616 	adv_info->length = data_len;
6617 	(void)memcpy(adv_info->data, data, data_len);
6618 }
6619 
6620 static void ext_adv_pdu_frag(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
6621 			     uint8_t adv_addr_type, const uint8_t *adv_addr,
6622 			     uint8_t direct_addr_type,
6623 			     const uint8_t *direct_addr, uint8_t rl_idx,
6624 			     int8_t tx_pwr, int8_t rssi, uint16_t interval_le16,
6625 			     const struct pdu_adv_adi *adi,
6626 			     uint8_t data_len_max,
6627 			     uint16_t *const data_len_total,
6628 			     uint8_t *const data_len,
6629 			     const uint8_t **const data, struct net_buf *buf,
6630 			     struct net_buf **const evt_buf)
6631 {
6632 	const uint8_t data_len_frag = MIN(*data_len, data_len_max);
6633 
6634 	do {
6635 		/* Prepare a fragment of PDU data in a HCI event */
6636 		ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type,
6637 				  adv_addr, direct_addr_type, direct_addr,
6638 				  rl_idx, tx_pwr, rssi, interval_le16, adi,
6639 				  data_len_frag, *data, *evt_buf);
6640 
6641 		*data += data_len_frag;
6642 		*data_len -= data_len_frag;
6643 		*data_len_total -= data_len_frag;
6644 
6645 		*evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
6646 		LL_ASSERT(*evt_buf);
6647 
6648 		net_buf_frag_add(buf, *evt_buf);
6649 
		/* Continue to fragment until the last partial PDU data fragment;
		 * the HCI event for the remaining PDU data will be prepared by
		 * the caller.
		 */
6653 	} while (*data_len > data_len_max);
6654 }
6655 
6656 static void ext_adv_data_frag(const struct node_rx_pdu *node_rx_data,
6657 			      uint8_t evt_type, uint8_t phy,
6658 			      uint8_t *const sec_phy, uint8_t adv_addr_type,
6659 			      const uint8_t *adv_addr, uint8_t direct_addr_type,
6660 			      const uint8_t *direct_addr, uint8_t rl_idx,
6661 			      int8_t *const tx_pwr, int8_t rssi,
6662 			      uint16_t interval_le16,
6663 			      const struct pdu_adv_adi *adi,
6664 			      uint8_t data_len_max, uint16_t data_len_total,
6665 			      uint8_t *const data_len,
6666 			      const uint8_t **const data, struct net_buf *buf,
6667 			      struct net_buf **const evt_buf)
6668 {
6669 	evt_type |= (BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL << 5);
6670 
6671 	do {
6672 		/* Fragment the PDU data */
6673 		ext_adv_pdu_frag(evt_type, phy, *sec_phy, adv_addr_type,
6674 				 adv_addr, direct_addr_type, direct_addr,
6675 				 rl_idx, *tx_pwr, rssi, interval_le16, adi,
6676 				 data_len_max, &data_len_total, data_len,
6677 				 data, buf, evt_buf);
6678 
6679 		/* Check if more PDUs in the list */
6680 		node_rx_data = node_rx_data->hdr.rx_ftr.extra;
6681 		if (node_rx_data) {
6682 			if (*data_len >= data_len_total) {
6683 				/* Last fragment restricted to maximum scan
6684 				 * data length, caller will prepare the last
6685 				 * HCI fragment event.
6686 				 */
6687 				break;
6688 			} else if (*data_len) {
6689 				/* Last fragment of current PDU data */
6690 				ext_adv_pdu_frag(evt_type, phy, *sec_phy,
6691 						 adv_addr_type, adv_addr,
6692 						 direct_addr_type, direct_addr,
6693 						 rl_idx, *tx_pwr, rssi,
6694 						 interval_le16, adi,
6695 						 data_len_max, &data_len_total,
6696 						 data_len, data, buf, evt_buf);
6697 			}
6698 
6699 			/* Get next PDU data in list */
6700 			*data_len = ext_adv_data_get(node_rx_data, sec_phy,
6701 						     tx_pwr, data);
6702 
6703 			/* Restrict PDU data to maximum scan data length */
6704 			if (*data_len > data_len_total) {
6705 				*data_len = data_len_total;
6706 			}
6707 		}
6708 
		/* Continue to fragment if the current PDU data length is less
		 * than the total data length, or greater than the maximum HCI
		 * event data length.
		 */
6713 	} while ((*data_len < data_len_total) || (*data_len > data_len_max));
6714 }
6715 
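/* Consolidate a chain of received extended advertising PDUs into HCI LE
 * Extended Advertising Report event(s). The advertiser address, target
 * address, ADI, Tx Power and AD data are collected across the PDU chain; the
 * AD data, and any scan response data found in the chain, are each encoded as
 * a report, fragmented into multiple events when exceeding a single HCI
 * event's capacity.
 */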
6716 static void le_ext_adv_report(struct pdu_data *pdu_data,
6717 			      struct node_rx_pdu *node_rx,
6718 			      struct net_buf *buf, uint8_t phy)
6719 {
6720 	int8_t scan_rsp_tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6721 	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6722 	struct node_rx_pdu *node_rx_scan_data = NULL;
6723 	struct node_rx_pdu *node_rx_data = NULL;
6724 	const struct pdu_adv_adi *adi = NULL;
6725 	uint16_t scan_data_len_total = 0U;
6726 	struct node_rx_pdu *node_rx_curr;
6727 	struct node_rx_pdu *node_rx_next;
6728 	const uint8_t *scan_data = NULL;
6729 	uint8_t scan_data_status = 0U;
6730 	uint8_t direct_addr_type = 0U;
6731 	uint16_t data_len_total = 0U;
6732 	uint8_t *direct_addr = NULL;
6733 	uint16_t interval_le16 = 0U;
6734 	const uint8_t *data = NULL;
6735 	uint8_t scan_data_len = 0U;
6736 	uint8_t adv_addr_type = 0U;
6737 	uint8_t sec_phy_scan = 0U;
6738 	uint8_t *adv_addr = NULL;
6739 	uint8_t data_status = 0U;
6740 	struct net_buf *evt_buf;
6741 	bool devmatch = false;
6742 	uint8_t data_len = 0U;
6743 	uint8_t evt_type = 0U;
6744 	uint8_t sec_phy = 0U;
6745 	uint8_t data_len_max;
6746 	uint8_t rl_idx = 0U;
6747 	struct pdu_adv *adv;
6748 	int8_t rssi;
6749 
	/* NOTE: This function initializes many variables before the check and
	 * return below, as an exception to initializing variables close to
	 * their point of use. This is acceptable because the early return is
	 * unlikely in typical Controller use.
	 */
6755 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6756 	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
6757 		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
6758 		return;
6759 	}
6760 
6761 #if defined(CONFIG_BT_CTLR_PRIVACY)
6762 	rl_idx = ll_rl_size_get();
6763 #endif /* CONFIG_BT_CTLR_PRIVACY */
6764 
6765 	adv = (void *)pdu_data;
6766 	node_rx_curr = node_rx;
6767 	node_rx_next = node_rx_curr->hdr.rx_ftr.extra;
6768 	do {
6769 		int8_t tx_pwr_curr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6770 		struct pdu_adv_adi *adi_curr = NULL;
6771 		uint8_t direct_addr_type_curr = 0U;
6772 		bool direct_resolved_curr = false;
6773 		uint8_t *direct_addr_curr = NULL;
6774 		uint8_t adv_addr_type_curr = 0U;
6775 		struct pdu_adv_com_ext_adv *p;
6776 		uint8_t *adv_addr_curr = NULL;
6777 		uint8_t data_len_curr = 0U;
6778 		uint8_t *data_curr = NULL;
6779 		struct pdu_adv_ext_hdr *h;
6780 		uint8_t sec_phy_curr = 0U;
6781 		uint8_t evt_type_curr;
6782 		uint8_t hdr_buf_len;
6783 		uint8_t hdr_len;
6784 		uint8_t *ptr;
6785 
6786 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6787 		bool direct_report_curr = node_rx_curr->hdr.rx_ftr.direct;
6788 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6789 
6790 #if defined(CONFIG_BT_CTLR_PRIVACY)
6791 		uint8_t rl_idx_curr = node_rx_curr->hdr.rx_ftr.rl_idx;
6792 
6793 		direct_resolved_curr = node_rx_curr->hdr.rx_ftr.direct_resolved;
6794 #endif /* CONFIG_BT_CTLR_PRIVACY */
6795 
6796 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
6797 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
6798 		const bool devmatch_curr = node_rx_curr->hdr.rx_ftr.devmatch;
6799 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
6800 
6801 		/* The Link Layer currently returns RSSI as an absolute value */
6802 		rssi = -(node_rx_curr->hdr.rx_ftr.rssi);
6803 
6804 		LOG_DBG("phy= 0x%x, type= 0x%x, len= %u, tat= %u, rat= %u,"
6805 		       " rssi=%d dB", phy, adv->type, adv->len, adv->tx_addr,
6806 		       adv->rx_addr, rssi);
6807 
6808 		p = (void *)&adv->adv_ext_ind;
6809 		h = (void *)p->ext_hdr_adv_data;
6810 		ptr = (void *)h;
6811 
6812 		LOG_DBG("    Ext. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
6813 
6814 		evt_type_curr = p->adv_mode;
6815 
6816 		if (!p->ext_hdr_len) {
6817 			hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
6818 
6819 			goto no_ext_hdr;
6820 		}
6821 
6822 		ptr = h->data;
6823 
6824 		if (h->adv_addr) {
6825 			bt_addr_le_t addr;
6826 
6827 			adv_addr_type_curr = adv->tx_addr;
6828 			adv_addr_curr = ptr;
6829 
6830 			addr.type = adv->tx_addr;
6831 			(void)memcpy(addr.a.val, ptr, sizeof(bt_addr_t));
6832 			ptr += BDADDR_SIZE;
6833 
6834 			LOG_DBG("    AdvA: %s", bt_addr_le_str(&addr));
6835 		}
6836 
6837 		if (h->tgt_addr) {
6838 			struct lll_scan *lll;
6839 			bt_addr_le_t addr;
6840 
6841 			lll = node_rx->hdr.rx_ftr.param;
6842 
6843 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6844 			direct_addr_type_curr =
6845 				ext_adv_direct_addr_type(lll,
6846 							 direct_resolved_curr,
6847 							 direct_report_curr,
6848 							 adv->rx_addr, ptr);
6849 #else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
6850 			direct_addr_type_curr =
6851 				ext_adv_direct_addr_type(lll,
6852 							 direct_resolved_curr,
6853 							 false, adv->rx_addr,
6854 							 ptr);
6855 #endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
6856 
6857 			direct_addr_curr = ptr;
6858 			ptr += BDADDR_SIZE;
6859 
6860 			addr.type = adv->rx_addr;
6861 			(void)memcpy(addr.a.val, direct_addr_curr,
6862 				     sizeof(bt_addr_t));
6863 
6864 			LOG_DBG("    TgtA: %s", bt_addr_le_str(&addr));
6865 		}
6866 
6867 		if (h->adi) {
6868 			adi_curr = (void *)ptr;
6869 
6870 			ptr += sizeof(*adi);
6871 
6872 			LOG_DBG("    AdvDataInfo DID = 0x%x, SID = 0x%x",
6873 				PDU_ADV_ADI_DID_GET(adi_curr), PDU_ADV_ADI_SID_GET(adi_curr));
6874 		}
6875 
6876 		if (h->aux_ptr) {
6877 			struct pdu_adv_aux_ptr *aux_ptr;
6878 			uint8_t aux_phy;
6879 
6880 			aux_ptr = (void *)ptr;
6881 			if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
6882 				struct node_rx_ftr *ftr;
6883 
6884 				ftr = &node_rx->hdr.rx_ftr;
6885 				node_rx_extra_list_release(ftr->extra);
6886 				return;
6887 			}
6888 
6889 			ptr += sizeof(*aux_ptr);
6890 
6891 			sec_phy_curr = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
6892 
6893 			aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
6894 
6895 			LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
6896 			       "= %u offs = 0x%x, phy = 0x%x",
6897 			       aux_ptr->chan_idx, aux_ptr->ca,
6898 			       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
6899 		}
6900 
6901 		if (h->sync_info) {
6902 			struct pdu_adv_sync_info *si;
6903 
6904 			si = (void *)ptr;
6905 			ptr += sizeof(*si);
6906 
6907 			interval_le16 = si->interval;
6908 
6909 			LOG_DBG("    SyncInfo offs = %u, offs_unit = 0x%x, "
6910 			       "interval = 0x%x, sca = 0x%x, "
6911 			       "chan map = 0x%x 0x%x 0x%x 0x%x 0x%x, "
6912 			       "AA = 0x%x, CRC = 0x%x 0x%x 0x%x, "
6913 			       "evt cntr = 0x%x",
6914 			       sys_le16_to_cpu(si->offs),
6915 			       si->offs_units,
6916 			       sys_le16_to_cpu(si->interval),
6917 			       ((si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
6918 				 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
6919 				PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS),
6920 			       si->sca_chm[0], si->sca_chm[1], si->sca_chm[2],
6921 			       si->sca_chm[3],
6922 			       (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
6923 				~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK),
6924 			       sys_le32_to_cpu(si->aa),
6925 			       si->crc_init[0], si->crc_init[1],
6926 			       si->crc_init[2], sys_le16_to_cpu(si->evt_cntr));
6927 		}
6928 
6929 		if (h->tx_pwr) {
6930 			tx_pwr_curr = *(int8_t *)ptr;
6931 			ptr++;
6932 
6933 			LOG_DBG("    Tx pwr= %d dB", tx_pwr_curr);
6934 		}
6935 
6936 		hdr_len = ptr - (uint8_t *)p;
6937 		hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
6938 		if (hdr_len > hdr_buf_len) {
6939 			LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
6940 		} else {
6941 			uint8_t acad_len = hdr_buf_len - hdr_len;
6942 
6943 			if (acad_len) {
6944 				ptr += acad_len;
6945 				hdr_len += acad_len;
6946 			}
6947 		}
6948 
6949 no_ext_hdr:
6950 		if (hdr_len < adv->len) {
6951 			data_len_curr = adv->len - hdr_len;
6952 			data_curr = ptr;
6953 
			LOG_DBG("    AD Data (%u): <todo>", data_len_curr);
6955 		}
6956 
6957 		if (node_rx_curr == node_rx) {
6958 			evt_type = evt_type_curr;
6959 			adv_addr_type = adv_addr_type_curr;
6960 			adv_addr = adv_addr_curr;
6961 			direct_addr_type = direct_addr_type_curr;
6962 			direct_addr = direct_addr_curr;
6963 			adi = adi_curr;
6964 			sec_phy = sec_phy_curr;
6965 			node_rx_data = node_rx_curr;
6966 			data_len = data_len_curr;
6967 			data_len_total = data_len;
6968 			data = data_curr;
6969 			scan_data_len_total = 0U;
6970 			tx_pwr = tx_pwr_curr;
6971 
6972 #if defined(CONFIG_BT_CTLR_PRIVACY)
6973 			rl_idx = rl_idx_curr;
6974 #endif /* CONFIG_BT_CTLR_PRIVACY */
6975 
6976 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
6977 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
6978 			devmatch = devmatch_curr;
6979 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
6980 
6981 		} else {
6982 			/* TODO: Validate current value with previous */
6983 
6984 			/* Detect the scan response in the list of node_rx */
6985 			if (node_rx_curr->hdr.rx_ftr.scan_rsp) {
6986 				node_rx_scan_data = node_rx_curr;
6987 				if (sec_phy_curr) {
6988 					sec_phy_scan = sec_phy_curr;
6989 				} else {
6990 					sec_phy_scan = sec_phy;
6991 				}
6992 				scan_data_len = data_len_curr;
6993 				scan_data = data_curr;
6994 				scan_rsp_tx_pwr = tx_pwr_curr;
6995 			}
6996 
6997 			if (!adv_addr) {
6998 				adv_addr_type = adv_addr_type_curr;
6999 				adv_addr = adv_addr_curr;
7000 			}
7001 
7002 			if (!direct_addr) {
7003 				direct_addr_type = direct_addr_type_curr;
7004 				direct_addr = direct_addr_curr;
7005 			}
7006 
7007 			if (scan_data) {
7008 				scan_data_len_total += data_len_curr;
7009 			} else if (!data) {
7010 				node_rx_data = node_rx_curr;
7011 				data_len = data_len_curr;
7012 				data_len_total = data_len;
7013 				data = data_curr;
7014 				tx_pwr = tx_pwr_curr;
7015 			} else {
7016 				data_len_total += data_len_curr;
7017 			}
7018 
7019 #if defined(CONFIG_BT_CTLR_PRIVACY)
7020 			if (rl_idx >= ll_rl_size_get()) {
7021 				rl_idx = rl_idx_curr;
7022 			}
7023 #endif /* CONFIG_BT_CTLR_PRIVACY */
7024 
7025 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7026 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7027 			if (!devmatch) {
7028 				devmatch = devmatch_curr;
7029 			}
7030 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7031 		}
7032 
7033 		if (!node_rx_next) {
7034 			bool has_aux_ptr = !!sec_phy_curr;
7035 
7036 			if (scan_data) {
7037 				if (has_aux_ptr) {
7038 					scan_data_status =
7039 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7040 				}
7041 			} else if (has_aux_ptr) {
7042 				data_status =
7043 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7044 			}
7045 
7046 			break;
7047 		}
7048 
7049 		node_rx_curr = node_rx_next;
7050 		node_rx_next = node_rx_curr->hdr.rx_ftr.extra;
7051 		adv = (void *)node_rx_curr->pdu;
7052 	} while (1);
7053 
7054 	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
7055 	    IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) &&
7056 	    !devmatch) {
7057 		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
7058 		return;
7059 	}
7060 
7061 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
7062 	if (adv_addr) {
7063 		if (dup_scan &&
7064 		    dup_found(PDU_ADV_TYPE_EXT_IND, adv_addr_type, adv_addr,
7065 			      (evt_type & BIT_MASK(2)), adi, data_status)) {
7066 			node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
7067 			return;
7068 		}
7069 	}
7070 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
7071 
7072 	/* If data incomplete */
7073 	if (data_status) {
7074 		/* Data incomplete and no more to come */
7075 		if (!(adv_addr ||
7076 		      (adi && ((tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) ||
7077 			       data)))) {
			/* No device address, and no valid AD data or Tx Power
			 * parsed for this PDU chain carrying an ADI: skip HCI
			 * event generation. In other words, generate the HCI
			 * event only if a device address is present, or if
			 * Tx Power and/or data is present from an anonymous
			 * device.
			 */
7085 			node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
7086 			return;
7087 		}
7088 	}
7089 
7090 	/* Restrict data length to maximum scan data length */
7091 	if (data_len_total > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
7092 		data_len_total = CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX;
7093 		if (data_len > data_len_total) {
7094 			data_len = data_len_total;
7095 		}
7096 
7097 		data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7098 	}
7099 
7100 	/* Set directed advertising bit */
7101 	if (direct_addr) {
7102 		evt_type |= BT_HCI_LE_ADV_EVT_TYPE_DIRECT;
7103 	}
7104 
7105 	/* HCI fragment */
7106 	evt_buf = buf;
7107 	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7108 		       sizeof(struct bt_hci_evt_le_meta_event) -
7109 		       sizeof(struct bt_hci_evt_le_ext_advertising_report) -
7110 		       sizeof(struct bt_hci_evt_le_ext_advertising_info);
7111 
	/* If the PDU data length is less than the total data length, or
	 * greater than the maximum HCI event data length, then fragment.
	 */
7115 	if ((data_len < data_len_total) || (data_len > data_len_max)) {
7116 		ext_adv_data_frag(node_rx_data, evt_type, phy, &sec_phy,
7117 				  adv_addr_type, adv_addr, direct_addr_type,
7118 				  direct_addr, rl_idx, &tx_pwr, rssi,
7119 				  interval_le16, adi, data_len_max,
7120 				  data_len_total, &data_len, &data, buf,
7121 				  &evt_buf);
7122 	}
7123 
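	/* The Extended Advertising Report Event_Type field uses bits 0..4 for
	 * the advertising event properties and bits 5..6 for the data status.
	 */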
7124 	/* Set data status bits */
7125 	evt_type |= (data_status << 5);
7126 
7127 	/* Start constructing the adv event for remainder of the PDU data */
7128 	ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type, adv_addr,
7129 			  direct_addr_type, direct_addr, rl_idx, tx_pwr, rssi,
7130 			  interval_le16, adi, data_len, data, evt_buf);
7131 
	/* If no scan response event is to be constructed, release and return */
7133 	if (!scan_data) {
7134 		node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
7135 
7136 		return;
7137 	}
7138 
7139 	/* Restrict scan response data length to maximum scan data length */
7140 	if (scan_data_len_total > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
7141 		scan_data_len_total = CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX;
7142 		if (scan_data_len > scan_data_len_total) {
7143 			scan_data_len = scan_data_len_total;
7144 		}
7145 
7146 		scan_data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7147 	}
7148 
7149 	/* Set scan response bit */
7150 	evt_type |= BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP;
7151 
7152 	/* Clear the data status bits */
7153 	evt_type &= ~(BIT_MASK(2) << 5);
7154 
	/* Allocate, append as a buf fragment, and construct the scan response
	 * event.
	 */
7158 	evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7159 	LL_ASSERT(evt_buf);
7160 
7161 	net_buf_frag_add(buf, evt_buf);
7162 
	/* If the scan response data length is less than the total scan data
	 * length, or greater than the maximum HCI event data length, then
	 * fragment.
	 */
7166 	if ((scan_data_len < scan_data_len_total) ||
7167 	    (scan_data_len > data_len_max)) {
7168 		ext_adv_data_frag(node_rx_scan_data, evt_type, phy,
7169 				  &sec_phy_scan, adv_addr_type, adv_addr,
7170 				  direct_addr_type, direct_addr, rl_idx,
7171 				  &scan_rsp_tx_pwr, rssi, interval_le16, adi,
7172 				  data_len_max, scan_data_len_total,
7173 				  &scan_data_len, &scan_data, buf, &evt_buf);
7174 	}
7175 
	/* Set scan data status bits */
7177 	evt_type |= (scan_data_status << 5);
7178 
7179 	/* Start constructing the event for remainder of the PDU data */
7180 	ext_adv_info_fill(evt_type, phy, sec_phy_scan, adv_addr_type, adv_addr,
7181 			  direct_addr_type, direct_addr, rl_idx,
7182 			  scan_rsp_tx_pwr, rssi, interval_le16, adi,
7183 			  scan_data_len, scan_data, evt_buf);
7184 
7185 	node_rx_extra_list_release(node_rx->hdr.rx_ftr.extra);
7186 }
7187 
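/* Dispatch an extended advertising report: a non-empty ADV_EXT_IND PDU is
 * encoded as an LE Extended Advertising Report; otherwise the PDU is encoded
 * via the legacy advertising report path.
 */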
7188 static void le_adv_ext_report(struct pdu_data *pdu_data,
7189 			      struct node_rx_pdu *node_rx,
7190 			      struct net_buf *buf, uint8_t phy)
7191 {
7192 	struct pdu_adv *adv = (void *)pdu_data;
7193 
7194 	if ((adv->type == PDU_ADV_TYPE_EXT_IND) && adv->len) {
7195 		le_ext_adv_report(pdu_data, node_rx, buf, phy);
7196 	} else {
7197 		le_ext_adv_legacy_report(pdu_data, node_rx, buf);
7198 	}
7199 }
7200 
7201 static void le_adv_ext_1M_report(struct pdu_data *pdu_data,
7202 				 struct node_rx_pdu *node_rx,
7203 				 struct net_buf *buf)
7204 {
7205 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_1M);
7206 }
7207 
7208 static void le_adv_ext_2M_report(struct pdu_data *pdu_data,
7209 				 struct node_rx_pdu *node_rx,
7210 				 struct net_buf *buf)
7211 {
7212 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_2M);
7213 }
7214 
7215 static void le_adv_ext_coded_report(struct pdu_data *pdu_data,
7216 				    struct node_rx_pdu *node_rx,
7217 				    struct net_buf *buf)
7218 {
7219 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_CODED);
7220 }
7221 
7222 static void le_scan_timeout(struct pdu_data *pdu_data,
7223 			    struct node_rx_pdu *node_rx, struct net_buf *buf)
7224 {
7225 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7226 	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_TIMEOUT)) {
7227 		return;
7228 	}
7229 
7230 	meta_evt(buf, BT_HCI_EVT_LE_SCAN_TIMEOUT, 0U);
7231 }
7232 
7233 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
7234 static void le_per_adv_sync_established(struct pdu_data *pdu_data,
7235 					struct node_rx_pdu *node_rx,
7236 					struct net_buf *buf)
7237 {
7238 	struct bt_hci_evt_le_per_adv_sync_established *sep;
7239 	struct ll_scan_set *scan;
7240 	struct node_rx_sync *se;
7241 	void *node;
7242 
7243 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7244 	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED)) {
7245 		return;
7246 	}
7247 
7248 	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
7249 		       sizeof(*sep));
7250 
7251 	/* Check for pdu field being aligned before accessing sync established
7252 	 * event.
7253 	 */
7254 	node = pdu_data;
7255 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync));
7256 
7257 	se = node;
7258 	sep->status = se->status;
7259 
7260 	if (se->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
7261 		return;
7262 	}
7263 
7264 	scan = node_rx->hdr.rx_ftr.param;
7265 
7266 #if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
7267 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
7268 	dup_periodic_adv_reset(scan->periodic.adv_addr_type,
7269 			       scan->periodic.adv_addr,
7270 			       scan->periodic.sid);
7271 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
7272 	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
7273 	*/
7274 
7275 	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7276 
7277 	/* Resolved address, if private, has been populated in ULL */
7278 	sep->adv_addr.type = scan->periodic.adv_addr_type;
7279 	(void)memcpy(sep->adv_addr.a.val, scan->periodic.adv_addr, BDADDR_SIZE);
7280 
7281 	sep->sid = scan->periodic.sid;
7282 	sep->phy = find_lsb_set(se->phy);
7283 	sep->interval = sys_cpu_to_le16(se->interval);
7284 	sep->clock_accuracy = se->sca;
7285 }
7286 
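/* Encode an LE Periodic Advertising Report from a received AUX_SYNC_IND or
 * AUX_CHAIN_IND PDU, fragmenting the AD data across multiple report events
 * when needed, and append an LE BIGInfo Advertising Report if the ACAD
 * carries a BIG Info AD structure.
 */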
7287 static void le_per_adv_sync_report(struct pdu_data *pdu_data,
7288 				   struct node_rx_pdu *node_rx,
7289 				   struct net_buf *buf)
7290 {
7291 	struct node_rx_ftr *ftr = &node_rx->hdr.rx_ftr;
7292 	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7293 	struct pdu_adv *adv = (void *)pdu_data;
7294 	struct pdu_adv_aux_ptr *aux_ptr = NULL;
7295 	const struct pdu_adv_adi *adi = NULL;
7296 	uint8_t cte_type = BT_HCI_LE_NO_CTE;
7297 	const struct ll_sync_set *sync;
7298 	struct pdu_adv_com_ext_adv *p;
7299 	struct pdu_adv_ext_hdr *h;
7300 	uint16_t data_len_total;
7301 	struct net_buf *evt_buf;
7302 	uint8_t data_len = 0U;
7303 	uint8_t acad_len = 0U;
7304 	uint8_t *data = NULL;
7305 	uint8_t data_len_max;
7306 	uint8_t *acad = NULL;
7307 	uint8_t hdr_buf_len;
7308 	uint8_t hdr_len;
7309 	uint8_t *ptr;
7310 	int8_t rssi;
7311 	bool accept;
7312 
7313 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7314 	    (!(le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
7315 	     !(le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT))) {
7316 		return;
7317 	}
7318 
	/* NOTE: The timeout_reload field in the sync context is checked under
	 *       a race between the HCI Tx and Rx threads, wherein a sync
	 *       terminate may have reset the timeout_reload field before
	 *       releasing the sync context back into its memory pool. It is
	 *       important that the timeout_reload field is at a safe offset
	 *       inside the sync context so that it is not corrupted while the
	 *       context is in the memory pool.
	 *
	 *       This check ensures reports are not sent out after a sync
	 *       terminate.
	 */
7330 	sync = HDR_LLL2ULL(ftr->param);
7331 	if (unlikely(!sync->timeout_reload)) {
7332 		return;
7333 	}
7334 
7335 	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
7336 	    node_rx->hdr.rx_ftr.aux_failed) {
7337 		struct bt_hci_evt_le_per_advertising_report *sep;
7338 
7339 		sep = meta_evt(buf,
7340 			       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
7341 			       sizeof(*sep));
7342 
7343 		sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7344 		sep->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7345 		sep->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
7346 		sep->cte_type = BT_HCI_LE_NO_CTE;
7347 		sep->data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7348 		sep->length = 0;
7349 
7350 		return;
7351 	}
7352 
7353 	/* The Link Layer currently returns RSSI as an absolute value */
7354 	rssi = -(node_rx->hdr.rx_ftr.rssi);
7355 
7356 	LOG_DBG("len = %u, rssi = %d", adv->len, rssi);
7357 
7358 	p = (void *)&adv->adv_ext_ind;
7359 	h = (void *)p->ext_hdr_adv_data;
7360 	ptr = (void *)h;
7361 
7362 	LOG_DBG("    Per. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
7363 
7364 	if (!p->ext_hdr_len) {
7365 		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
7366 
7367 		goto no_ext_hdr;
7368 	}
7369 
7370 	ptr = h->data;
7371 
7372 	if (h->adv_addr) {
7373 		ptr += BDADDR_SIZE;
7374 	}
7375 
7376 	if (h->tgt_addr) {
7377 		ptr += BDADDR_SIZE;
7378 	}
7379 
7380 	if (h->cte_info) {
7381 		struct pdu_cte_info *cte_info;
7382 
7383 		cte_info = (void *)ptr;
7384 		cte_type = cte_info->type;
7385 		ptr++;
7386 
7387 		LOG_DBG("    CTE type= %d", cte_type);
7388 	}
7389 
7390 	if (h->adi) {
7391 		adi = (void *)ptr;
7392 
7393 		ptr += sizeof(struct pdu_adv_adi);
7394 	}
7395 
7396 	/* AuxPtr */
7397 	if (h->aux_ptr) {
7398 		uint8_t aux_phy;
7399 
7400 		aux_ptr = (void *)ptr;
7401 		if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
7402 			return;
7403 		}
7404 
7405 		ptr += sizeof(*aux_ptr);
7406 
7407 		aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7408 
7409 		LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
7410 		       "= %u offs = 0x%x, phy = 0x%x",
7411 		       aux_ptr->chan_idx, aux_ptr->ca,
7412 		       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
7413 	}
7414 
	/* SyncInfo is not used in the report; skip over it */
7416 	if (h->sync_info) {
7417 		ptr += sizeof(struct pdu_adv_sync_info);
7418 	}
7419 
7420 	/* Tx Power */
7421 	if (h->tx_pwr) {
7422 		tx_pwr = *(int8_t *)ptr;
7423 		ptr++;
7424 
7425 		LOG_DBG("    Tx pwr= %d dB", tx_pwr);
7426 	}
7427 
7428 	hdr_len = ptr - (uint8_t *)p;
7429 	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
7430 	if (hdr_len > hdr_buf_len) {
7431 		LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
7432 	} else {
7433 		acad_len = hdr_buf_len - hdr_len;
7434 		if (acad_len) {
7435 			acad = ptr;
7436 
7437 			ptr += acad_len;
7438 			hdr_len += acad_len;
7439 		}
7440 	}
7441 
7442 no_ext_hdr:
7443 	if (hdr_len < adv->len) {
7444 		data_len = adv->len - hdr_len;
7445 		data = ptr;
7446 
7447 		LOG_DBG("    AD Data (%u): <todo>", data_len);
7448 	}
7449 
7450 	if (0) {
7451 
7452 #if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
7453 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
7454 	} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
7455 		   adi) {
7456 		uint8_t data_status;
7457 
7458 		data_status = (aux_ptr) ?
7459 			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL :
7460 			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
7461 
7462 		accept = sync->rx_enable && ftr->sync_rx_enabled &&
7463 			 (!sync->nodups ||
7464 			  !dup_found(PDU_ADV_TYPE_EXT_IND,
7465 				     sync->peer_id_addr_type,
7466 				     sync->peer_id_addr,
7467 				     DUP_EXT_ADV_MODE_PERIODIC,
7468 				     adi, data_status));
7469 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
7470 	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
7471 	*/
7472 
7473 	} else {
7474 		accept = sync->rx_enable && ftr->sync_rx_enabled;
7475 	}
7476 
7477 	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7478 		       sizeof(struct bt_hci_evt_le_meta_event) -
7479 		       sizeof(struct bt_hci_evt_le_per_advertising_report);
7480 	data_len_total = node_rx->hdr.rx_ftr.aux_data_len;
7481 
7482 	evt_buf = buf;
7483 
7484 	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) && accept &&
7485 	    ((data_len_total - data_len) < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
7486 
7487 		/* Pass verdict in LL.TS.p19 section 4.2.3.6 Extended Scanning,
7488 		 * Passive, Periodic Advertising Report, RSSI and TX_Power
7489 		 * states:
7490 		 * TX_Power is set to value of the TxPower field for the
7491 		 * AUX_SYNC_IND received, and RSSI set to a valid value.
7492 		 * Subsequent reports with data and the status set to
7493 		 * "Incomplete, more data to come" or "complete" can have the
7494 		 * TX_Power field set to 0x7F.
7495 		 *
		 * In the implementation, data_len_total is the running total
		 * of the AD data length so far, and data_len is the current
		 * PDU's AD data length. For the AUX_SYNC_IND received,
		 * data_len_total == data_len.
7500 		 */
7501 		if (data_len_total > data_len) {
7502 			/* Subsequent reports */
7503 			tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7504 		}
7505 
7506 		data_len = MIN(data_len, (CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX +
7507 					  data_len - data_len_total));
7508 
7509 		do {
7510 			struct bt_hci_evt_le_per_advertising_report *sep;
7511 			uint8_t data_len_frag;
7512 			uint8_t data_status;
7513 
7514 			data_len_frag = MIN(data_len, data_len_max);
7515 
7516 			/* Start constructing periodic advertising report */
7517 			sep = meta_evt(evt_buf,
7518 				       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
7519 				       sizeof(*sep) + data_len_frag);
7520 
7521 			sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7522 			sep->tx_power = tx_pwr;
7523 			sep->rssi = rssi;
7524 			sep->cte_type = cte_type;
7525 			sep->length = data_len_frag;
7526 			memcpy(&sep->data[0], data, data_len_frag);
7527 
7528 			data += data_len_frag;
7529 			data_len -= data_len_frag;
7530 
7531 			if (data_len > 0) {
7532 				/* Some data left in PDU, mark as partial data. */
7533 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
7534 
7535 				evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7536 				LL_ASSERT(evt_buf);
7537 
7538 				net_buf_frag_add(buf, evt_buf);
7539 
7540 				tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7541 			} else if (!aux_ptr &&
7542 				   (data_len_total <= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
7543 				/* No data left, no AuxPtr, mark as complete data. */
7544 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
7545 			} else if (ftr->aux_sched &&
7546 				   (data_len_total < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
7547 				/* No data left, but have AuxPtr and scheduled aux scan,
7548 				 * mark as partial data.
7549 				 */
7550 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
7551 			} else {
7552 				/* No data left, have AuxPtr but not aux scan scheduled,
7553 				 * mark as incomplete data.
7554 				 */
7555 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7556 			}
7557 
7558 			sep->data_status = data_status;
7559 		} while (data_len > 0);
7560 
7561 		evt_buf = NULL;
7562 	}
7563 
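	/* Generate an LE BIGInfo Advertising Report when the ACAD is present
	 * and large enough to carry a BIG Info AD structure.
	 */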
7564 	if ((le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT) && acad &&
7565 	    (acad_len >= (PDU_BIG_INFO_CLEARTEXT_SIZE +
7566 			  PDU_ADV_DATA_HEADER_SIZE))) {
7567 		struct bt_hci_evt_le_biginfo_adv_report *sep;
7568 		struct pdu_big_info *bi;
7569 		uint8_t bi_size;
7570 
7571 		/* FIXME: Parse and find the BIGInfo */
7572 		if (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] != BT_DATA_BIG_INFO) {
7573 			return;
7574 		}
7575 
7576 		bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
7577 		bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
7578 
		/* Allocate a new event buffer if the periodic advertising
		 * report was constructed using the caller-supplied buffer.
		 */
7582 		if (!evt_buf) {
7583 			evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7584 			LL_ASSERT(evt_buf);
7585 
7586 			net_buf_frag_add(buf, evt_buf);
7587 		}
7588 
		/* Start constructing the BIGInfo advertising report */
7590 		sep = meta_evt(evt_buf, BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
7591 			       sizeof(*sep));
7592 
7593 		sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7594 
7595 		/* NOTE: both sep and bi struct store little-endian values,
7596 		 *       explicit endian-ness conversion not required.
7597 		 */
7598 		sep->num_bis = bi->num_bis;
7599 		sep->nse = bi->nse;
7600 		sep->iso_interval = bi->iso_interval;
7601 		sep->bn = bi->bn;
7602 		sep->pto = bi->pto;
7603 		sep->irc = bi->irc;
7604 		sep->max_pdu = bi->max_pdu;
7605 		sys_put_le24(sys_le24_to_cpu(bi->sdu_interval),
7606 			     sep->sdu_interval);
7607 		sep->max_sdu = bi->max_sdu;
7608 		sep->phy = HCI_AUX_PHY_TO_HCI_PHY(bi->chm_phy[4] >> 5);
7609 		sep->framing = (bi->payload_count_framing[4] >> 7) & 0x01;
7610 		if (bi_size == (PDU_BIG_INFO_ENCRYPTED_SIZE + 1)) {
7611 			sep->encryption = 1U;
7612 		} else {
7613 			sep->encryption = 0U;
7614 		}
7615 	}
7616 }
7617 
7618 static void le_per_adv_sync_lost(struct pdu_data *pdu_data,
7619 				 struct node_rx_pdu *node_rx,
7620 				 struct net_buf *buf)
7621 {
7622 	struct bt_hci_evt_le_per_adv_sync_lost *sep;
7623 
7624 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7625 	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_LOST)) {
7626 		return;
7627 	}
7628 
7629 	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, sizeof(*sep));
7630 	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7631 }
7632 
7633 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
7634 static void le_big_sync_established(struct pdu_data *pdu,
7635 				    struct node_rx_pdu *node_rx,
7636 				    struct net_buf *buf)
7637 {
7638 	struct bt_hci_evt_le_big_sync_established *sep;
7639 	struct ll_sync_iso_set *sync_iso;
7640 	struct node_rx_sync_iso *se;
7641 	struct lll_sync_iso *lll;
7642 	size_t evt_size;
7643 	void *node;
7644 
7645 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7646 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED)) {
7647 		return;
7648 	}
7649 
7650 	sync_iso = node_rx->hdr.rx_ftr.param;
7651 	lll = &sync_iso->lll;
7652 
7653 	evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
7654 
7655 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED, evt_size);
7656 	sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7657 
7658 	/* Check for pdu field being aligned before accessing ISO sync
7659 	 * established event.
7660 	 */
7661 	node = pdu;
7662 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync_iso));
7663 
7664 	se = node;
7665 	sep->status = se->status;
7666 	if (sep->status) {
7667 		return;
7668 	}
7669 
7670 	/* FIXME: Fill latency */
7671 	sys_put_le24(0, sep->latency);
7672 
7673 	sep->nse = lll->nse;
7674 	sep->bn = lll->bn;
7675 	sep->pto = lll->pto;
7676 	sep->irc = lll->irc;
7677 	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
7678 	sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
7679 	sep->num_bis = lll->stream_count;
7680 
7681 	/* Connection handle list of all BISes synchronized in the BIG */
7682 	for (uint8_t i = 0U; i < lll->stream_count; i++) {
7683 		uint16_t handle;
7684 
7685 		handle = LL_BIS_SYNC_HANDLE_FROM_IDX(lll->stream_handle[i]);
7686 		sep->handle[i] = sys_cpu_to_le16(handle);
7687 	}
7688 }
7689 
7690 static void le_big_sync_lost(struct pdu_data *pdu,
7691 			     struct node_rx_pdu *node_rx,
7692 			     struct net_buf *buf)
7693 {
7694 	struct bt_hci_evt_le_big_sync_lost *sep;
7695 
7696 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7697 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_LOST)) {
7698 		return;
7699 	}
7700 
7701 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_LOST, sizeof(*sep));
7702 	sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7703 	sep->reason = *((uint8_t *)pdu);
7704 }
7705 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
7706 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
7707 #endif /* CONFIG_BT_CTLR_ADV_EXT */
7708 #endif /* CONFIG_BT_OBSERVER */
7709 
7710 #if defined(CONFIG_BT_BROADCASTER)
7711 #if defined(CONFIG_BT_CTLR_ADV_EXT)
7712 static void le_adv_ext_terminate(struct pdu_data *pdu_data,
7713 				    struct node_rx_pdu *node_rx,
7714 				    struct net_buf *buf)
7715 {
7716 	struct bt_hci_evt_le_adv_set_terminated *sep;
7717 
7718 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7719 	    !(le_event_mask & BT_EVT_MASK_LE_ADV_SET_TERMINATED)) {
7720 		return;
7721 	}
7722 
7723 	sep = meta_evt(buf, BT_HCI_EVT_LE_ADV_SET_TERMINATED, sizeof(*sep));
7724 	sep->status = node_rx->hdr.rx_ftr.param_adv_term.status;
7725 	sep->adv_handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
7726 	sep->conn_handle =
7727 		sys_cpu_to_le16(node_rx->hdr.rx_ftr.param_adv_term.conn_handle);
7728 	sep->num_completed_ext_adv_evts =
7729 		node_rx->hdr.rx_ftr.param_adv_term.num_events;
7730 }
7731 
7732 #if defined(CONFIG_BT_CTLR_ADV_ISO)
7733 static void le_big_complete(struct pdu_data *pdu_data,
7734 			    struct node_rx_pdu *node_rx,
7735 			    struct net_buf *buf)
7736 {
7737 	struct bt_hci_evt_le_big_complete *sep;
7738 	struct ll_adv_iso_set *adv_iso;
7739 	struct lll_adv_iso *lll;
7740 	size_t evt_size;
7741 
7742 	adv_iso = node_rx->hdr.rx_ftr.param;
7743 	lll = &adv_iso->lll;
7744 
7745 	evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
7746 
7747 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_COMPLETE, evt_size);
7748 
7749 	sep->status = BT_HCI_ERR_SUCCESS;
7750 	sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7751 
7752 	if (sep->status) {
7753 		return;
7754 	}
7755 
7756 	/* FIXME: Fill sync delay and latency */
7757 	sys_put_le24(0, sep->sync_delay);
7758 	sys_put_le24(0, sep->latency);
7759 
7760 	sep->phy = find_lsb_set(lll->phy);
7761 	sep->nse = lll->nse;
7762 	sep->bn = lll->bn;
7763 	sep->pto = lll->pto;
7764 	sep->irc = lll->irc;
7765 	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
7766 	sep->num_bis = lll->num_bis;
7767 
7768 	/* Connection handle list of all BISes in the BIG */
7769 	for (uint8_t i = 0U; i < lll->num_bis; i++) {
7770 		uint16_t handle;
7771 
7772 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(lll->stream_handle[i]);
7773 		sep->handle[i] = sys_cpu_to_le16(handle);
7774 	}
7775 }
7776 
7777 static void le_big_terminate(struct pdu_data *pdu,
7778 			     struct node_rx_pdu *node_rx,
7779 			     struct net_buf *buf)
7780 {
7781 	struct bt_hci_evt_le_big_terminate *sep;
7782 
7783 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7784 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_TERMINATED)) {
7785 		return;
7786 	}
7787 
7788 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_TERMINATE, sizeof(*sep));
7789 	sep->big_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7790 	sep->reason = *((uint8_t *)pdu);
7791 }
7792 #endif /* CONFIG_BT_CTLR_ADV_ISO */
7793 #endif /* CONFIG_BT_CTLR_ADV_EXT */
7794 #endif /* CONFIG_BT_BROADCASTER */
7795 
7796 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
7797 static void le_scan_req_received(struct pdu_data *pdu_data,
7798 				 struct node_rx_pdu *node_rx,
7799 				 struct net_buf *buf)
7800 {
7801 	struct pdu_adv *adv = (void *)pdu_data;
7802 	struct bt_hci_evt_le_scan_req_received *sep;
7803 
7804 #if defined(CONFIG_BT_CTLR_PRIVACY)
7805 	uint8_t rl_idx;
7806 #endif
7807 
7808 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7809 	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_REQ_RECEIVED)) {
7810 		bt_addr_le_t addr;
7811 		uint8_t handle;
7812 		int8_t rssi;
7813 
7814 		handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
7815 		addr.type = adv->tx_addr;
7816 		memcpy(&addr.a.val[0], &adv->scan_req.scan_addr[0],
7817 		       sizeof(bt_addr_t));
7818 
7819 		/* The Link Layer currently returns RSSI as an absolute value */
7820 		rssi = -(node_rx->hdr.rx_ftr.rssi);
7821 
7822 		LOG_DBG("handle: %d, addr: %s, rssi: %d dB.", handle, bt_addr_le_str(&addr), rssi);
7823 
7824 		return;
7825 	}
7826 
7827 	sep = meta_evt(buf, BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, sizeof(*sep));
7828 	sep->handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
7829 	sep->addr.type = adv->tx_addr;
7830 	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
7831 	       sizeof(bt_addr_t));
7832 
7833 #if defined(CONFIG_BT_CTLR_PRIVACY)
7834 	rl_idx = node_rx->hdr.rx_ftr.rl_idx;
7835 	if (rl_idx < ll_rl_size_get()) {
7836 		/* Store identity address */
7837 		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
7838 				  &sep->addr.a.val[0]);
7839 		/* Mark it as identity address from RPA (0x02, 0x03) */
7840 		sep->addr.type += 2U;
7841 	} else {
7842 #else
7843 	if (1) {
7844 #endif
7845 		sep->addr.type = adv->tx_addr;
7846 		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
7847 		       sizeof(bt_addr_t));
7848 	}
7849 }
7850 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
7851 
7852 #if defined(CONFIG_BT_CONN)
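/* Encode a connection complete event: the LE Enhanced Connection Complete
 * event when supported and unmasked (including local and peer RPAs when
 * privacy is enabled), otherwise the legacy LE Connection Complete event.
 */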
7853 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
7854 			     struct net_buf *buf)
7855 {
7856 	struct bt_hci_evt_le_conn_complete *lecc;
7857 	struct node_rx_cc *cc;
7858 	uint8_t status;
7859 	void *node;
7860 
7861 	/* Check for pdu field being aligned before accessing connection
7862 	 * complete event.
7863 	 */
7864 	node = pdu_data;
7865 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));
7866 
7867 	cc = node;
7868 	status = cc->status;
7869 
7870 #if defined(CONFIG_BT_CTLR_PRIVACY)
7871 	if (!status) {
7872 		/* Update current RPA */
7873 		ll_rl_crpa_set(cc->peer_addr_type,
7874 			       &cc->peer_addr[0], 0xff,
7875 			       &cc->peer_rpa[0]);
7876 	}
7877 #endif
7878 
7879 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7880 	    (!(le_event_mask & BT_EVT_MASK_LE_CONN_COMPLETE) &&
7881 #if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
7882 	     !(le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE))) {
7883 #else
7884 	     1)) {
7885 #endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
7886 		return;
7887 	}
7888 
7889 	if (!status) {
7890 		conn_count++;
7891 	}
7892 
7893 #if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
7894 	if (le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE) {
7895 		struct bt_hci_evt_le_enh_conn_complete *leecc;
7896 
7897 		leecc = meta_evt(buf, BT_HCI_EVT_LE_ENH_CONN_COMPLETE,
7898 				 sizeof(*leecc));
7899 
7900 		if (status) {
7901 			(void)memset(leecc, 0x00, sizeof(*leecc));
7902 			leecc->status = status;
7903 			return;
7904 		}
7905 
7906 		leecc->status = 0x00;
7907 		leecc->handle = sys_cpu_to_le16(handle);
7908 		leecc->role = cc->role;
7909 
7910 		leecc->peer_addr.type = cc->peer_addr_type;
7911 		memcpy(&leecc->peer_addr.a.val[0], &cc->peer_addr[0],
7912 		       BDADDR_SIZE);
7913 
7914 #if defined(CONFIG_BT_CTLR_PRIVACY)
7915 		memcpy(&leecc->local_rpa.val[0], &cc->local_rpa[0],
7916 		       BDADDR_SIZE);
7917 		memcpy(&leecc->peer_rpa.val[0], &cc->peer_rpa[0],
7918 		       BDADDR_SIZE);
7919 #else /* !CONFIG_BT_CTLR_PRIVACY */
7920 		memset(&leecc->local_rpa.val[0], 0, BDADDR_SIZE);
7921 		memset(&leecc->peer_rpa.val[0], 0, BDADDR_SIZE);
7922 #endif /* !CONFIG_BT_CTLR_PRIVACY */
7923 
7924 		leecc->interval = sys_cpu_to_le16(cc->interval);
7925 		leecc->latency = sys_cpu_to_le16(cc->latency);
7926 		leecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
7927 		leecc->clock_accuracy = cc->sca;
7928 		return;
7929 	}
7930 #endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
7931 
7932 	lecc = meta_evt(buf, BT_HCI_EVT_LE_CONN_COMPLETE, sizeof(*lecc));
7933 
7934 	if (status) {
7935 		(void)memset(lecc, 0x00, sizeof(*lecc));
7936 		lecc->status = status;
7937 		return;
7938 	}
7939 
7940 	lecc->status = 0x00;
7941 	lecc->handle = sys_cpu_to_le16(handle);
7942 	lecc->role = cc->role;
7943 	lecc->peer_addr.type = cc->peer_addr_type & 0x1;
7944 	memcpy(&lecc->peer_addr.a.val[0], &cc->peer_addr[0], BDADDR_SIZE);
7945 	lecc->interval = sys_cpu_to_le16(cc->interval);
7946 	lecc->latency = sys_cpu_to_le16(cc->latency);
7947 	lecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
7948 	lecc->clock_accuracy = cc->sca;
7949 }
7950 
7951 void hci_disconn_complete_encode(struct pdu_data *pdu_data, uint16_t handle,
7952 				 struct net_buf *buf)
7953 {
7954 	struct bt_hci_evt_disconn_complete *ep;
7955 
7956 	if (!(event_mask & BT_EVT_MASK_DISCONN_COMPLETE)) {
7957 		return;
7958 	}
7959 
7960 	hci_evt_create(buf, BT_HCI_EVT_DISCONN_COMPLETE, sizeof(*ep));
7961 	ep = net_buf_add(buf, sizeof(*ep));
7962 
7963 	ep->status = 0x00;
7964 	ep->handle = sys_cpu_to_le16(handle);
7965 	ep->reason = *((uint8_t *)pdu_data);
7966 }
7967 
7968 void hci_disconn_complete_process(uint16_t handle)
7969 {
7970 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
7971 	/* Clear any pending packets upon disconnection */
7972 	/* Note: This requires linear handle values starting from 0 */
7973 	if (handle >= ARRAY_SIZE(hci_hbuf_pend)) {
7974 		return;
7975 	}
7976 
7977 	hci_hbuf_acked += hci_hbuf_pend[handle];
7978 	hci_hbuf_pend[handle] = 0U;
7979 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
7980 
7981 	conn_count--;
7982 }
7983 
7984 static void le_conn_update_complete(struct pdu_data *pdu_data, uint16_t handle,
7985 				    struct net_buf *buf)
7986 {
7987 	struct bt_hci_evt_le_conn_update_complete *sep;
7988 	struct node_rx_cu *cu;
7989 	void *node;
7990 
7991 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7992 	    !(le_event_mask & BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE)) {
7993 		return;
7994 	}
7995 
7996 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE, sizeof(*sep));
7997 
7998 	/* Check for pdu field being aligned before accessing connection
7999 	 * update complete event.
8000 	 */
8001 	node = pdu_data;
8002 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cu));
8003 
8004 	cu = node;
8005 	sep->status = cu->status;
8006 	sep->handle = sys_cpu_to_le16(handle);
8007 	sep->interval = sys_cpu_to_le16(cu->interval);
8008 	sep->latency = sys_cpu_to_le16(cu->latency);
8009 	sep->supv_timeout = sys_cpu_to_le16(cu->timeout);
8010 }
8011 
8012 #if defined(CONFIG_BT_CTLR_LE_ENC)
8013 static void enc_refresh_complete(struct pdu_data *pdu_data, uint16_t handle,
8014 				 struct net_buf *buf)
8015 {
8016 	struct bt_hci_evt_encrypt_key_refresh_complete *ep;
8017 
8018 	if (!(event_mask & BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE)) {
8019 		return;
8020 	}
8021 
8022 	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
8023 		       sizeof(*ep));
8024 	ep = net_buf_add(buf, sizeof(*ep));
8025 
8026 	ep->status = 0x00;
8027 	ep->handle = sys_cpu_to_le16(handle);
8028 }
8029 #endif /* CONFIG_BT_CTLR_LE_ENC */
8030 
8031 #if defined(CONFIG_BT_CTLR_LE_PING)
8032 static void auth_payload_timeout_exp(struct pdu_data *pdu_data, uint16_t handle,
8033 				     struct net_buf *buf)
8034 {
8035 	struct bt_hci_evt_auth_payload_timeout_exp *ep;
8036 
8037 	if (!(event_mask_page_2 & BT_EVT_MASK_AUTH_PAYLOAD_TIMEOUT_EXP)) {
8038 		return;
8039 	}
8040 
8041 	hci_evt_create(buf, BT_HCI_EVT_AUTH_PAYLOAD_TIMEOUT_EXP, sizeof(*ep));
8042 	ep = net_buf_add(buf, sizeof(*ep));
8043 
8044 	ep->handle = sys_cpu_to_le16(handle);
8045 }
8046 #endif /* CONFIG_BT_CTLR_LE_PING */
8047 
8048 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8049 static void le_chan_sel_algo(struct pdu_data *pdu_data, uint16_t handle,
8050 			     struct net_buf *buf)
8051 {
8052 	struct bt_hci_evt_le_chan_sel_algo *sep;
8053 	struct node_rx_cs *cs;
8054 
8055 	cs = (void *)pdu_data;
8056 
8057 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8058 	    !(le_event_mask & BT_EVT_MASK_LE_CHAN_SEL_ALGO)) {
8059 		LOG_DBG("handle: 0x%04x, CSA: %x.", handle, cs->csa);
8060 		return;
8061 	}
8062 
8063 	sep = meta_evt(buf, BT_HCI_EVT_LE_CHAN_SEL_ALGO, sizeof(*sep));
8064 
8065 	sep->handle = sys_cpu_to_le16(handle);
8066 	sep->chan_sel_algo = cs->csa;
8067 }
8068 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8069 
8070 #if defined(CONFIG_BT_CTLR_PHY)
8071 static void le_phy_upd_complete(struct pdu_data *pdu_data, uint16_t handle,
8072 				struct net_buf *buf)
8073 {
8074 	struct bt_hci_evt_le_phy_update_complete *sep;
8075 	struct node_rx_pu *pu;
8076 
8077 	pu = (void *)pdu_data;
8078 
8079 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8080 	    !(le_event_mask & BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE)) {
8081 		LOG_WRN("handle: 0x%04x, status: %x, tx: %x, rx: %x.", handle, pu->status,
8082 			find_lsb_set(pu->tx), find_lsb_set(pu->rx));
8083 		return;
8084 	}
8085 
8086 	sep = meta_evt(buf, BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE, sizeof(*sep));
8087 
8088 	sep->status = pu->status;
8089 	sep->handle = sys_cpu_to_le16(handle);
8090 	sep->tx_phy = find_lsb_set(pu->tx);
8091 	sep->rx_phy = find_lsb_set(pu->rx);
8092 }
8093 #endif /* CONFIG_BT_CTLR_PHY */
8094 
8095 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8096 static void le_req_peer_sca_complete(struct pdu_data *pdu, uint16_t handle,
8097 				struct net_buf *buf)
8098 {
8099 	struct bt_hci_evt_le_req_peer_sca_complete *sep;
8100 	struct node_rx_sca *scau;
8101 
8102 	scau = (void *)pdu;
8103 
8104 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8105 	    !(le_event_mask & BT_EVT_MASK_LE_REQ_PEER_SCA_COMPLETE)) {
8106 		LOG_WRN("handle: 0x%04x, status: %x, sca: %x.", handle,
8107 			scau->status,
8108 			scau->sca);
8109 		return;
8110 	}
8111 
8112 	sep = meta_evt(buf, BT_HCI_EVT_LE_REQ_PEER_SCA_COMPLETE, sizeof(*sep));
8113 
8114 	sep->status = scau->status;
8115 	sep->handle = sys_cpu_to_le16(handle);
8116 	sep->sca = scau->sca;
8117 }
8118 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8119 #endif /* CONFIG_BT_CONN */
8120 
8121 #if defined(CONFIG_BT_HCI_MESH_EXT)
8122 static void mesh_adv_cplt(struct pdu_data *pdu_data,
8123 			  struct node_rx_pdu *node_rx,
8124 			  struct net_buf *buf)
8125 {
8126 	struct bt_hci_evt_mesh_adv_complete *mep;
8127 
8128 	mep = mesh_evt(buf, BT_HCI_EVT_MESH_ADV_COMPLETE, sizeof(*mep));
8129 	mep->adv_slot = ((uint8_t *)pdu_data)[0];
8130 }
8131 #endif /* CONFIG_BT_HCI_MESH_EXT */
8132 
8133 /**
8134  * @brief Encode a control-PDU into an HCI buffer
8135  * @details Execution context: Host thread
8136  *
 * @param node_rx[in]  RX node containing header and PDU
 * @param pdu_data[in] PDU. Same as node_rx->pdu, but more convenient
 * @param buf[out]     Upwards-going HCI buffer to fill
8140  */
8141 static void encode_control(struct node_rx_pdu *node_rx,
8142 			   struct pdu_data *pdu_data, struct net_buf *buf)
8143 {
8144 	uint16_t handle;
8145 
8146 	handle = node_rx->hdr.handle;
8147 
8148 	switch (node_rx->hdr.type) {
8149 #if defined(CONFIG_BT_OBSERVER)
8150 	case NODE_RX_TYPE_REPORT:
8151 		le_advertising_report(pdu_data, node_rx, buf);
8152 		break;
8153 
8154 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8155 	case NODE_RX_TYPE_EXT_1M_REPORT:
8156 		le_adv_ext_1M_report(pdu_data, node_rx, buf);
8157 		break;
8158 
8159 	case NODE_RX_TYPE_EXT_2M_REPORT:
8160 		le_adv_ext_2M_report(pdu_data, node_rx, buf);
8161 		break;
8162 
8163 	case NODE_RX_TYPE_EXT_CODED_REPORT:
8164 		le_adv_ext_coded_report(pdu_data, node_rx, buf);
8165 		break;
8166 
8167 	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
8168 		le_scan_timeout(pdu_data, node_rx, buf);
8169 		break;
8170 
8171 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
8172 	case NODE_RX_TYPE_SYNC:
8173 		le_per_adv_sync_established(pdu_data, node_rx, buf);
8174 		break;
8175 
8176 	case NODE_RX_TYPE_SYNC_REPORT:
8177 		le_per_adv_sync_report(pdu_data, node_rx, buf);
8178 		break;
8179 
8180 	case NODE_RX_TYPE_SYNC_LOST:
8181 		le_per_adv_sync_lost(pdu_data, node_rx, buf);
8182 		break;
8183 
8184 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
8185 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
8186 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
8187 		vs_le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8188 #else
8189 		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8190 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
8191 		break;
8192 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
8193 
8194 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
8195 	case NODE_RX_TYPE_SYNC_ISO:
8196 		le_big_sync_established(pdu_data, node_rx, buf);
8197 		break;
8198 
8199 	case NODE_RX_TYPE_SYNC_ISO_LOST:
8200 		le_big_sync_lost(pdu_data, node_rx, buf);
8201 		break;
8202 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8203 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8204 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8205 #endif /* CONFIG_BT_OBSERVER */
8206 
8207 #if defined(CONFIG_BT_BROADCASTER)
8208 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8209 	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
8210 		le_adv_ext_terminate(pdu_data, node_rx, buf);
8211 		break;
8212 
8213 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8214 	case NODE_RX_TYPE_BIG_COMPLETE:
8215 		le_big_complete(pdu_data, node_rx, buf);
8216 		break;
8217 	case NODE_RX_TYPE_BIG_TERMINATE:
8218 		le_big_terminate(pdu_data, node_rx, buf);
8219 		break;
8220 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8221 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8222 #endif /* CONFIG_BT_BROADCASTER */
8223 
8224 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8225 	case NODE_RX_TYPE_SCAN_REQ:
8226 		le_scan_req_received(pdu_data, node_rx, buf);
8227 		break;
8228 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8229 
8230 #if defined(CONFIG_BT_CONN)
8231 	case NODE_RX_TYPE_CONNECTION:
8232 		le_conn_complete(pdu_data, handle, buf);
8233 		break;
8234 
8235 	case NODE_RX_TYPE_TERMINATE:
8236 		hci_disconn_complete_encode(pdu_data, handle, buf);
8237 		break;
8238 
8239 	case NODE_RX_TYPE_CONN_UPDATE:
8240 		le_conn_update_complete(pdu_data, handle, buf);
8241 		break;
8242 
8243 #if defined(CONFIG_BT_CTLR_LE_ENC)
8244 	case NODE_RX_TYPE_ENC_REFRESH:
8245 		enc_refresh_complete(pdu_data, handle, buf);
8246 		break;
8247 #endif /* CONFIG_BT_CTLR_LE_ENC */
8248 
8249 #if defined(CONFIG_BT_CTLR_LE_PING)
8250 	case NODE_RX_TYPE_APTO:
8251 		auth_payload_timeout_exp(pdu_data, handle, buf);
8252 		break;
8253 #endif /* CONFIG_BT_CTLR_LE_PING */
8254 
8255 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8256 	case NODE_RX_TYPE_CHAN_SEL_ALGO:
8257 		le_chan_sel_algo(pdu_data, handle, buf);
8258 		break;
8259 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8260 
8261 #if defined(CONFIG_BT_CTLR_PHY)
8262 	case NODE_RX_TYPE_PHY_UPDATE:
8263 		le_phy_upd_complete(pdu_data, handle, buf);
8264 		return;
8265 #endif /* CONFIG_BT_CTLR_PHY */
8266 
8267 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
8268 	case NODE_RX_TYPE_RSSI:
8269 		LOG_INF("handle: 0x%04x, rssi: -%d dB.", handle, pdu_data->rssi);
8270 		return;
8271 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
8272 
8273 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
8274 	case NODE_RX_TYPE_CIS_REQUEST:
8275 		le_cis_request(pdu_data, node_rx, buf);
8276 		return;
8277 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
8278 
8279 #if defined(CONFIG_BT_CTLR_CONN_ISO)
8280 	case NODE_RX_TYPE_CIS_ESTABLISHED:
8281 		le_cis_established(pdu_data, node_rx, buf);
8282 		return;
8283 #endif /* CONFIG_BT_CTLR_CONN_ISO */
8284 
8285 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8286 	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
8287 		le_req_peer_sca_complete(pdu_data, handle, buf);
8288 		return;
8289 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8290 
8291 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
8292 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
8293 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
8294 		vs_le_df_connection_iq_report(node_rx, buf);
8295 #else
8296 		le_df_connection_iq_report(node_rx, buf);
8297 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
8298 		return;
8299 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
8300 #endif /* CONFIG_BT_CONN */
8301 
8302 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
8303 	case NODE_RX_TYPE_ADV_INDICATION:
8304 		LOG_INF("Advertised.");
8305 		return;
8306 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
8307 
8308 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
8309 	case NODE_RX_TYPE_SCAN_INDICATION:
8310 		LOG_INF("Scanned.");
8311 		return;
8312 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
8313 
8314 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
8315 	case NODE_RX_TYPE_PROFILE:
8316 		LOG_INF("l: %u, %u, %u; t: %u, %u, %u; cpu: %u, %u, %u, %u.",
8317 			pdu_data->profile.lcur, pdu_data->profile.lmin, pdu_data->profile.lmax,
8318 			pdu_data->profile.cur, pdu_data->profile.min, pdu_data->profile.max,
8319 			pdu_data->profile.radio, pdu_data->profile.lll, pdu_data->profile.ull_high,
8320 			pdu_data->profile.ull_low);
8321 		return;
8322 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
8323 
8324 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
8325 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
8326 		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8327 		return;
8328 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
8329 
8330 #if defined(CONFIG_BT_HCI_MESH_EXT)
8331 	case NODE_RX_TYPE_MESH_ADV_CPLT:
8332 		mesh_adv_cplt(pdu_data, node_rx, buf);
8333 		return;
8334 
8335 	case NODE_RX_TYPE_MESH_REPORT:
8336 		le_advertising_report(pdu_data, node_rx, buf);
8337 		return;
8338 #endif /* CONFIG_BT_HCI_MESH_EXT */
8339 
8340 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
8341 	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
8342 		hci_user_ext_encode_control(node_rx, pdu_data, buf);
8343 		return;
8344 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
8345 
8346 	default:
8347 		LL_ASSERT(0);
8348 		return;
8349 	}
8350 }
8351 
8352 #if defined(CONFIG_BT_CTLR_LE_ENC)
8353 static void le_ltk_request(struct pdu_data *pdu_data, uint16_t handle,
8354 			   struct net_buf *buf)
8355 {
8356 	struct bt_hci_evt_le_ltk_request *sep;
8357 
8358 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8359 	    !(le_event_mask & BT_EVT_MASK_LE_LTK_REQUEST)) {
8360 		return;
8361 	}
8362 
8363 	sep = meta_evt(buf, BT_HCI_EVT_LE_LTK_REQUEST, sizeof(*sep));
8364 
8365 	sep->handle = sys_cpu_to_le16(handle);
8366 	memcpy(&sep->rand, pdu_data->llctrl.enc_req.rand, sizeof(uint64_t));
8367 	memcpy(&sep->ediv, pdu_data->llctrl.enc_req.ediv, sizeof(uint16_t));
8368 }
8369 
8370 static void encrypt_change(uint8_t err, uint16_t handle,
8371 			   struct net_buf *buf)
8372 {
8373 	struct bt_hci_evt_encrypt_change *ep;
8374 
8375 	if (!(event_mask & BT_EVT_MASK_ENCRYPT_CHANGE)) {
8376 		return;
8377 	}
8378 
8379 	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_CHANGE, sizeof(*ep));
8380 	ep = net_buf_add(buf, sizeof(*ep));
8381 
8382 	ep->status = err;
8383 	ep->handle = sys_cpu_to_le16(handle);
8384 	ep->encrypt = !err ? 1 : 0;
8385 }
8386 #endif /* CONFIG_BT_CTLR_LE_ENC */
8387 
8388 static void le_remote_feat_complete(uint8_t status, struct pdu_data *pdu_data,
8389 				    uint16_t handle, struct net_buf *buf)
8390 {
8391 	struct bt_hci_evt_le_remote_feat_complete *sep;
8392 
8393 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8394 	    !(le_event_mask & BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE)) {
8395 		return;
8396 	}
8397 
8398 	sep = meta_evt(buf, BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE, sizeof(*sep));
8399 
8400 	sep->status = status;
8401 	sep->handle = sys_cpu_to_le16(handle);
8402 	if (!status) {
8403 		memcpy(&sep->features[0],
8404 		       &pdu_data->llctrl.feature_rsp.features[0],
8405 		       sizeof(sep->features));
8406 	} else {
8407 		(void)memset(&sep->features[0], 0x00, sizeof(sep->features));
8408 	}
8409 }
8410 
8411 static void le_unknown_rsp(struct pdu_data *pdu_data, uint16_t handle,
8412 			   struct net_buf *buf)
8413 {
8414 
8415 	switch (pdu_data->llctrl.unknown_rsp.type) {
8416 	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
8417 		le_remote_feat_complete(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE,
8418 					    NULL, handle, buf);
8419 		break;
8420 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8421 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8422 		le_df_cte_req_failed(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, handle, buf);
8423 		break;
8424 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8425 	default:
8426 		LOG_WRN("type: 0x%02x",	pdu_data->llctrl.unknown_rsp.type);
8427 		break;
8428 	}
8429 }
8430 
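/* Translate an LL_REJECT_EXT_IND into the failure event of the rejected
 * procedure, identified by the rejected opcode.
 */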
8431 static void le_reject_ext_ind(struct pdu_data *pdu, uint16_t handle, struct net_buf *buf)
8432 {
8433 	switch (pdu->llctrl.reject_ext_ind.reject_opcode) {
8434 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8435 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8436 		le_df_cte_req_failed(pdu->llctrl.reject_ext_ind.error_code, handle, buf);
8437 		break;
8438 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8439 	default:
8440 		LOG_WRN("reject opcode: 0x%02x", pdu->llctrl.reject_ext_ind.reject_opcode);
8441 		break;
8442 	}
8443 }

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
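/* Encode an HCI LE Connection Parameter Request event. If the event is
 * masked, the request is rejected on behalf of the host via ll_conn_update().
 */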
8445 static void le_conn_param_req(struct pdu_data *pdu_data, uint16_t handle,
8446 			      struct net_buf *buf)
8447 {
8448 	struct bt_hci_evt_le_conn_param_req *sep;
8449 
8450 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8451 	    !(le_event_mask & BT_EVT_MASK_LE_CONN_PARAM_REQ)) {
8452 		/* event masked, reject the conn param req */
8453 		ll_conn_update(handle, 2, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, 0,
8454 			       0, 0, 0, NULL);
8455 
8456 		return;
8457 	}
8458 
8459 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_PARAM_REQ, sizeof(*sep));
8460 
8461 	sep->handle = sys_cpu_to_le16(handle);
8462 	sep->interval_min = pdu_data->llctrl.conn_param_req.interval_min;
8463 	sep->interval_max = pdu_data->llctrl.conn_param_req.interval_max;
8464 	sep->latency = pdu_data->llctrl.conn_param_req.latency;
8465 	sep->timeout = pdu_data->llctrl.conn_param_req.timeout;
8466 }
8467 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
8468 
8469 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
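/* Encode an LL_LENGTH_REQ/LL_LENGTH_RSP PDU as an HCI LE Data Length Change
 * event.
 */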
8470 static void le_data_len_change(struct pdu_data *pdu_data, uint16_t handle,
8471 			       struct net_buf *buf)
8472 {
8473 	struct bt_hci_evt_le_data_len_change *sep;
8474 
8475 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8476 	    !(le_event_mask & BT_EVT_MASK_LE_DATA_LEN_CHANGE)) {
8477 		return;
8478 	}
8479 
8480 	sep = meta_evt(buf, BT_HCI_EVT_LE_DATA_LEN_CHANGE, sizeof(*sep));
8481 
8482 	sep->handle = sys_cpu_to_le16(handle);
8483 	sep->max_tx_octets = pdu_data->llctrl.length_rsp.max_tx_octets;
8484 	sep->max_tx_time = pdu_data->llctrl.length_rsp.max_tx_time;
8485 	sep->max_rx_octets = pdu_data->llctrl.length_rsp.max_rx_octets;
8486 	sep->max_rx_time = pdu_data->llctrl.length_rsp.max_rx_time;
8487 }
8488 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
8489 
8490 #if defined(CONFIG_BT_REMOTE_VERSION)
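/* Encode an LL_VERSION_IND PDU as an HCI Read Remote Version Information
 * Complete event.
 */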
8491 static void remote_version_info_encode(struct pdu_data *pdu_data,
8492 				       uint16_t handle, struct net_buf *buf)
8493 {
8494 	struct pdu_data_llctrl_version_ind *ver_ind;
8495 	struct bt_hci_evt_remote_version_info *ep;
8496 
8497 	if (!(event_mask & BT_EVT_MASK_REMOTE_VERSION_INFO)) {
8498 		return;
8499 	}
8500 
8501 	hci_evt_create(buf, BT_HCI_EVT_REMOTE_VERSION_INFO, sizeof(*ep));
8502 	ep = net_buf_add(buf, sizeof(*ep));
8503 
8504 	ver_ind = &pdu_data->llctrl.version_ind;
8505 	ep->status = 0x00;
8506 	ep->handle = sys_cpu_to_le16(handle);
8507 	ep->version = ver_ind->version_number;
8508 	ep->manufacturer = ver_ind->company_id;
8509 	ep->subversion = ver_ind->sub_version_number;
8510 }
8511 #endif /* CONFIG_BT_REMOTE_VERSION */
8512 
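/* Dispatch a received LL control PDU to the HCI event encoder matching its
 * opcode.
 */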
8513 static void encode_data_ctrl(struct node_rx_pdu *node_rx,
8514 			     struct pdu_data *pdu_data, struct net_buf *buf)
8515 {
8516 	uint16_t handle = node_rx->hdr.handle;
8517 
8518 	switch (pdu_data->llctrl.opcode) {
8519 
8520 #if defined(CONFIG_BT_CTLR_LE_ENC)
8521 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
8522 		le_ltk_request(pdu_data, handle, buf);
8523 		break;
8524 
8525 	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
8526 		encrypt_change(0x00, handle, buf);
8527 		break;
8528 #endif /* CONFIG_BT_CTLR_LE_ENC */
8529 
8530 #if defined(CONFIG_BT_REMOTE_VERSION)
8531 	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
8532 		remote_version_info_encode(pdu_data, handle, buf);
8533 		break;
#endif /* CONFIG_BT_REMOTE_VERSION */
8535 
8536 	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
8537 		le_remote_feat_complete(0x00, pdu_data, handle, buf);
8538 		break;
8539 
8540 #if defined(CONFIG_BT_CTLR_LE_ENC)
8541 	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
8542 		encrypt_change(pdu_data->llctrl.reject_ind.error_code, handle,
8543 			       buf);
8544 		break;
8545 #endif /* CONFIG_BT_CTLR_LE_ENC */
8546 
8547 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
8548 	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
8549 		le_conn_param_req(pdu_data, handle, buf);
8550 		break;
8551 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
8552 
8553 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
8554 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
8555 	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
8556 		le_data_len_change(pdu_data, handle, buf);
8557 		break;
8558 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
8559 
8560 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8561 	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
8562 		le_df_cte_req_failed(BT_HCI_CTE_REQ_STATUS_RSP_WITHOUT_CTE, handle, buf);
8563 		break;
8564 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8565 
8566 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
8567 		le_unknown_rsp(pdu_data, handle, buf);
8568 		break;
8569 
8570 	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
8571 		le_reject_ext_ind(pdu_data, handle, buf);
8572 		break;
8573 
8574 	default:
8575 		LL_ASSERT(0);
8576 		return;
8577 	}
8578 }
8579 
8580 #if defined(CONFIG_BT_CONN)
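/* Encode a received LL data PDU as an HCI ACL data packet, setting the
 * packet boundary flag from the LLID and accounting for host buffer usage
 * when ACL flow control is enabled.
 */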
8581 void hci_acl_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
8582 {
8583 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
8584 	struct bt_hci_acl_hdr *acl;
8585 	uint16_t handle_flags;
8586 	uint16_t handle;
8587 	uint8_t *data;
8588 
8589 	handle = node_rx->hdr.handle;
8590 
8591 	switch (pdu_data->ll_id) {
8592 	case PDU_DATA_LLID_DATA_CONTINUE:
8593 	case PDU_DATA_LLID_DATA_START:
8594 		acl = (void *)net_buf_add(buf, sizeof(*acl));
8595 		if (pdu_data->ll_id == PDU_DATA_LLID_DATA_START) {
8596 			handle_flags = bt_acl_handle_pack(handle, BT_ACL_START);
8597 		} else {
8598 			handle_flags = bt_acl_handle_pack(handle, BT_ACL_CONT);
8599 		}
8600 		acl->handle = sys_cpu_to_le16(handle_flags);
8601 		acl->len = sys_cpu_to_le16(pdu_data->len);
8602 		data = (void *)net_buf_add(buf, pdu_data->len);
8603 		memcpy(data, pdu_data->lldata, pdu_data->len);
8604 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
8605 		if (hci_hbuf_total > 0) {
8606 			LL_ASSERT((hci_hbuf_sent - hci_hbuf_acked) <
8607 				  hci_hbuf_total);
8608 			hci_hbuf_sent++;
8609 			/* Note: This requires linear handle values starting
8610 			 * from 0
8611 			 */
8612 			LL_ASSERT(handle < ARRAY_SIZE(hci_hbuf_pend));
8613 			hci_hbuf_pend[handle]++;
8614 		}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
8616 		break;
8617 
8618 	default:
8619 		LL_ASSERT(0);
8620 		break;
8621 	}
8622 }
8623 #endif /* CONFIG_BT_CONN */
8624 
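/* Encode a controller rx node as an HCI event: non data-channel nodes are
 * handled by encode_control(); data-channel nodes carry LL control PDUs and
 * are handled by encode_data_ctrl() when connections are supported.
 */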
8625 void hci_evt_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
8626 {
8627 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
8628 
8629 	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
8630 		encode_control(node_rx, pdu_data, buf);
8631 	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
8632 		encode_data_ctrl(node_rx, pdu_data, buf);
8633 	}
8634 }
8635 
8636 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
8637 	defined(CONFIG_BT_CTLR_CONN_ISO)
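/* Encode an HCI Number Of Completed Packets event for a single handle. */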
8638 void hci_num_cmplt_encode(struct net_buf *buf, uint16_t handle, uint8_t num)
8639 {
8640 	struct bt_hci_evt_num_completed_packets *ep;
8641 	struct bt_hci_handle_count *hc;
8642 	uint8_t num_handles;
8643 	uint8_t len;
8644 
8645 	num_handles = 1U;
8646 
8647 	len = (sizeof(*ep) + (sizeof(*hc) * num_handles));
8648 	hci_evt_create(buf, BT_HCI_EVT_NUM_COMPLETED_PACKETS, len);
8649 
8650 	ep = net_buf_add(buf, len);
8651 	ep->num_handles = num_handles;
8652 	hc = &ep->h[0];
8653 	hc->handle = sys_cpu_to_le16(handle);
8654 	hc->count = sys_cpu_to_le16(num);
8655 }
8656 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
8657 
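/* Classify a controller rx node so the HCI layer can decide how to treat it:
 * discardable event, required event, connection-related event, ISO data,
 * LLCP, ACL data, or none.
 */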
8658 uint8_t hci_get_class(struct node_rx_pdu *node_rx)
8659 {
8660 #if defined(CONFIG_BT_CONN)
8661 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
#endif /* CONFIG_BT_CONN */
8663 
8664 	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
8665 
8666 		switch (node_rx->hdr.type) {
8667 #if defined(CONFIG_BT_OBSERVER) || \
8668 	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
8669 	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
8670 	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
8671 	defined(CONFIG_BT_CTLR_PROFILE_ISR)
8672 #if defined(CONFIG_BT_OBSERVER)
8673 		case NODE_RX_TYPE_REPORT:
8674 #endif /* CONFIG_BT_OBSERVER */
8675 
8676 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8677 		case NODE_RX_TYPE_SCAN_REQ:
8678 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8679 
8680 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
8681 		case NODE_RX_TYPE_ADV_INDICATION:
8682 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
8683 
8684 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
8685 		case NODE_RX_TYPE_SCAN_INDICATION:
8686 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
8687 
8688 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
8689 		case NODE_RX_TYPE_PROFILE:
8690 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
8691 			return HCI_CLASS_EVT_DISCARDABLE;
#endif /* CONFIG_BT_OBSERVER || CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
	* CONFIG_BT_CTLR_ADV_INDICATION || CONFIG_BT_CTLR_SCAN_INDICATION ||
	* CONFIG_BT_CTLR_PROFILE_ISR
	*/
8693 
8694 #if defined(CONFIG_BT_HCI_MESH_EXT)
8695 		case NODE_RX_TYPE_MESH_ADV_CPLT:
8696 		case NODE_RX_TYPE_MESH_REPORT:
8697 #endif /* CONFIG_BT_HCI_MESH_EXT */
8698 
8699 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8700 #if defined(CONFIG_BT_BROADCASTER)
8701 		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
8702 
8703 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8704 		case NODE_RX_TYPE_BIG_COMPLETE:
8705 		case NODE_RX_TYPE_BIG_TERMINATE:
8706 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8707 #endif /* CONFIG_BT_BROADCASTER */
8708 
8709 #if defined(CONFIG_BT_OBSERVER)
8710 		case NODE_RX_TYPE_EXT_1M_REPORT:
8711 		case NODE_RX_TYPE_EXT_2M_REPORT:
8712 		case NODE_RX_TYPE_EXT_CODED_REPORT:
8713 		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
8714 
8715 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
8716 		case NODE_RX_TYPE_SYNC:
8717 		case NODE_RX_TYPE_SYNC_REPORT:
8718 		case NODE_RX_TYPE_SYNC_LOST:
8719 
8720 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
8721 		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
8722 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
8723 
8724 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
8725 		case NODE_RX_TYPE_SYNC_ISO:
8726 		case NODE_RX_TYPE_SYNC_ISO_LOST:
8727 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8728 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8729 #endif /* CONFIG_BT_OBSERVER */
8730 
8731 			return HCI_CLASS_EVT_REQUIRED;
8732 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8733 
8734 #if defined(CONFIG_BT_CONN)
8735 		case NODE_RX_TYPE_CONNECTION:
8736 
8737 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
8738 		case NODE_RX_TYPE_CIS_REQUEST:
8739 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
8740 
8741 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8742 		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
8743 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8744 
8745 #if defined(CONFIG_BT_CTLR_CONN_ISO)
8746 		case NODE_RX_TYPE_CIS_ESTABLISHED:
8747 #endif /* CONFIG_BT_CTLR_CONN_ISO */
8748 
8749 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
8750 		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
8751 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
8752 
8753 			return HCI_CLASS_EVT_REQUIRED;
8754 
8755 		case NODE_RX_TYPE_TERMINATE:
8756 		case NODE_RX_TYPE_CONN_UPDATE:
8757 
8758 #if defined(CONFIG_BT_CTLR_LE_ENC)
8759 		case NODE_RX_TYPE_ENC_REFRESH:
8760 #endif /* CONFIG_BT_CTLR_LE_ENC */
8761 
8762 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
8763 		case NODE_RX_TYPE_RSSI:
8764 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
8765 
8766 #if defined(CONFIG_BT_CTLR_LE_PING)
8767 		case NODE_RX_TYPE_APTO:
8768 #endif /* CONFIG_BT_CTLR_LE_PING */
8769 
8770 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8771 		case NODE_RX_TYPE_CHAN_SEL_ALGO:
8772 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8773 
8774 #if defined(CONFIG_BT_CTLR_PHY)
8775 		case NODE_RX_TYPE_PHY_UPDATE:
8776 #endif /* CONFIG_BT_CTLR_PHY */
8777 
8778 			return HCI_CLASS_EVT_CONNECTION;
8779 #endif /* CONFIG_BT_CONN */
8780 
8781 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
8782 		case NODE_RX_TYPE_ISO_PDU:
8783 			return HCI_CLASS_ISO_DATA;
8784 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
8785 
8786 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
8787 		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
8788 			return HCI_CLASS_EVT_REQUIRED;
8789 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
8790 
8791 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
8792 		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
8793 			return hci_user_ext_get_class(node_rx);
8794 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
8795 
8796 		default:
8797 			return HCI_CLASS_NONE;
8798 		}
8799 
8800 #if defined(CONFIG_BT_CONN)
8801 	} else if (pdu_data->ll_id == PDU_DATA_LLID_CTRL) {
8802 		return HCI_CLASS_EVT_LLCP;
8803 	} else {
8804 		return HCI_CLASS_ACL_DATA;
8805 	}
8806 #else
8807 	} else {
8808 		return HCI_CLASS_NONE;
8809 	}
#endif /* CONFIG_BT_CONN */
8811 }
8812 
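/* Initialize the HCI layer: register the host buffers signal when ACL flow
 * control is enabled and reset the HCI state.
 */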
8813 void hci_init(struct k_poll_signal *signal_host_buf)
8814 {
8815 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
8816 	hbuf_signal = signal_host_buf;
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
8818 	reset(NULL, NULL);
8819 }
8820