1 /*
2  * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <stddef.h>
9 #include <string.h>
10 
11 #include <zephyr/version.h>
12 #include <errno.h>
13 
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/atomic.h>
17 
18 #include <zephyr/drivers/bluetooth.h>
19 
20 #include <zephyr/bluetooth/hci_types.h>
21 #include <zephyr/bluetooth/hci_vs.h>
22 #include <zephyr/bluetooth/buf.h>
23 
24 #include "../host/hci_ecc.h"
25 
26 #include "util/util.h"
27 #include "util/memq.h"
28 #include "util/mem.h"
29 #include "util/dbuf.h"
30 
31 #include "hal/ecb.h"
32 #include "hal/ccm.h"
33 #include "hal/ticker.h"
34 
35 #include "ticker/ticker.h"
36 
37 #include "ll_sw/pdu_df.h"
38 #include "lll/pdu_vendor.h"
39 #include "ll_sw/pdu.h"
40 
41 #include "ll_sw/lll.h"
42 #include "lll/lll_adv_types.h"
43 #include "ll_sw/lll_adv.h"
44 #include "lll/lll_adv_pdu.h"
45 #include "ll_sw/lll_scan.h"
46 #include "lll/lll_df_types.h"
47 #include "ll_sw/lll_sync.h"
48 #include "ll_sw/lll_sync_iso.h"
49 #include "ll_sw/lll_conn.h"
50 #include "ll_sw/lll_conn_iso.h"
51 #include "ll_sw/lll_iso_tx.h"
52 
53 #include "ll_sw/isoal.h"
54 
55 #include "ll_sw/ull_tx_queue.h"
56 
57 #include "ll_sw/ull_adv_types.h"
58 #include "ll_sw/ull_scan_types.h"
59 #include "ll_sw/ull_sync_types.h"
60 #include "ll_sw/ull_conn_types.h"
61 #include "ll_sw/ull_iso_types.h"
62 #include "ll_sw/ull_conn_iso_types.h"
63 #include "ll_sw/ull_conn_iso_internal.h"
64 #include "ll_sw/ull_df_types.h"
65 #include "ll_sw/ull_internal.h"
66 
67 #include "ll_sw/ull_adv_internal.h"
68 #include "ll_sw/ull_sync_internal.h"
69 #include "ll_sw/ull_conn_internal.h"
70 #include "ll_sw/ull_sync_iso_internal.h"
71 #include "ll_sw/ull_iso_internal.h"
72 #include "ll_sw/ull_df_internal.h"
73 
74 #include "ll.h"
75 #include "ll_feat.h"
76 #include "ll_settings.h"
77 
78 #include "hci_internal.h"
79 #include "hci_vendor.h"
80 
81 #if defined(CONFIG_BT_HCI_MESH_EXT)
82 #include "ll_sw/ll_mesh.h"
83 #endif /* CONFIG_BT_HCI_MESH_EXT */
84 
85 #if defined(CONFIG_BT_CTLR_DTM_HCI)
86 #include "ll_sw/ll_test.h"
87 #endif /* CONFIG_BT_CTLR_DTM_HCI */
88 
89 #if defined(CONFIG_BT_CTLR_USER_EXT)
90 #include "hci_user_ext.h"
91 #endif /* CONFIG_BT_CTLR_USER_EXT */
92 
93 #include "common/bt_str.h"
94 #include "hal/debug.h"
95 
96 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
97 #include <zephyr/logging/log.h>
98 LOG_MODULE_REGISTER(bt_ctlr_hci);
99 
100 #define STR_NULL_TERMINATOR 0x00
101 
102 /* Opcode of the HCI command currently being processed. The opcode is stored
103  * by hci_cmd_handle() and then used when creating Command Complete and
104  * Command Status events, to avoid passing it up the call chain.
105  */
106 static uint16_t _opcode;
107 
108 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
109 /* NOTE: The duplicate filter uses the two LS bits of the standard advertising modes:
110  *       0 - Non-Connectable Non-Scannable advertising report
111  *       1 - Connectable Non-Scannable advertising report
112  *       2 - Non-Connectable Scannable advertising report
113  *       3 - Connectable Scannable advertising report
114  *
115  *       FIXME: Duplicate filtering of Connectable Directed low and high duty
116  *              cycle advertising. If the advertiser switches between Connectable
117  *              Non-Scannable and Connectable Directed low or high duty cycle
118  *              without changing SID and DID, such reports are filtered out by
119  *              this implementation. The current implementation needs enhancement.
120  *
121  *       Define a custom duplicate filter mode for periodic advertising:
122  *       4 - Periodic Advertising report
123  */
124 
125 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
126 #define DUP_EXT_ADV_MODE_MAX      5
127 #define DUP_EXT_ADV_MODE_PERIODIC BIT(2)
128 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
129 #define DUP_EXT_ADV_MODE_MAX      4
130 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
131 
132 #define DUP_EXT_ADV_MODE_COUNT    4
133 
134 /* Duplicate filter entries, one per Bluetooth address */
135 static struct dup_entry {
136 	bt_addr_le_t addr;
137 
138 	/* Mask to accumulate advertising PDU type as bitmask */
139 	uint8_t      mask;
140 
141 #if defined(CONFIG_BT_CTLR_ADV_EXT)
142 	struct dup_ext_adv_mode {
143 		uint16_t set_count:5;
144 		uint16_t set_curr:5;
145 		struct dup_ext_adv_set {
146 			uint8_t data_cmplt:1;
147 			struct pdu_adv_adi adi;
148 		} set[CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX];
149 	} adv_mode[DUP_EXT_ADV_MODE_MAX];
150 #endif
151 } dup_filter[CONFIG_BT_CTLR_DUP_FILTER_LEN];
152 
153 /* Duplicate filtering is disabled if the count value is set to a negative integer */
154 #define DUP_FILTER_DISABLED (-1)
155 
156 /* Duplicate filtering array entry count, filtering disabled if negative */
157 static int32_t dup_count;
158 /* Duplicate filtering current free entry, overwrites entries after rollover */
159 static uint32_t dup_curr;
160 
161 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
162 /* Helper function to reset non-periodic advertising entries in filter table */
163 static void dup_ext_adv_reset(void);
164 /* Flag indicating whether advertising reports are to be filtered for duplicates. */
165 static bool dup_scan;
166 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
167 /* Constant true so that (dup_count >= 0) alone decides whether the advertising
168  * duplicate filter is enabled when Periodic Advertising ADI support is disabled.
169  */
170 static const bool dup_scan = true;
171 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
172 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
173 
174 #if defined(CONFIG_BT_HCI_MESH_EXT)
175 struct scan_filter {
176 	uint8_t count;
177 	uint8_t lengths[CONFIG_BT_CTLR_MESH_SF_PATTERNS];
178 	uint8_t patterns[CONFIG_BT_CTLR_MESH_SF_PATTERNS]
179 		     [BT_HCI_MESH_PATTERN_LEN_MAX];
180 };
181 
182 static struct scan_filter scan_filters[CONFIG_BT_CTLR_MESH_SCAN_FILTERS];
183 static uint8_t sf_curr;
184 #endif
185 
186 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
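/* Controller-to-host ACL flow control state. The magnitude of hci_hbuf_total
 * is the ACL packet count reported by the host via Host Buffer Size; the value
 * is kept negative while host flow control is disabled and is negated by
 * Set Controller To Host Flow Control when the host enables or disables it.
 * hci_hbuf_sent/hci_hbuf_acked count packets sent to and acknowledged by the
 * host, and hci_hbuf_pend holds the per-connection pending packet counts.
 */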
187 int32_t    hci_hbuf_total;
188 uint32_t    hci_hbuf_sent;
189 uint32_t    hci_hbuf_acked;
190 uint16_t    hci_hbuf_pend[CONFIG_BT_MAX_CONN];
191 atomic_t hci_state_mask;
192 static struct k_poll_signal *hbuf_signal;
193 #endif
194 
195 #if defined(CONFIG_BT_CONN)
196 static uint32_t conn_count;
197 #endif
198 
199 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
200 static uint32_t cis_pending_count;
201 #endif
202 
203 /* In HCI events, PHY indices start at 1, compared to the 0-indexed aux_ptr
204  * field in the Common Extended Advertising Payload Format in the PDUs.
205  */
206 #define HCI_AUX_PHY_TO_HCI_PHY(aux_phy) ((aux_phy) + 1)
207 
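/* Event mask values applied at initialization and restored by HCI Reset for
 * the Set Event Mask, Set Event Mask Page 2 and LE Set Event Mask commands.
 */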
208 #define DEFAULT_EVENT_MASK           0x1fffffffffff
209 #define DEFAULT_EVENT_MASK_PAGE_2    0x0
210 #define DEFAULT_LE_EVENT_MASK 0x1f
211 
212 static uint64_t event_mask = DEFAULT_EVENT_MASK;
213 static uint64_t event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
214 static uint64_t le_event_mask = DEFAULT_LE_EVENT_MASK;
215 #if defined(CONFIG_BT_HCI_VS)
216 __maybe_unused static uint64_t vs_events_mask = DEFAULT_VS_EVT_MASK;
217 #endif /* CONFIG_BT_HCI_VS */
218 
219 static struct net_buf *cmd_complete_status(uint8_t status);
220 
221 #if defined(CONFIG_BT_CTLR_ADV_EXT)
222 #define BUF_GET_TIMEOUT K_SECONDS(10)
223 
224 #if defined(CONFIG_BT_HCI_RAW)
225 static uint8_t ll_adv_cmds;
226 
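/* The first legacy or extended advertising command received locks the
 * controller to that command set; commands from the other set are then
 * rejected with Command Disallowed, as the Core Specification does not allow
 * mixing legacy and extended advertising commands.
 */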
227 __weak int ll_adv_cmds_set(uint8_t adv_cmds)
228 {
229 	if (!ll_adv_cmds) {
230 		ll_adv_cmds = adv_cmds;
231 	}
232 
233 	if (ll_adv_cmds != adv_cmds) {
234 		return -EINVAL;
235 	}
236 
237 	return 0;
238 }
239 
240 __weak int ll_adv_cmds_is_ext(void)
241 {
242 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
243 }
244 
245 #else /* !CONFIG_BT_HCI_RAW */
246 __weak int ll_adv_cmds_is_ext(void)
247 {
248 	return 1;
249 }
250 #endif /* !CONFIG_BT_HCI_RAW */
251 
252 static int adv_cmds_legacy_check(struct net_buf **cc_evt)
253 {
254 	int err;
255 
256 #if defined(CONFIG_BT_HCI_RAW)
257 	err = ll_adv_cmds_set(LL_ADV_CMDS_LEGACY);
258 	if (err && cc_evt) {
259 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
260 	}
261 #else
262 	if (cc_evt) {
263 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
264 	}
265 
266 	err = -EINVAL;
267 #endif /* CONFIG_BT_HCI_RAW */
268 
269 	return err;
270 }
271 
272 static int adv_cmds_ext_check(struct net_buf **cc_evt)
273 {
274 	int err;
275 
276 #if defined(CONFIG_BT_HCI_RAW)
277 	err = ll_adv_cmds_set(LL_ADV_CMDS_EXT);
278 	if (err && cc_evt) {
279 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
280 	}
281 #else
282 	err = 0;
283 #endif /* CONFIG_BT_HCI_RAW */
284 
285 	return err;
286 }
287 #else
288 static inline int adv_cmds_legacy_check(struct net_buf **cc_evt)
289 {
290 	return 0;
291 }
292 #endif /* CONFIG_BT_CTLR_ADV_EXT */
293 
294 #if defined(CONFIG_BT_CONN)
295 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
296 			     struct net_buf *buf);
297 #endif /* CONFIG_BT_CONN */
298 
299 static void hci_evt_create(struct net_buf *buf, uint8_t evt, uint8_t len)
300 {
301 	struct bt_hci_evt_hdr *hdr;
302 
303 	hdr = net_buf_add(buf, sizeof(*hdr));
304 	hdr->evt = evt;
305 	hdr->len = len;
306 }
307 
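/* Allocate a Command Complete event for the opcode currently being processed
 * (_opcode) and return a pointer to its plen-byte return parameter area.
 */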
308 void *hci_cmd_complete(struct net_buf **buf, uint8_t plen)
309 {
310 	*buf = bt_hci_cmd_complete_create(_opcode, plen);
311 
312 	return net_buf_add(*buf, plen);
313 }
314 
315 static struct net_buf *cmd_status(uint8_t status)
316 {
317 	return bt_hci_cmd_status_create(_opcode, status);
318 }
319 
320 static struct net_buf *cmd_complete_status(uint8_t status)
321 {
322 	struct net_buf *buf;
323 	struct bt_hci_evt_cc_status *ccst;
324 
325 	buf = bt_hci_cmd_complete_create(_opcode, sizeof(*ccst));
326 	ccst = net_buf_add(buf, sizeof(*ccst));
327 	ccst->status = status;
328 
329 	return buf;
330 }
331 
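/* Append an LE Meta event header with the given subevent code and return a
 * pointer to the melen-byte subevent parameter area.
 */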
332 static void *meta_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
333 {
334 	struct bt_hci_evt_le_meta_event *me;
335 
336 	hci_evt_create(buf, BT_HCI_EVT_LE_META_EVENT, sizeof(*me) + melen);
337 	me = net_buf_add(buf, sizeof(*me));
338 	me->subevent = subevt;
339 
340 	return net_buf_add(buf, melen);
341 }
342 
343 #if defined(CONFIG_BT_HCI_VS)
344 __maybe_unused static void *vs_event(struct net_buf *buf, uint8_t subevt, uint8_t evt_len)
345 {
346 	struct bt_hci_evt_vs *evt;
347 
348 	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*evt) + evt_len);
349 	evt = net_buf_add(buf, sizeof(*evt));
350 	evt->subevent = subevt;
351 
352 	return net_buf_add(buf, evt_len);
353 }
354 #endif /* CONFIG_BT_HCI_VS */
355 
356 #if defined(CONFIG_BT_HCI_MESH_EXT)
357 static void *mesh_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
358 {
359 	struct bt_hci_evt_mesh *me;
360 
361 	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*me) + melen);
362 	me = net_buf_add(buf, sizeof(*me));
363 	me->prefix = BT_HCI_MESH_EVT_PREFIX;
364 	me->subevent = subevt;
365 
366 	return net_buf_add(buf, melen);
367 }
368 #endif /* CONFIG_BT_HCI_MESH_EXT */
369 
370 #if defined(CONFIG_BT_CONN)
371 static void disconnect(struct net_buf *buf, struct net_buf **evt)
372 {
373 	struct bt_hci_cp_disconnect *cmd = (void *)buf->data;
374 	uint16_t handle;
375 	uint8_t status;
376 
377 	handle = sys_le16_to_cpu(cmd->handle);
378 	status = ll_terminate_ind_send(handle, cmd->reason);
379 
380 	*evt = cmd_status(status);
381 }
382 
383 static void read_remote_ver_info(struct net_buf *buf, struct net_buf **evt)
384 {
385 	struct bt_hci_cp_read_remote_version_info *cmd = (void *)buf->data;
386 	uint16_t handle;
387 	uint8_t status;
388 
389 	handle = sys_le16_to_cpu(cmd->handle);
390 	status = ll_version_ind_send(handle);
391 
392 	*evt = cmd_status(status);
393 }
394 #endif /* CONFIG_BT_CONN */
395 
396 static int link_control_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
397 				   struct net_buf **evt)
398 {
399 	switch (ocf) {
400 #if defined(CONFIG_BT_CONN)
401 	case BT_OCF(BT_HCI_OP_DISCONNECT):
402 		disconnect(cmd, evt);
403 		break;
404 	case BT_OCF(BT_HCI_OP_READ_REMOTE_VERSION_INFO):
405 		read_remote_ver_info(cmd, evt);
406 		break;
407 #endif /* CONFIG_BT_CONN */
408 	default:
409 		return -EINVAL;
410 	}
411 
412 	return 0;
413 }
414 
415 static void set_event_mask(struct net_buf *buf, struct net_buf **evt)
416 {
417 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
418 
419 	event_mask = sys_get_le64(cmd->events);
420 
421 	*evt = cmd_complete_status(0x00);
422 }
423 
424 static void set_event_mask_page_2(struct net_buf *buf, struct net_buf **evt)
425 {
426 	struct bt_hci_cp_set_event_mask_page_2 *cmd = (void *)buf->data;
427 
428 	event_mask_page_2 = sys_get_le64(cmd->events_page_2);
429 
430 	*evt = cmd_complete_status(0x00);
431 }
432 
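/* Handle HCI Reset: clear the scan and duplicate filters, restore the default
 * event masks and reset connection and flow control state. When called with a
 * command buffer, the LL is reset and a Command Complete event is generated;
 * calling with buf == NULL performs an internal reset without emitting an
 * event.
 */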
433 static void reset(struct net_buf *buf, struct net_buf **evt)
434 {
435 #if defined(CONFIG_BT_HCI_MESH_EXT)
436 	int i;
437 
438 	for (i = 0; i < ARRAY_SIZE(scan_filters); i++) {
439 		scan_filters[i].count = 0U;
440 	}
441 	sf_curr = 0xFF;
442 #endif
443 
444 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
445 	dup_count = DUP_FILTER_DISABLED;
446 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
447 	dup_scan = false;
448 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
449 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
450 
451 	/* reset event masks */
452 	event_mask = DEFAULT_EVENT_MASK;
453 	event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
454 	le_event_mask = DEFAULT_LE_EVENT_MASK;
455 
456 	if (buf) {
457 		ll_reset();
458 		*evt = cmd_complete_status(0x00);
459 	}
460 
461 #if defined(CONFIG_BT_CONN)
462 	conn_count = 0U;
463 #endif
464 
465 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
466 	cis_pending_count = 0U;
467 #endif
468 
469 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
470 	hci_hbuf_total = 0;
471 	hci_hbuf_sent = 0U;
472 	hci_hbuf_acked = 0U;
473 	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
474 	if (buf) {
475 		atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
476 		k_poll_signal_raise(hbuf_signal, 0x0);
477 	}
478 #endif
479 
480 	hci_recv_fifo_reset();
481 }
482 
483 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
484 static void set_ctl_to_host_flow(struct net_buf *buf, struct net_buf **evt)
485 {
486 	struct bt_hci_cp_set_ctl_to_host_flow *cmd = (void *)buf->data;
487 	uint8_t flow_enable = cmd->flow_enable;
488 	struct bt_hci_evt_cc_status *ccst;
489 
490 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
491 
492 	/* require the host buffer size to be set before flow control can be
493 	 * changed, and disallow changes while any connections are up
494 	 */
495 	if (!hci_hbuf_total || conn_count) {
496 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
497 		return;
498 	} else {
499 		ccst->status = 0x00;
500 	}
501 
502 	switch (flow_enable) {
503 	case BT_HCI_CTL_TO_HOST_FLOW_DISABLE:
504 		if (hci_hbuf_total < 0) {
505 			/* already disabled */
506 			return;
507 		}
508 		break;
509 	case BT_HCI_CTL_TO_HOST_FLOW_ENABLE:
510 		if (hci_hbuf_total > 0) {
511 			/* already enabled */
512 			return;
513 		}
514 		break;
515 	default:
516 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
517 		return;
518 	}
519 
520 	hci_hbuf_sent = 0U;
521 	hci_hbuf_acked = 0U;
522 	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
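	/* Flip the sign: a positive total means host flow control is enabled,
	 * a negative total means it is disabled.
	 */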
523 	hci_hbuf_total = -hci_hbuf_total;
524 }
525 
526 static void host_buffer_size(struct net_buf *buf, struct net_buf **evt)
527 {
528 	struct bt_hci_cp_host_buffer_size *cmd = (void *)buf->data;
529 	uint16_t acl_pkts = sys_le16_to_cpu(cmd->acl_pkts);
530 	uint16_t acl_mtu = sys_le16_to_cpu(cmd->acl_mtu);
531 	struct bt_hci_evt_cc_status *ccst;
532 
533 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
534 
535 	if (hci_hbuf_total) {
536 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
537 		return;
538 	}
539 	/* fragmentation from controller to host not supported, require
540 	 * ACL MTU to be at least the LL MTU
541 	 */
542 	if (acl_mtu < LL_LENGTH_OCTETS_RX_MAX) {
543 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
544 		return;
545 	}
546 
547 	LOG_DBG("FC: host buf size: %d", acl_pkts);
548 	hci_hbuf_total = -acl_pkts;
549 }
550 
551 static void host_num_completed_packets(struct net_buf *buf,
552 				       struct net_buf **evt)
553 {
554 	struct bt_hci_cp_host_num_completed_packets *cmd = (void *)buf->data;
555 	struct bt_hci_evt_cc_status *ccst;
556 	uint32_t count = 0U;
557 
558 	/* special case, no event returned except for error conditions */
559 	if (hci_hbuf_total <= 0) {
560 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
561 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
562 		return;
563 	} else if (!conn_count) {
564 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
565 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
566 		return;
567 	}
568 
569 	/* leave *evt == NULL so no event is generated */
570 	for (uint8_t i = 0; i < cmd->num_handles; i++) {
571 		uint16_t h = sys_le16_to_cpu(cmd->h[i].handle);
572 		uint16_t c = sys_le16_to_cpu(cmd->h[i].count);
573 
574 		if ((h >= ARRAY_SIZE(hci_hbuf_pend)) ||
575 		    (c > hci_hbuf_pend[h])) {
576 			ccst = hci_cmd_complete(evt, sizeof(*ccst));
577 			ccst->status = BT_HCI_ERR_INVALID_PARAM;
578 			return;
579 		}
580 
581 		hci_hbuf_pend[h] -= c;
582 		count += c;
583 	}
584 
585 	LOG_DBG("FC: acked: %d", count);
586 	hci_hbuf_acked += count;
587 	k_poll_signal_raise(hbuf_signal, 0x0);
588 }
589 #endif
590 
591 #if defined(CONFIG_BT_CTLR_LE_PING)
592 static void read_auth_payload_timeout(struct net_buf *buf, struct net_buf **evt)
593 {
594 	struct bt_hci_cp_read_auth_payload_timeout *cmd = (void *)buf->data;
595 	struct bt_hci_rp_read_auth_payload_timeout *rp;
596 	uint16_t auth_payload_timeout;
597 	uint16_t handle;
598 	uint8_t status;
599 
600 	handle = sys_le16_to_cpu(cmd->handle);
601 
602 	status = ll_apto_get(handle, &auth_payload_timeout);
603 
604 	rp = hci_cmd_complete(evt, sizeof(*rp));
605 	rp->status = status;
606 	rp->handle = sys_cpu_to_le16(handle);
607 	rp->auth_payload_timeout = sys_cpu_to_le16(auth_payload_timeout);
608 }
609 
610 static void write_auth_payload_timeout(struct net_buf *buf,
611 				       struct net_buf **evt)
612 {
613 	struct bt_hci_cp_write_auth_payload_timeout *cmd = (void *)buf->data;
614 	struct bt_hci_rp_write_auth_payload_timeout *rp;
615 	uint16_t auth_payload_timeout;
616 	uint16_t handle;
617 	uint8_t status;
618 
619 	handle = sys_le16_to_cpu(cmd->handle);
620 	auth_payload_timeout = sys_le16_to_cpu(cmd->auth_payload_timeout);
621 
622 	status = ll_apto_set(handle, auth_payload_timeout);
623 
624 	rp = hci_cmd_complete(evt, sizeof(*rp));
625 	rp->status = status;
626 	rp->handle = sys_cpu_to_le16(handle);
627 }
628 #endif /* CONFIG_BT_CTLR_LE_PING */
629 
630 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
631 static void configure_data_path(struct net_buf *buf,
632 				struct net_buf **evt)
633 {
634 	struct bt_hci_cp_configure_data_path *cmd = (void *)buf->data;
635 	struct bt_hci_rp_configure_data_path *rp;
636 
637 	uint8_t *vs_config;
638 	uint8_t status;
639 
640 	vs_config = &cmd->vs_config[0];
641 
642 	if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)) {
643 		status = ll_configure_data_path(cmd->data_path_dir,
644 						cmd->data_path_id,
645 						cmd->vs_config_len,
646 						vs_config);
647 	} else {
648 		status = BT_HCI_ERR_INVALID_PARAM;
649 	}
650 
651 	rp = hci_cmd_complete(evt, sizeof(*rp));
652 	rp->status = status;
653 }
654 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
655 
656 #if defined(CONFIG_BT_CTLR_CONN_ISO)
657 static void read_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
658 {
659 	struct bt_hci_rp_read_conn_accept_timeout *rp;
660 	uint16_t timeout;
661 
662 	ARG_UNUSED(buf);
663 
664 	rp = hci_cmd_complete(evt, sizeof(*rp));
665 
666 	rp->status = ll_conn_iso_accept_timeout_get(&timeout);
667 	rp->conn_accept_timeout = sys_cpu_to_le16(timeout);
668 }
669 
670 static void write_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
671 {
672 	struct bt_hci_cp_write_conn_accept_timeout *cmd = (void *)buf->data;
673 	struct bt_hci_rp_write_conn_accept_timeout *rp;
674 	uint16_t timeout;
675 
676 	timeout = sys_le16_to_cpu(cmd->conn_accept_timeout);
677 
678 	rp = hci_cmd_complete(evt, sizeof(*rp));
679 
680 	rp->status = ll_conn_iso_accept_timeout_set(timeout);
681 }
682 #endif /* CONFIG_BT_CTLR_CONN_ISO */
683 
684 #if defined(CONFIG_BT_CONN)
685 static void read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
686 {
687 	struct bt_hci_cp_read_tx_power_level *cmd = (void *)buf->data;
688 	struct bt_hci_rp_read_tx_power_level *rp;
689 	uint16_t handle;
690 	uint8_t status;
691 	uint8_t type;
692 
693 	handle = sys_le16_to_cpu(cmd->handle);
694 	type = cmd->type;
695 
696 	rp = hci_cmd_complete(evt, sizeof(*rp));
697 
698 	status = ll_tx_pwr_lvl_get(BT_HCI_VS_LL_HANDLE_TYPE_CONN,
699 				   handle, type, &rp->tx_power_level);
700 
701 	rp->status = status;
702 	rp->handle = sys_cpu_to_le16(handle);
703 }
704 #endif /* CONFIG_BT_CONN */
705 
706 static int ctrl_bb_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
707 			      struct net_buf **evt)
708 {
709 	switch (ocf) {
710 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK):
711 		set_event_mask(cmd, evt);
712 		break;
713 
714 	case BT_OCF(BT_HCI_OP_RESET):
715 		reset(cmd, evt);
716 		break;
717 
718 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK_PAGE_2):
719 		set_event_mask_page_2(cmd, evt);
720 		break;
721 
722 #if defined(CONFIG_BT_CTLR_CONN_ISO)
723 	case BT_OCF(BT_HCI_OP_READ_CONN_ACCEPT_TIMEOUT):
724 		read_conn_accept_timeout(cmd, evt);
725 		break;
726 
727 	case BT_OCF(BT_HCI_OP_WRITE_CONN_ACCEPT_TIMEOUT):
728 		write_conn_accept_timeout(cmd, evt);
729 		break;
730 #endif /* CONFIG_BT_CTLR_CONN_ISO */
731 
732 #if defined(CONFIG_BT_CONN)
733 	case BT_OCF(BT_HCI_OP_READ_TX_POWER_LEVEL):
734 		read_tx_power_level(cmd, evt);
735 		break;
736 #endif /* CONFIG_BT_CONN */
737 
738 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
739 	case BT_OCF(BT_HCI_OP_SET_CTL_TO_HOST_FLOW):
740 		set_ctl_to_host_flow(cmd, evt);
741 		break;
742 
743 	case BT_OCF(BT_HCI_OP_HOST_BUFFER_SIZE):
744 		host_buffer_size(cmd, evt);
745 		break;
746 
747 	case BT_OCF(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS):
748 		host_num_completed_packets(cmd, evt);
749 		break;
750 #endif
751 
752 #if defined(CONFIG_BT_CTLR_LE_PING)
753 	case BT_OCF(BT_HCI_OP_READ_AUTH_PAYLOAD_TIMEOUT):
754 		read_auth_payload_timeout(cmd, evt);
755 		break;
756 
757 	case BT_OCF(BT_HCI_OP_WRITE_AUTH_PAYLOAD_TIMEOUT):
758 		write_auth_payload_timeout(cmd, evt);
759 		break;
760 #endif /* CONFIG_BT_CTLR_LE_PING */
761 
762 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
763 	case BT_OCF(BT_HCI_OP_CONFIGURE_DATA_PATH):
764 		configure_data_path(cmd, evt);
765 		break;
766 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
767 
768 	default:
769 		return -EINVAL;
770 	}
771 
772 	return 0;
773 }
774 
775 static void read_local_version_info(struct net_buf *buf, struct net_buf **evt)
776 {
777 	struct bt_hci_rp_read_local_version_info *rp;
778 
779 	rp = hci_cmd_complete(evt, sizeof(*rp));
780 
781 	rp->status = 0x00;
782 	rp->hci_version = LL_VERSION_NUMBER;
783 	rp->hci_revision = sys_cpu_to_le16(0);
784 	rp->lmp_version = LL_VERSION_NUMBER;
785 	rp->manufacturer = sys_cpu_to_le16(ll_settings_company_id());
786 	rp->lmp_subversion = sys_cpu_to_le16(ll_settings_subversion_number());
787 }
788 
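/* Build the Supported Commands bit mask; each octet and bit position below
 * corresponds to one HCI command as listed in the Supported Commands table of
 * the Core Specification (Vol 4, Part E).
 */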
789 static void read_supported_commands(struct net_buf *buf, struct net_buf **evt)
790 {
791 	struct bt_hci_rp_read_supported_commands *rp;
792 
793 	rp = hci_cmd_complete(evt, sizeof(*rp));
794 
795 	rp->status = 0x00;
796 	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));
797 
798 #if defined(CONFIG_BT_REMOTE_VERSION)
799 	/* Read Remote Version Info. */
800 	rp->commands[2] |= BIT(7);
801 #endif
802 	/* Set Event Mask, and Reset. */
803 	rp->commands[5] |= BIT(6) | BIT(7);
804 
805 #if defined(CONFIG_BT_CTLR_CONN_ISO)
806 	/* Read/Write Connection Accept Timeout */
807 	rp->commands[7] |= BIT(2) | BIT(3);
808 #endif /* CONFIG_BT_CTLR_CONN_ISO */
809 
810 	/* Read TX Power Level. */
811 	rp->commands[10] |= BIT(2);
812 
813 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
814 	/* Set FC, Host Buffer Size and Host Num Completed */
815 	rp->commands[10] |= BIT(5) | BIT(6) | BIT(7);
816 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
817 
818 	/* Read Local Version Info, Read Local Supported Features. */
819 	rp->commands[14] |= BIT(3) | BIT(5);
820 	/* Read BD ADDR. */
821 	rp->commands[15] |= BIT(1);
822 
823 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
824 	/* Read RSSI. */
825 	rp->commands[15] |= BIT(5);
826 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
827 
828 	/* Set Event Mask Page 2 */
829 	rp->commands[22] |= BIT(2);
830 	/* LE Set Event Mask, LE Read Buffer Size, LE Read Local Supp Feats,
831 	 * Set Random Addr
832 	 */
833 	rp->commands[25] |= BIT(0) | BIT(1) | BIT(2) | BIT(4);
834 
835 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
836 	/* LE Read FAL Size, LE Clear FAL */
837 	rp->commands[26] |= BIT(6) | BIT(7);
838 	/* LE Add Dev to FAL, LE Remove Dev from FAL */
839 	rp->commands[27] |= BIT(0) | BIT(1);
840 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
841 
842 	/* LE Encrypt, LE Rand */
843 	rp->commands[27] |= BIT(6) | BIT(7);
844 	/* LE Read Supported States */
845 	rp->commands[28] |= BIT(3);
846 
847 #if defined(CONFIG_BT_BROADCASTER)
848 	/* LE Set Adv Params, LE Read Adv Channel TX Power, LE Set Adv Data */
849 	rp->commands[25] |= BIT(5) | BIT(6) | BIT(7);
850 	/* LE Set Scan Response Data, LE Set Adv Enable */
851 	rp->commands[26] |= BIT(0) | BIT(1);
852 
853 #if defined(CONFIG_BT_CTLR_ADV_EXT)
854 	/* LE Set Adv Set Random Addr, LE Set Ext Adv Params, LE Set Ext Adv
855 	 * Data, LE Set Ext Adv Scan Rsp Data, LE Set Ext Adv Enable, LE Read
856 	 * Max Adv Data Len, LE Read Num Supp Adv Sets
857 	 */
858 	rp->commands[36] |= BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
859 			    BIT(6) | BIT(7);
860 	/* LE Remove Adv Set, LE Clear Adv Sets */
861 	rp->commands[37] |= BIT(0) | BIT(1);
862 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
863 	/* LE Set PA Params, LE Set PA Data, LE Set PA Enable */
864 	rp->commands[37] |= BIT(2) | BIT(3) | BIT(4);
865 #if defined(CONFIG_BT_CTLR_ADV_ISO)
866 	/* LE Create BIG, LE Create BIG Test, LE Terminate BIG */
867 	rp->commands[42] |= BIT(5) | BIT(6) | BIT(7);
868 #endif /* CONFIG_BT_CTLR_ADV_ISO */
869 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
870 #endif /* CONFIG_BT_CTLR_ADV_EXT */
871 #endif /* CONFIG_BT_BROADCASTER */
872 
873 #if defined(CONFIG_BT_OBSERVER)
874 	/* LE Set Scan Params, LE Set Scan Enable */
875 	rp->commands[26] |= BIT(2) | BIT(3);
876 
877 #if defined(CONFIG_BT_CTLR_ADV_EXT)
878 	/* LE Set Extended Scan Params, LE Set Extended Scan Enable */
879 	rp->commands[37] |= BIT(5) | BIT(6);
880 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
881 	/* LE PA Create Sync, LE PA Create Sync Cancel, LE PA Terminate Sync */
882 	rp->commands[38] |= BIT(0) | BIT(1) | BIT(2);
883 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
884 	/* LE PA Add Device to Periodic Advertiser List,
885 	 * LE PA Remove Device from Periodic Advertiser List,
886 	 * LE Clear Periodic Advertiser List,
887 	 * LE Read Periodic Advertiser List Size
888 	 */
889 	rp->commands[38] |= BIT(3) | BIT(4) | BIT(5) | BIT(6);
890 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
891 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
892 	/* LE Set PA Receive Enable */
893 	rp->commands[40] |= BIT(5);
894 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
895 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
896 	/* LE BIG Create Sync, LE BIG Terminate Sync */
897 	rp->commands[43] |= BIT(0) | BIT(1);
898 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
899 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
900 #endif /* CONFIG_BT_CTLR_ADV_EXT */
901 
902 #endif /* CONFIG_BT_OBSERVER */
903 
904 #if defined(CONFIG_BT_CONN)
905 #if defined(CONFIG_BT_CENTRAL)
906 	/* LE Create Connection, LE Create Connection Cancel */
907 	rp->commands[26] |= BIT(4) | BIT(5);
908 	/* Set Host Channel Classification */
909 	rp->commands[27] |= BIT(3);
910 
911 #if defined(CONFIG_BT_CTLR_ADV_EXT)
912 	/* LE Extended Create Connection */
913 	rp->commands[37] |= BIT(7);
914 #endif /* CONFIG_BT_CTLR_ADV_EXT */
915 
916 #if defined(CONFIG_BT_CTLR_LE_ENC)
917 	/* LE Start Encryption */
918 	rp->commands[28] |= BIT(0);
919 #endif /* CONFIG_BT_CTLR_LE_ENC */
920 
921 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
922 	/* LE Set CIG Parameters */
923 	rp->commands[41] |= BIT(7);
924 	/* LE Set CIG Parameters Test, LE Create CIS, LE Remove CIS */
925 	rp->commands[42] |= BIT(0) | BIT(1) | BIT(2);
926 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
927 #endif /* CONFIG_BT_CENTRAL */
928 
929 #if defined(CONFIG_BT_PERIPHERAL)
930 #if defined(CONFIG_BT_CTLR_LE_ENC)
931 	/* LE LTK Request Reply, LE LTK Request Negative Reply */
932 	rp->commands[28] |= BIT(1) | BIT(2);
933 #endif /* CONFIG_BT_CTLR_LE_ENC */
934 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
935 	/* LE Accept CIS Request, LE Reject CIS Request */
936 	rp->commands[42] |= BIT(3) | BIT(4);
937 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
938 #endif /* CONFIG_BT_PERIPHERAL */
939 
940 	/* Disconnect. */
941 	rp->commands[0] |= BIT(5);
942 	/* LE Connection Update, LE Read Channel Map, LE Read Remote Features */
943 	rp->commands[27] |= BIT(2) | BIT(4) | BIT(5);
944 
945 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
946 	/* LE Remote Conn Param Req and Neg Reply */
947 	rp->commands[33] |= BIT(4) | BIT(5);
948 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
949 
950 #if defined(CONFIG_BT_CTLR_LE_PING)
951 	/* Read and Write authenticated payload timeout */
952 	rp->commands[32] |= BIT(4) | BIT(5);
953 #endif /* CONFIG_BT_CTLR_LE_PING */
954 
955 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
956 	/* LE Set Data Length, and LE Read Suggested Data Length. */
957 	rp->commands[33] |= BIT(6) | BIT(7);
958 	/* LE Write Suggested Data Length. */
959 	rp->commands[34] |= BIT(0);
960 	/* LE Read Maximum Data Length. */
961 	rp->commands[35] |= BIT(3);
962 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
963 
964 #if defined(CONFIG_BT_CTLR_PHY)
965 	/* LE Read PHY Command. */
966 	rp->commands[35] |= BIT(4);
967 	/* LE Set Default PHY Command. */
968 	rp->commands[35] |= BIT(5);
969 	/* LE Set PHY Command. */
970 	rp->commands[35] |= BIT(6);
971 #endif /* CONFIG_BT_CTLR_PHY */
972 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
973 	/* LE Request Peer SCA */
974 	rp->commands[43] |= BIT(2);
975 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
976 #endif /* CONFIG_BT_CONN */
977 
978 #if defined(CONFIG_BT_CTLR_DTM_HCI)
979 	/* LE RX Test, LE TX Test, LE Test End */
980 	rp->commands[28] |= BIT(4) | BIT(5) | BIT(6);
981 	/* LE Enhanced RX Test. */
982 	rp->commands[35] |= BIT(7);
983 	/* LE Enhanced TX Test. */
984 	rp->commands[36] |= BIT(0);
985 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
986 	rp->commands[39] |= BIT(3);
987 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
988 
989 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
990 	rp->commands[39] |= BIT(4);
991 #endif
992 
993 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
994 	rp->commands[45] |= BIT(0);
995 #endif
996 #endif /* CONFIG_BT_CTLR_DTM_HCI */
997 
998 #if defined(CONFIG_BT_CTLR_PRIVACY)
999 	/* LE resolving list commands, LE Read Peer RPA */
1000 	rp->commands[34] |= BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7);
1001 	/* LE Read Local RPA, LE Set AR Enable, Set RPA Timeout */
1002 	rp->commands[35] |= BIT(0) | BIT(1) | BIT(2);
1003 	/* LE Set Privacy Mode */
1004 	rp->commands[39] |= BIT(2);
1005 #endif /* CONFIG_BT_CTLR_PRIVACY */
1006 
1007 #if defined(CONFIG_BT_CTLR_DF)
1008 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
1009 	/* LE Set Connectionless CTE Transmit Parameters,
1010 	 * LE Set Connectionless CTE Transmit Enable
1011 	 */
1012 	rp->commands[39] |= BIT(5) | BIT(6);
1013 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
1014 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1015 	/* LE Set Connectionless IQ Sampling Enable */
1016 	rp->commands[39] |= BIT(7);
1017 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1018 	/* LE Read Antenna Information */
1019 	rp->commands[40] |= BIT(4);
1020 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1021 	/* LE Set Connection CTE Transmit Parameters */
1022 	rp->commands[40] |= BIT(1);
1023 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1024 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1025 	/* LE Set Connection CTE Receive Parameters */
1026 	rp->commands[40] |= BIT(0);
1027 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1028 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1029 	/* LE Connection CTE Request Enable */
1030 	rp->commands[40] |= BIT(2);
1031 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1032 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1033 	/* LE Connection CTE Response Enable */
1034 	rp->commands[40] |= BIT(3);
1035 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1036 
1037 #endif /* CONFIG_BT_CTLR_DF */
1038 
1039 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
1040 	/* LE Periodic Advertising Sync Transfer */
1041 	rp->commands[40] |= BIT(6);
1042 	/* LE Periodic Advertising Set Info Transfer */
1043 	rp->commands[40] |= BIT(7);
1044 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
1045 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1046 	/* LE Set Periodic Advertising Sync Transfer Parameters */
1047 	rp->commands[41] |= BIT(0);
1048 	/* LE Set Default Periodic Advertising Sync Transfer Parameters */
1049 	rp->commands[41] |= BIT(1);
1050 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1051 
1052 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_SEND_ECC_EMULATION)
1053 	bt_hci_ecc_supported_commands(rp->commands);
1054 #endif /* CONFIG_BT_HCI_RAW && CONFIG_BT_SEND_ECC_EMULATION */
1055 
1056 	/* LE Read TX Power. */
1057 	rp->commands[38] |= BIT(7);
1058 
1059 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1060 	/* LE Read Buffer Size v2, LE Read ISO TX Sync */
1061 	rp->commands[41] |= BIT(5) | BIT(6);
1062 	/* LE ISO Transmit Test */
1063 	rp->commands[43] |= BIT(5);
1064 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1065 
1066 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1067 	/* LE ISO Receive Test, LE ISO Read Test Counters */
1068 	rp->commands[43] |= BIT(6) | BIT(7);
1069 
1070 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1071 	/* LE Read ISO Link Quality */
1072 	rp->commands[44] |= BIT(2);
1073 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1074 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1075 
1076 #if defined(CONFIG_BT_CTLR_ISO)
1077 	/* LE Setup ISO Data Path, LE Remove ISO Data Path */
1078 	rp->commands[43] |= BIT(3) | BIT(4);
1079 	/* LE ISO Test End */
1080 	rp->commands[44] |= BIT(0);
1081 #endif /* CONFIG_BT_CTLR_ISO */
1082 
1083 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
1084 	/* LE Set Host Feature */
1085 	rp->commands[44] |= BIT(1);
1086 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
1087 
1088 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1089 	/* Read Supported Codecs [v2], Codec Capabilities, Controller Delay, Configure Data Path */
1090 	rp->commands[45] |= BIT(2) | BIT(3) | BIT(4) | BIT(5);
1091 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1092 }
1093 
1094 static void read_local_features(struct net_buf *buf, struct net_buf **evt)
1095 {
1096 	struct bt_hci_rp_read_local_features *rp;
1097 
1098 	rp = hci_cmd_complete(evt, sizeof(*rp));
1099 
1100 	rp->status = 0x00;
1101 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1102 	/* BR/EDR not supported and LE supported */
1103 	rp->features[4] = (1 << 5) | (1 << 6);
1104 }
1105 
1106 static void read_bd_addr(struct net_buf *buf, struct net_buf **evt)
1107 {
1108 	struct bt_hci_rp_read_bd_addr *rp;
1109 
1110 	rp = hci_cmd_complete(evt, sizeof(*rp));
1111 
1112 	rp->status = 0x00;
1113 
1114 	(void)ll_addr_read(0, &rp->bdaddr.val[0]);
1115 }
1116 
1117 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
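/* Weak vendor hooks for codec information. A vendor implementation may
 * override these to report supported standard and vendor-specific codecs,
 * their capabilities and the controller delay range; the defaults report no
 * codecs and the full delay range allowed by the specification.
 */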
1118 uint8_t __weak hci_vendor_read_std_codecs(
1119 	const struct bt_hci_std_codec_info_v2 **codecs)
1120 {
1121 	ARG_UNUSED(codecs);
1122 
1123 	/* return number of supported codecs */
1124 	return 0;
1125 }
1126 
1127 uint8_t __weak hci_vendor_read_vs_codecs(
1128 	const struct bt_hci_vs_codec_info_v2 **codecs)
1129 {
1130 	ARG_UNUSED(codecs);
1131 
1132 	/* return number of supported codecs */
1133 	return 0;
1134 }
1135 
1136 /* NOTE: Not implementing the [v1] version.
1137  * Refer to BT Spec v5.3 Vol 4, Part E 7.4.8 Read Local Supported Codecs command
1138  * The [v1] version of this command shall only return codecs supported on the
1139  * BR/EDR physical transport, while the [v2] version shall return codecs
1140  * supported on all physical transports.
1141  */
1142 static void read_codecs_v2(struct net_buf *buf, struct net_buf **evt)
1143 {
1144 	struct bt_hci_rp_read_codecs_v2 *rp;
1145 	const struct bt_hci_std_codec_info_v2 *std_codec_info;
1146 	const struct bt_hci_vs_codec_info_v2 *vs_codec_info;
1147 	struct bt_hci_std_codecs_v2 *std_codecs;
1148 	struct bt_hci_vs_codecs_v2 *vs_codecs;
1149 	size_t std_codecs_bytes;
1150 	size_t vs_codecs_bytes;
1151 	uint8_t num_std_codecs;
1152 	uint8_t num_vs_codecs;
1153 	uint8_t i;
1154 
1155 	/* read standard codec information */
1156 	num_std_codecs = hci_vendor_read_std_codecs(&std_codec_info);
1157 	std_codecs_bytes = sizeof(struct bt_hci_std_codecs_v2) +
1158 		num_std_codecs * sizeof(struct bt_hci_std_codec_info_v2);
1159 	/* read vendor-specific codec information */
1160 	num_vs_codecs = hci_vendor_read_vs_codecs(&vs_codec_info);
1161 	vs_codecs_bytes = sizeof(struct bt_hci_vs_codecs_v2) +
1162 		num_vs_codecs *	sizeof(struct bt_hci_vs_codec_info_v2);
1163 
1164 	/* allocate response packet */
1165 	rp = hci_cmd_complete(evt, sizeof(*rp) +
1166 			      std_codecs_bytes +
1167 			      vs_codecs_bytes);
1168 	rp->status = 0x00;
1169 
1170 	/* copy standard codec information */
1171 	std_codecs = (struct bt_hci_std_codecs_v2 *)&rp->codecs[0];
1172 	std_codecs->num_codecs = num_std_codecs;
1173 	for (i = 0; i < num_std_codecs; i++) {
1174 		struct bt_hci_std_codec_info_v2 *codec;
1175 
1176 		codec = &std_codecs->codec_info[i];
1177 		codec->codec_id = std_codec_info[i].codec_id;
1178 		codec->transports = std_codec_info[i].transports;
1179 	}
1180 
1181 	/* copy vendor specific codec information  */
1182 	vs_codecs = (struct bt_hci_vs_codecs_v2 *)&rp->codecs[std_codecs_bytes];
1183 	vs_codecs->num_codecs = num_vs_codecs;
1184 	for (i = 0; i < num_vs_codecs; i++) {
1185 		struct bt_hci_vs_codec_info_v2 *codec;
1186 
1187 		codec = &vs_codecs->codec_info[i];
1188 		codec->company_id =
1189 			sys_cpu_to_le16(vs_codec_info[i].company_id);
1190 		codec->codec_id = sys_cpu_to_le16(vs_codec_info[i].codec_id);
1191 		codec->transports = vs_codec_info[i].transports;
1192 	}
1193 }
1194 
1195 uint8_t __weak hci_vendor_read_codec_capabilities(uint8_t coding_format,
1196 						  uint16_t company_id,
1197 						  uint16_t vs_codec_id,
1198 						  uint8_t transport,
1199 						  uint8_t direction,
1200 						  uint8_t *num_capabilities,
1201 						  size_t *capabilities_bytes,
1202 						  const uint8_t **capabilities)
1203 {
1204 	ARG_UNUSED(coding_format);
1205 	ARG_UNUSED(company_id);
1206 	ARG_UNUSED(vs_codec_id);
1207 	ARG_UNUSED(transport);
1208 	ARG_UNUSED(direction);
1209 	ARG_UNUSED(capabilities);
1210 
1211 	*num_capabilities = 0;
1212 	*capabilities_bytes = 0;
1213 
1214 	/* return status */
1215 	return 0x00;
1216 }
1217 
1218 static void read_codec_capabilities(struct net_buf *buf, struct net_buf **evt)
1219 {
1220 	struct bt_hci_cp_read_codec_capabilities *cmd = (void *)buf->data;
1221 	struct bt_hci_rp_read_codec_capabilities *rp;
1222 	const uint8_t *capabilities;
1223 	size_t capabilities_bytes;
1224 	uint8_t num_capabilities;
1225 	uint16_t vs_codec_id;
1226 	uint16_t company_id;
1227 	uint8_t status;
1228 
1229 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1230 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1231 
1232 	/* read codec capabilities */
1233 	status = hci_vendor_read_codec_capabilities(cmd->codec_id.coding_format,
1234 						    company_id,
1235 						    vs_codec_id,
1236 						    cmd->transport,
1237 						    cmd->direction,
1238 						    &num_capabilities,
1239 						    &capabilities_bytes,
1240 						    &capabilities);
1241 
1242 	/* allocate response packet */
1243 	rp = hci_cmd_complete(evt, sizeof(*rp) + capabilities_bytes);
1244 	rp->status = status;
1245 
1246 	/* copy codec capabilities information */
1247 	rp->num_capabilities = num_capabilities;
1248 	memcpy(&rp->capabilities, capabilities, capabilities_bytes);
1249 }
1250 
1251 uint8_t __weak hci_vendor_read_ctlr_delay(uint8_t coding_format,
1252 					  uint16_t company_id,
1253 					  uint16_t vs_codec_id,
1254 					  uint8_t transport,
1255 					  uint8_t direction,
1256 					  uint8_t codec_config_len,
1257 					  const uint8_t *codec_config,
1258 					  uint32_t *min_delay,
1259 					  uint32_t *max_delay)
1260 {
1261 	ARG_UNUSED(coding_format);
1262 	ARG_UNUSED(company_id);
1263 	ARG_UNUSED(vs_codec_id);
1264 	ARG_UNUSED(transport);
1265 	ARG_UNUSED(direction);
1266 	ARG_UNUSED(codec_config_len);
1267 	ARG_UNUSED(codec_config);
1268 
1269 	*min_delay = 0;
1270 	*max_delay = 0x3D0900; /* 4 seconds, maximum value allowed by spec */
1271 
1272 	/* return status */
1273 	return 0x00;
1274 }
1275 
1276 static void read_ctlr_delay(struct net_buf *buf, struct net_buf **evt)
1277 {
1278 	struct bt_hci_cp_read_ctlr_delay *cmd = (void *)buf->data;
1279 	struct bt_hci_rp_read_ctlr_delay *rp;
1280 	uint16_t vs_codec_id;
1281 	uint16_t company_id;
1282 	uint32_t min_delay;
1283 	uint32_t max_delay;
1284 	uint8_t status;
1285 
1286 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1287 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1288 
1289 	status = hci_vendor_read_ctlr_delay(cmd->codec_id.coding_format,
1290 					    company_id,
1291 					    vs_codec_id,
1292 					    cmd->transport,
1293 					    cmd->direction,
1294 					    cmd->codec_config_len,
1295 					    cmd->codec_config,
1296 					    &min_delay,
1297 					    &max_delay);
1298 
1299 	rp = hci_cmd_complete(evt, sizeof(*rp));
1300 	rp->status = status;
1301 	sys_put_le24(min_delay, rp->min_ctlr_delay);
1302 	sys_put_le24(max_delay, rp->max_ctlr_delay);
1303 }
1304 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1305 
1306 static int info_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
1307 			   struct net_buf **evt)
1308 {
1309 	switch (ocf) {
1310 	case BT_OCF(BT_HCI_OP_READ_LOCAL_VERSION_INFO):
1311 		read_local_version_info(cmd, evt);
1312 		break;
1313 
1314 	case BT_OCF(BT_HCI_OP_READ_SUPPORTED_COMMANDS):
1315 		read_supported_commands(cmd, evt);
1316 		break;
1317 
1318 	case BT_OCF(BT_HCI_OP_READ_LOCAL_FEATURES):
1319 		read_local_features(cmd, evt);
1320 		break;
1321 
1322 	case BT_OCF(BT_HCI_OP_READ_BD_ADDR):
1323 		read_bd_addr(cmd, evt);
1324 		break;
1325 
1326 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1327 	case BT_OCF(BT_HCI_OP_READ_CODECS_V2):
1328 		read_codecs_v2(cmd, evt);
1329 		break;
1330 
1331 	case BT_OCF(BT_HCI_OP_READ_CODEC_CAPABILITIES):
1332 		read_codec_capabilities(cmd, evt);
1333 		break;
1334 
1335 	case BT_OCF(BT_HCI_OP_READ_CTLR_DELAY):
1336 		read_ctlr_delay(cmd, evt);
1337 		break;
1338 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1339 
1340 	default:
1341 		return -EINVAL;
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1348 static void read_rssi(struct net_buf *buf, struct net_buf **evt)
1349 {
1350 	struct bt_hci_cp_read_rssi *cmd = (void *)buf->data;
1351 	struct bt_hci_rp_read_rssi *rp;
1352 	uint16_t handle;
1353 
1354 	handle = sys_le16_to_cpu(cmd->handle);
1355 
1356 	rp = hci_cmd_complete(evt, sizeof(*rp));
1357 
1358 	rp->status = ll_rssi_get(handle, &rp->rssi);
1359 
1360 	rp->handle = sys_cpu_to_le16(handle);
1361 	/* The Link Layer currently returns RSSI as an absolute value */
1362 	rp->rssi = (!rp->status) ? -rp->rssi : 127;
1363 }
1364 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1365 
1366 static int status_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
1367 			     struct net_buf **evt)
1368 {
1369 	switch (ocf) {
1370 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1371 	case BT_OCF(BT_HCI_OP_READ_RSSI):
1372 		read_rssi(cmd, evt);
1373 		break;
1374 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1375 
1376 	default:
1377 		return -EINVAL;
1378 	}
1379 
1380 	return 0;
1381 }
1382 
1383 static void le_set_event_mask(struct net_buf *buf, struct net_buf **evt)
1384 {
1385 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
1386 
1387 	le_event_mask = sys_get_le64(cmd->events);
1388 
1389 	*evt = cmd_complete_status(0x00);
1390 }
1391 
1392 static void le_read_buffer_size(struct net_buf *buf, struct net_buf **evt)
1393 {
1394 	struct bt_hci_rp_le_read_buffer_size *rp;
1395 
1396 	rp = hci_cmd_complete(evt, sizeof(*rp));
1397 
1398 	rp->status = 0x00;
1399 
1400 	rp->le_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1401 	rp->le_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1402 }
1403 
1404 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1405 static void le_read_buffer_size_v2(struct net_buf *buf, struct net_buf **evt)
1406 {
1407 	struct bt_hci_rp_le_read_buffer_size_v2 *rp;
1408 
1409 	rp = hci_cmd_complete(evt, sizeof(*rp));
1410 
1411 	rp->status = 0x00;
1412 
1413 	rp->acl_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1414 	rp->acl_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1415 	rp->iso_max_len = sys_cpu_to_le16(BT_CTLR_ISO_TX_BUFFER_SIZE);
1416 	rp->iso_max_num = CONFIG_BT_CTLR_ISO_TX_BUFFERS;
1417 }
1418 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1419 
1420 static void le_read_local_features(struct net_buf *buf, struct net_buf **evt)
1421 {
1422 	struct bt_hci_rp_le_read_local_features *rp;
1423 
1424 	rp = hci_cmd_complete(evt, sizeof(*rp));
1425 
1426 	rp->status = 0x00;
1427 
1428 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1429 	sys_put_le64(ll_feat_get(), rp->features);
1430 }
1431 
1432 static void le_set_random_address(struct net_buf *buf, struct net_buf **evt)
1433 {
1434 	struct bt_hci_cp_le_set_random_address *cmd = (void *)buf->data;
1435 	uint8_t status;
1436 
1437 	status = ll_addr_set(1, &cmd->bdaddr.val[0]);
1438 
1439 	*evt = cmd_complete_status(status);
1440 }
1441 
1442 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
1443 static void le_read_fal_size(struct net_buf *buf, struct net_buf **evt)
1444 {
1445 	struct bt_hci_rp_le_read_fal_size *rp;
1446 
1447 	rp = hci_cmd_complete(evt, sizeof(*rp));
1448 	rp->status = 0x00;
1449 
1450 	rp->fal_size = ll_fal_size_get();
1451 }
1452 
1453 static void le_clear_fal(struct net_buf *buf, struct net_buf **evt)
1454 {
1455 	uint8_t status;
1456 
1457 	status = ll_fal_clear();
1458 
1459 	*evt = cmd_complete_status(status);
1460 }
1461 
1462 static void le_add_dev_to_fal(struct net_buf *buf, struct net_buf **evt)
1463 {
1464 	struct bt_hci_cp_le_add_dev_to_fal *cmd = (void *)buf->data;
1465 	uint8_t status;
1466 
1467 	status = ll_fal_add(&cmd->addr);
1468 
1469 	*evt = cmd_complete_status(status);
1470 }
1471 
1472 static void le_rem_dev_from_fal(struct net_buf *buf, struct net_buf **evt)
1473 {
1474 	struct bt_hci_cp_le_rem_dev_from_fal *cmd = (void *)buf->data;
1475 	uint8_t status;
1476 
1477 	status = ll_fal_remove(&cmd->addr);
1478 
1479 	*evt = cmd_complete_status(status);
1480 }
1481 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1482 
1483 #if defined(CONFIG_BT_CTLR_CRYPTO)
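/* HCI LE Encrypt: run a single AES-128 ECB block over the supplied key and
 * plaintext using the ECB HAL and return the ciphertext in the Command
 * Complete event.
 */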
1484 static void le_encrypt(struct net_buf *buf, struct net_buf **evt)
1485 {
1486 	struct bt_hci_cp_le_encrypt *cmd = (void *)buf->data;
1487 	struct bt_hci_rp_le_encrypt *rp;
1488 	uint8_t enc_data[16];
1489 
1490 	ecb_encrypt(cmd->key, cmd->plaintext, enc_data, NULL);
1491 
1492 	rp = hci_cmd_complete(evt, sizeof(*rp));
1493 
1494 	rp->status = 0x00;
1495 	memcpy(rp->enc_data, enc_data, 16);
1496 }
1497 #endif /* CONFIG_BT_CTLR_CRYPTO */
1498 
1499 static void le_rand(struct net_buf *buf, struct net_buf **evt)
1500 {
1501 	struct bt_hci_rp_le_rand *rp;
1502 	uint8_t count = sizeof(rp->rand);
1503 
1504 	rp = hci_cmd_complete(evt, sizeof(*rp));
1505 	rp->status = 0x00;
1506 
1507 	lll_csrand_get(rp->rand, count);
1508 }
1509 
1510 static void le_read_supp_states(struct net_buf *buf, struct net_buf **evt)
1511 {
1512 	struct bt_hci_rp_le_read_supp_states *rp;
1513 	uint64_t states = 0U;
1514 
1515 	rp = hci_cmd_complete(evt, sizeof(*rp));
1516 	rp->status = 0x00;
1517 
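/* The ST_* masks below collect the bit positions of the LE Read Supported
 * States return value that involve the advertising, scanning, peripheral and
 * central roles respectively; the reported set is composed from the roles
 * enabled in the build.
 */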
1518 #define ST_ADV (BIT64(0)  | BIT64(1)  | BIT64(8)  | BIT64(9)  | BIT64(12) | \
1519 		BIT64(13) | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
1520 		BIT64(20) | BIT64(21))
1521 
1522 #define ST_SCA (BIT64(4)  | BIT64(5)  | BIT64(8)  | BIT64(9)  | BIT64(10) | \
1523 		BIT64(11) | BIT64(12) | BIT64(13) | BIT64(14) | BIT64(15) | \
1524 		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(26) | \
1525 		BIT64(27) | BIT64(30) | BIT64(31))
1526 
1527 #define ST_PER (BIT64(2)  | BIT64(3)  | BIT64(7)  | BIT64(10) | BIT64(11) | \
1528 		BIT64(14) | BIT64(15) | BIT64(20) | BIT64(21) | BIT64(26) | \
1529 		BIT64(27) | BIT64(29) | BIT64(30) | BIT64(31) | BIT64(32) | \
1530 		BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | BIT64(37) | \
1531 		BIT64(38) | BIT64(39) | BIT64(40) | BIT64(41))
1532 
1533 #define ST_CEN (BIT64(6)  | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
1534 		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(28) | \
1535 		BIT64(32) | BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | \
1536 		BIT64(37) | BIT64(41))
1537 
1538 #if defined(CONFIG_BT_BROADCASTER)
1539 	states |= ST_ADV;
1540 #else
1541 	states &= ~ST_ADV;
1542 #endif
1543 #if defined(CONFIG_BT_OBSERVER)
1544 	states |= ST_SCA;
1545 #else
1546 	states &= ~ST_SCA;
1547 #endif
1548 #if defined(CONFIG_BT_PERIPHERAL)
1549 	states |= ST_PER;
1550 #else
1551 	states &= ~ST_PER;
1552 #endif
1553 #if defined(CONFIG_BT_CENTRAL)
1554 	states |= ST_CEN;
1555 #else
1556 	states &= ~ST_CEN;
1557 #endif
1558 	/* All states and combinations supported except:
1559 	 * Initiating State + Passive Scanning
1560 	 * Initiating State + Active Scanning
1561 	 */
1562 	states &= ~(BIT64(22) | BIT64(23));
1563 	LOG_DBG("states: 0x%08x%08x", (uint32_t)(states >> 32), (uint32_t)(states & 0xffffffff));
1564 	sys_put_le64(states, rp->le_states);
1565 }
1566 
1567 #if defined(CONFIG_BT_BROADCASTER)
1568 static void le_set_adv_param(struct net_buf *buf, struct net_buf **evt)
1569 {
1570 	struct bt_hci_cp_le_set_adv_param *cmd = (void *)buf->data;
1571 	uint16_t min_interval;
1572 	uint8_t status;
1573 
1574 	if (adv_cmds_legacy_check(evt)) {
1575 		return;
1576 	}
1577 
1578 	min_interval = sys_le16_to_cpu(cmd->min_interval);
1579 
1580 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
1581 	    (cmd->type != BT_HCI_ADV_DIRECT_IND)) {
1582 		uint16_t max_interval = sys_le16_to_cpu(cmd->max_interval);
1583 
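		/* Advertising interval limits from the specification: 0x0020
		 * (20 ms) to 0x4000 (10.24 s), in 0.625 ms units.
		 */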
1584 		if ((min_interval > max_interval) ||
1585 		    (min_interval < 0x0020) ||
1586 		    (max_interval > 0x4000)) {
1587 			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
1588 			return;
1589 		}
1590 	}
1591 
1592 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1593 	status = ll_adv_params_set(0, 0, min_interval, cmd->type,
1594 				   cmd->own_addr_type, cmd->direct_addr.type,
1595 				   &cmd->direct_addr.a.val[0], cmd->channel_map,
1596 				   cmd->filter_policy, 0, 0, 0, 0, 0, 0);
1597 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1598 	status = ll_adv_params_set(min_interval, cmd->type,
1599 				   cmd->own_addr_type, cmd->direct_addr.type,
1600 				   &cmd->direct_addr.a.val[0], cmd->channel_map,
1601 				   cmd->filter_policy);
1602 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1603 
1604 	*evt = cmd_complete_status(status);
1605 }
1606 
1607 static void le_read_adv_chan_tx_power(struct net_buf *buf, struct net_buf **evt)
1608 {
1609 	struct bt_hci_rp_le_read_chan_tx_power *rp;
1610 
1611 	if (adv_cmds_legacy_check(evt)) {
1612 		return;
1613 	}
1614 
1615 	rp = hci_cmd_complete(evt, sizeof(*rp));
1616 
1617 	rp->status = 0x00;
1618 
1619 	rp->tx_power_level = 0;
1620 }
1621 
1622 static void le_set_adv_data(struct net_buf *buf, struct net_buf **evt)
1623 {
1624 	struct bt_hci_cp_le_set_adv_data *cmd = (void *)buf->data;
1625 	uint8_t status;
1626 
1627 	if (adv_cmds_legacy_check(evt)) {
1628 		return;
1629 	}
1630 
1631 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1632 	status = ll_adv_data_set(0, cmd->len, &cmd->data[0]);
1633 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1634 	status = ll_adv_data_set(cmd->len, &cmd->data[0]);
1635 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1636 
1637 	*evt = cmd_complete_status(status);
1638 }
1639 
1640 static void le_set_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
1641 {
1642 	struct bt_hci_cp_le_set_scan_rsp_data *cmd = (void *)buf->data;
1643 	uint8_t status;
1644 
1645 	if (adv_cmds_legacy_check(evt)) {
1646 		return;
1647 	}
1648 
1649 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1650 	status = ll_adv_scan_rsp_set(0, cmd->len, &cmd->data[0]);
1651 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1652 	status = ll_adv_scan_rsp_set(cmd->len, &cmd->data[0]);
1653 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1654 
1655 	*evt = cmd_complete_status(status);
1656 }
1657 
1658 static void le_set_adv_enable(struct net_buf *buf, struct net_buf **evt)
1659 {
1660 	struct bt_hci_cp_le_set_adv_enable *cmd = (void *)buf->data;
1661 	uint8_t status;
1662 
1663 	if (adv_cmds_legacy_check(evt)) {
1664 		return;
1665 	}
1666 
1667 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
1668 #if defined(CONFIG_BT_HCI_MESH_EXT)
1669 	status = ll_adv_enable(0, cmd->enable, 0, 0, 0, 0, 0);
1670 #else /* !CONFIG_BT_HCI_MESH_EXT */
1671 	status = ll_adv_enable(0, cmd->enable, 0, 0);
1672 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1673 #else /* !CONFIG_BT_CTLR_ADV_EXT && !CONFIG_BT_HCI_MESH_EXT */
1674 	status = ll_adv_enable(cmd->enable);
1675 #endif /* !CONFIG_BT_CTLR_ADV_EXT && !CONFIG_BT_HCI_MESH_EXT */
1676 
1677 	*evt = cmd_complete_status(status);
1678 }
1679 
1680 #if defined(CONFIG_BT_CTLR_ADV_ISO)
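/* Handle the HCI LE Create BIG command: map the HCI BIG and advertising set
 * handles to controller handles, convert the little-endian parameters and
 * hand off to ll_big_create(); the result is reported via a command status
 * event.
 */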
1681 static void le_create_big(struct net_buf *buf, struct net_buf **evt)
1682 {
1683 	struct bt_hci_cp_le_create_big *cmd = (void *)buf->data;
1684 	uint32_t sdu_interval;
1685 	uint16_t max_latency;
1686 	uint8_t big_handle;
1687 	uint8_t adv_handle;
1688 	uint16_t max_sdu;
1689 	uint8_t status;
1690 
1691 	status = ll_adv_iso_by_hci_handle_new(cmd->big_handle, &big_handle);
1692 	if (status) {
1693 		*evt = cmd_status(status);
1694 		return;
1695 	}
1696 
1697 	status = ll_adv_set_by_hci_handle_get(cmd->adv_handle, &adv_handle);
1698 	if (status) {
1699 		*evt = cmd_status(status);
1700 		return;
1701 	}
1702 
1703 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1704 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1705 	max_latency = sys_le16_to_cpu(cmd->max_latency);
1706 
1707 	status = ll_big_create(big_handle, adv_handle, cmd->num_bis,
1708 			       sdu_interval, max_sdu, max_latency, cmd->rtn,
1709 			       cmd->phy, cmd->packing, cmd->framing,
1710 			       cmd->encryption, cmd->bcode);
1711 
1712 	*evt = cmd_status(status);
1713 }
1714 
1715 static void le_create_big_test(struct net_buf *buf, struct net_buf **evt)
1716 {
1717 	struct bt_hci_cp_le_create_big_test *cmd = (void *)buf->data;
1718 	uint32_t sdu_interval;
1719 	uint16_t iso_interval;
1720 	uint16_t max_sdu;
1721 	uint16_t max_pdu;
1722 	uint8_t status;
1723 
1724 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1725 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
1726 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1727 	max_pdu = sys_le16_to_cpu(cmd->max_pdu);
1728 
1729 	status = ll_big_test_create(cmd->big_handle, cmd->adv_handle,
1730 				    cmd->num_bis, sdu_interval, iso_interval,
1731 				    cmd->nse, max_sdu, max_pdu, cmd->phy,
1732 				    cmd->packing, cmd->framing, cmd->bn,
1733 				    cmd->irc, cmd->pto, cmd->encryption,
1734 				    cmd->bcode);
1735 
1736 	*evt = cmd_status(status);
1737 }
1738 
1739 static void le_terminate_big(struct net_buf *buf, struct net_buf **evt)
1740 {
1741 	struct bt_hci_cp_le_terminate_big *cmd = (void *)buf->data;
1742 	uint8_t status;
1743 
1744 	status = ll_big_terminate(cmd->big_handle, cmd->reason);
1745 
1746 	*evt = cmd_status(status);
1747 }
1748 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1749 #endif /* CONFIG_BT_BROADCASTER */
1750 
1751 #if defined(CONFIG_BT_OBSERVER)
1752 static void le_set_scan_param(struct net_buf *buf, struct net_buf **evt)
1753 {
1754 	struct bt_hci_cp_le_set_scan_param *cmd = (void *)buf->data;
1755 	uint16_t interval;
1756 	uint16_t window;
1757 	uint8_t status;
1758 
1759 	if (adv_cmds_legacy_check(evt)) {
1760 		return;
1761 	}
1762 
1763 	interval = sys_le16_to_cpu(cmd->interval);
1764 	window = sys_le16_to_cpu(cmd->window);
1765 
1766 	status = ll_scan_params_set(cmd->scan_type, interval, window,
1767 				    cmd->addr_type, cmd->filter_policy);
1768 
1769 	*evt = cmd_complete_status(status);
1770 }
1771 
1772 static void le_set_scan_enable(struct net_buf *buf, struct net_buf **evt)
1773 {
1774 	struct bt_hci_cp_le_set_scan_enable *cmd = (void *)buf->data;
1775 	uint8_t status;
1776 
1777 	if (adv_cmds_legacy_check(evt)) {
1778 		return;
1779 	}
1780 
1781 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
1782 	/* Initialize duplicate filtering */
1783 	if (cmd->enable && cmd->filter_dup) {
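		/* NOTE: The if (0) below lets the conditionally compiled
		 *       branches chain as else-if while sharing the common
		 *       reset path in the final else clause.
		 */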
1784 		if (0) {
1785 
1786 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1787 		} else if (dup_count == DUP_FILTER_DISABLED) {
1788 			dup_scan = true;
1789 
1790 			/* All entries reset */
1791 			dup_count = 0;
1792 			dup_curr = 0U;
1793 		} else if (!dup_scan) {
1794 			dup_scan = true;
1795 			dup_ext_adv_reset();
1796 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1797 
1798 		} else {
1799 			/* All entries reset */
1800 			dup_count = 0;
1801 			dup_curr = 0U;
1802 		}
1803 	} else {
1804 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1805 		dup_scan = false;
1806 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1807 		dup_count = DUP_FILTER_DISABLED;
1808 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
1809 	}
1810 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
1811 
1812 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1813 	status = ll_scan_enable(cmd->enable, 0, 0);
1814 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1815 	status = ll_scan_enable(cmd->enable);
1816 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1817 
1818 	/* NOTE: As duplicate filtering is implemented here in the HCI source
1819 	 *       code, enabling already enabled scanning shall succeed after
1820 	 *       the duplicate filtering updates handled in the statements
1821 	 *       above. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
1822 	 */
1823 	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
1824 	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
1825 		status = BT_HCI_ERR_SUCCESS;
1826 	}
1827 
1828 	*evt = cmd_complete_status(status);
1829 }
1830 
1831 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1832 static void le_big_create_sync(struct net_buf *buf, struct net_buf **evt)
1833 {
1834 	struct bt_hci_cp_le_big_create_sync *cmd = (void *)buf->data;
1835 	uint8_t status;
1836 	uint16_t sync_handle;
1837 	uint16_t sync_timeout;
1838 
1839 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
1840 	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
1841 
1842 	status = ll_big_sync_create(cmd->big_handle, sync_handle,
1843 				    cmd->encryption, cmd->bcode, cmd->mse,
1844 				    sync_timeout, cmd->num_bis, cmd->bis);
1845 
1846 	*evt = cmd_status(status);
1847 }
1848 
1849 
1850 static void le_big_terminate_sync(struct net_buf *buf, struct net_buf **evt,
1851 				  void **node_rx)
1852 {
1853 	struct bt_hci_cp_le_big_terminate_sync *cmd = (void *)buf->data;
1854 	struct bt_hci_rp_le_big_terminate_sync *rp;
1855 	uint8_t big_handle;
1856 	uint8_t status;
1857 
1858 	big_handle = cmd->big_handle;
1859 	status = ll_big_sync_terminate(big_handle, node_rx);
1860 
1861 	rp = hci_cmd_complete(evt, sizeof(*rp));
1862 	rp->status = status;
1863 	rp->big_handle = big_handle;
1864 }
1865 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1866 #endif /* CONFIG_BT_OBSERVER */
1867 
1868 #if defined(CONFIG_BT_CENTRAL)
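/* Validate Create Connection parameters against the ranges checked below:
 * scan interval/window, connection interval, peripheral latency and
 * supervision timeout. Returns BT_HCI_ERR_INVALID_PARAM on any violation.
 */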
1869 static uint8_t check_cconn_params(bool ext, uint16_t scan_interval,
1870 				  uint16_t scan_window,
1871 				  uint16_t conn_interval_max,
1872 				  uint16_t conn_latency,
1873 				  uint16_t supervision_timeout)
1874 {
1875 	if (scan_interval < 0x0004 || scan_window < 0x0004 ||
1876 	    (!ext && (scan_interval > 0x4000 || scan_window > 0x4000))) {
1877 		return BT_HCI_ERR_INVALID_PARAM;
1878 	}
1879 
1880 	if (conn_interval_max < 0x0006 || conn_interval_max > 0x0C80) {
1881 		return BT_HCI_ERR_INVALID_PARAM;
1882 	}
1883 
1884 	if (conn_latency > 0x01F3) {
1885 		return BT_HCI_ERR_INVALID_PARAM;
1886 	}
1887 
1888 	if (supervision_timeout < 0x000A || supervision_timeout > 0x0C80) {
1889 		return BT_HCI_ERR_INVALID_PARAM;
1890 	}
1891 
1892 	/* sto * 10ms > (1 + lat) * ci * 1.25ms * 2
1893 	 * sto * 10 > (1 + lat) * ci * 2.5
1894 	 * sto * 2 > (1 + lat) * ci * 0.5
1895 	 * sto * 4 > (1 + lat) * ci
1896 	 */
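	/* Example: with conn_interval_max = 0x0C80 (4 s) and latency = 0, the
	 * check below requires supervision_timeout * 4 > 3200, i.e. a timeout
	 * greater than 800 (8 s in 10 ms units).
	 */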
1897 	if ((supervision_timeout << 2) <= ((1 + conn_latency) *
1898 					   conn_interval_max)) {
1899 		return BT_HCI_ERR_INVALID_PARAM;
1900 	}
1901 
1902 	return 0;
1903 }
1904 
1905 static void le_create_connection(struct net_buf *buf, struct net_buf **evt)
1906 {
1907 	struct bt_hci_cp_le_create_conn *cmd = (void *)buf->data;
1908 	uint16_t supervision_timeout;
1909 	uint16_t conn_interval_max;
1910 	uint16_t scan_interval;
1911 	uint16_t conn_latency;
1912 	uint16_t scan_window;
1913 	uint8_t status;
1914 
1915 	if (adv_cmds_legacy_check(NULL)) {
1916 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
1917 		return;
1918 	}
1919 
1920 	scan_interval = sys_le16_to_cpu(cmd->scan_interval);
1921 	scan_window = sys_le16_to_cpu(cmd->scan_window);
1922 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
1923 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
1924 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
1925 
1926 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
1927 		status = check_cconn_params(false, scan_interval,
1928 					    scan_window,
1929 					    conn_interval_max,
1930 					    conn_latency,
1931 					    supervision_timeout);
1932 		if (status) {
1933 			*evt = cmd_status(status);
1934 			return;
1935 		}
1936 	}
1937 
1938 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1939 	status = ll_create_connection(scan_interval, scan_window,
1940 				      cmd->filter_policy,
1941 				      cmd->peer_addr.type,
1942 				      &cmd->peer_addr.a.val[0],
1943 				      cmd->own_addr_type, conn_interval_max,
1944 				      conn_latency, supervision_timeout,
1945 				      PHY_LEGACY);
1946 	if (status) {
1947 		*evt = cmd_status(status);
1948 		return;
1949 	}
1950 
1951 	status = ll_connect_enable(0U);
1952 
1953 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1954 	status = ll_create_connection(scan_interval, scan_window,
1955 				      cmd->filter_policy,
1956 				      cmd->peer_addr.type,
1957 				      &cmd->peer_addr.a.val[0],
1958 				      cmd->own_addr_type, conn_interval_max,
1959 				      conn_latency, supervision_timeout);
1960 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1961 
1962 	*evt = cmd_status(status);
1963 }
1964 
1965 static void le_create_conn_cancel(struct net_buf *buf, struct net_buf **evt,
1966 				  void **node_rx)
1967 {
1968 	uint8_t status;
1969 
1970 	status = ll_connect_disable(node_rx);
1971 
1972 	*evt = cmd_complete_status(status);
1973 }
1974 
1975 static void le_set_host_chan_classif(struct net_buf *buf, struct net_buf **evt)
1976 {
1977 	struct bt_hci_cp_le_set_host_chan_classif *cmd = (void *)buf->data;
1978 	uint8_t status;
1979 
1980 	status = ll_chm_update(&cmd->ch_map[0]);
1981 
1982 	*evt = cmd_complete_status(status);
1983 }
1984 
1985 #if defined(CONFIG_BT_CTLR_LE_ENC)
1986 static void le_start_encryption(struct net_buf *buf, struct net_buf **evt)
1987 {
1988 	struct bt_hci_cp_le_start_encryption *cmd = (void *)buf->data;
1989 	uint16_t handle;
1990 	uint8_t status;
1991 
1992 	handle = sys_le16_to_cpu(cmd->handle);
1993 	status = ll_enc_req_send(handle,
1994 				 (uint8_t *)&cmd->rand,
1995 				 (uint8_t *)&cmd->ediv,
1996 				 &cmd->ltk[0]);
1997 
1998 	*evt = cmd_status(status);
1999 }
2000 #endif /* CONFIG_BT_CTLR_LE_ENC */
2001 
2002 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
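/* CIG configuration follows an open -> per-CIS set -> commit sequence:
 * ll_cig_parameters_open() creates or starts modifying the CIG, each CIS is
 * then configured individually, and ll_cig_parameters_commit() applies the
 * parameters and returns the CIS handles placed in the command complete
 * event. The commit is skipped if any earlier step failed.
 */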
2003 static void le_set_cig_parameters(struct net_buf *buf, struct net_buf **evt)
2004 {
2005 	struct bt_hci_cp_le_set_cig_params *cmd = (void *)buf->data;
2006 	struct bt_hci_rp_le_set_cig_params *rp;
2007 	uint32_t c_interval;
2008 	uint32_t p_interval;
2009 	uint16_t c_latency;
2010 	uint16_t p_latency;
2011 	uint8_t cis_count;
2012 	uint8_t cig_id;
2013 	uint8_t status;
2014 	uint8_t i;
2015 
2016 	c_interval = sys_get_le24(cmd->c_interval);
2017 	p_interval = sys_get_le24(cmd->p_interval);
2018 	c_latency = sys_le16_to_cpu(cmd->c_latency);
2019 	p_latency = sys_le16_to_cpu(cmd->p_latency);
2020 
2021 	cig_id = cmd->cig_id;
2022 	cis_count = cmd->num_cis;
2023 
2024 	/* Create CIG or start modifying existing CIG */
2025 	status = ll_cig_parameters_open(cig_id, c_interval, p_interval,
2026 					cmd->sca, cmd->packing, cmd->framing,
2027 					c_latency, p_latency, cis_count);
2028 
2029 	/* Configure individual CISes */
2030 	for (i = 0; !status && i < cis_count; i++) {
2031 		struct bt_hci_cis_params *params = &cmd->cis[i];
2032 		uint16_t c_sdu;
2033 		uint16_t p_sdu;
2034 
2035 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2036 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2037 
2038 		status = ll_cis_parameters_set(params->cis_id, c_sdu, p_sdu,
2039 					       params->c_phy, params->p_phy,
2040 					       params->c_rtn, params->p_rtn);
2041 	}
2042 
2043 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2044 	rp->cig_id = cig_id;
2045 
2046 	/* Only apply parameters if all went well */
2047 	if (!status) {
2048 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2049 
2050 		status = ll_cig_parameters_commit(cig_id, handles);
2051 
2052 		if (status == BT_HCI_ERR_SUCCESS) {
2053 			for (uint8_t j = 0; j < cis_count; j++) {
2054 				rp->handle[j] = sys_cpu_to_le16(handles[j]);
2055 			}
2056 		}
2057 	}
2058 
2059 	rp->num_handles = status ? 0U : cis_count;
2060 	rp->status = status;
2061 }
2062 
2063 static void le_set_cig_params_test(struct net_buf *buf, struct net_buf **evt)
2064 {
2065 	struct bt_hci_cp_le_set_cig_params_test *cmd = (void *)buf->data;
2066 	struct bt_hci_rp_le_set_cig_params_test *rp;
2067 
2068 	uint32_t c_interval;
2069 	uint32_t p_interval;
2070 	uint16_t iso_interval;
2071 	uint8_t cis_count;
2072 	uint8_t cig_id;
2073 	uint8_t status;
2074 	uint8_t i;
2075 
2076 	c_interval = sys_get_le24(cmd->c_interval);
2077 	p_interval = sys_get_le24(cmd->p_interval);
2078 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
2079 
2080 	cig_id = cmd->cig_id;
2081 	cis_count = cmd->num_cis;
2082 
2083 	/* Create CIG or start modifying existing CIG */
2084 	status = ll_cig_parameters_test_open(cig_id, c_interval,
2085 					     p_interval, cmd->c_ft,
2086 					     cmd->p_ft, iso_interval,
2087 					     cmd->sca, cmd->packing,
2088 					     cmd->framing,
2089 					     cis_count);
2090 
2091 	/* Configure individual CISes */
2092 	for (i = 0; !status && i < cis_count; i++) {
2093 		struct bt_hci_cis_params_test *params = &cmd->cis[i];
2094 		uint16_t c_sdu;
2095 		uint16_t p_sdu;
2096 		uint16_t c_pdu;
2097 		uint16_t p_pdu;
2098 		uint8_t  nse;
2099 
2100 		nse   = params->nse;
2101 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2102 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2103 		c_pdu = sys_le16_to_cpu(params->c_pdu);
2104 		p_pdu = sys_le16_to_cpu(params->p_pdu);
2105 
2106 		status = ll_cis_parameters_test_set(params->cis_id, nse,
2107 						    c_sdu, p_sdu,
2108 						    c_pdu, p_pdu,
2109 						    params->c_phy,
2110 						    params->p_phy,
2111 						    params->c_bn,
2112 						    params->p_bn);
2113 	}
2114 
2115 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2116 	rp->cig_id = cig_id;
2117 
2118 	/* Only apply parameters if all went well */
2119 	if (!status) {
2120 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2121 
2122 		status = ll_cig_parameters_commit(cig_id, handles);
2123 
2124 		if (status == BT_HCI_ERR_SUCCESS) {
2125 			for (uint8_t j = 0; j < cis_count; j++) {
2126 				rp->handle[j] = sys_cpu_to_le16(handles[j]);
2127 			}
2128 		}
2129 	}
2130 
2131 	rp->num_handles = status ? 0U : cis_count;
2132 	rp->status = status;
2133 }
2134 
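/* LE Create CIS is handled in two passes: first every CIS/ACL handle pair is
 * validated without side effects, then, only if all checks passed, the CISes
 * are actually created and any further errors are reported through CIS
 * Established events.
 */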
2135 static void le_create_cis(struct net_buf *buf, struct net_buf **evt)
2136 {
2137 	uint16_t handle_used[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP] = {0};
2138 	struct bt_hci_cp_le_create_cis *cmd = (void *)buf->data;
2139 	uint8_t status;
2140 	uint8_t i;
2141 
2142 	/*
2143 	 * Only create a CIS if the Isochronous Channels (Host Support) feature bit
2144 	 * is set. Refer to BT Spec v5.4 Vol 6 Part B Section 4.6.33.1.
2145 	 */
2146 	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS))) {
2147 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2148 		return;
2149 	}
2150 
2151 	/*
2152 	 * Creating new CISes is disallowed until all previous CIS
2153 	 * established events have been generated
2154 	 */
2155 	if (cis_pending_count) {
2156 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2157 		return;
2158 	}
2159 
2160 	/* Check all handles before actually starting to create CISes */
2161 	status = 0x00;
2162 	for (i = 0; !status && i < cmd->num_cis; i++) {
2163 		uint16_t cis_handle;
2164 		uint16_t acl_handle;
2165 		uint8_t cis_idx;
2166 
2167 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2168 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2169 
2170 		cis_idx = LL_CIS_IDX_FROM_HANDLE(cis_handle);
2171 		if (handle_used[cis_idx]) {
2172 			/* Handle must be unique in request */
2173 			status = BT_HCI_ERR_INVALID_PARAM;
2174 			break;
2175 		}
2176 
2177 		handle_used[cis_idx]++;
2178 		status = ll_cis_create_check(cis_handle, acl_handle);
2179 	}
2180 
2181 	if (status) {
2182 		*evt = cmd_status(status);
2183 		return;
2184 	}
2185 
2186 	/*
2187 	 * Actually create CISes, any errors are to be reported
2188 	 * through CIS established events
2189 	 */
2190 	cis_pending_count = cmd->num_cis;
2191 	for (i = 0; i < cmd->num_cis; i++) {
2192 		uint16_t cis_handle;
2193 		uint16_t acl_handle;
2194 
2195 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2196 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2197 		ll_cis_create(cis_handle, acl_handle);
2198 	}
2199 
2200 	*evt = cmd_status(status);
2201 }
2202 
2203 static void le_remove_cig(struct net_buf *buf, struct net_buf **evt)
2204 {
2205 	struct bt_hci_cp_le_remove_cig *cmd = (void *)buf->data;
2206 	struct bt_hci_rp_le_remove_cig *rp;
2207 	uint8_t status;
2208 
2209 	status = ll_cig_remove(cmd->cig_id);
2210 
2211 	rp = hci_cmd_complete(evt, sizeof(*rp));
2212 	rp->status = status;
2213 	rp->cig_id = cmd->cig_id;
2214 }
2215 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
2216 
2217 #endif /* CONFIG_BT_CENTRAL */
2218 
2219 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
2220 static void le_iso_transmit_test(struct net_buf *buf, struct net_buf **evt)
2221 {
2222 	struct bt_hci_cp_le_iso_transmit_test *cmd = (void *)buf->data;
2223 	struct bt_hci_rp_le_iso_transmit_test *rp;
2224 	uint16_t handle;
2225 	uint8_t status;
2226 
2227 	handle = sys_le16_to_cpu(cmd->handle);
2228 
2229 	status = ll_iso_transmit_test(handle, cmd->payload_type);
2230 
2231 	rp = hci_cmd_complete(evt, sizeof(*rp));
2232 	rp->status = status;
2233 	rp->handle = sys_cpu_to_le16(handle);
2234 }
2235 
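/* NOTE: The connection handle is kept in its little-endian wire form
 *       (handle_le16) so it can be echoed back unchanged in the command
 *       complete response.
 */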
2236 static void le_read_iso_tx_sync(struct net_buf *buf, struct net_buf **evt)
2237 {
2238 	struct bt_hci_cp_le_read_iso_tx_sync *cmd = (void *)buf->data;
2239 	struct bt_hci_rp_le_read_iso_tx_sync *rp;
2240 	uint16_t handle_le16;
2241 	uint32_t timestamp;
2242 	uint32_t offset;
2243 	uint16_t handle;
2244 	uint8_t status;
2245 	uint16_t seq;
2246 
2247 	handle_le16 = cmd->handle;
2248 	handle = sys_le16_to_cpu(handle_le16);
2249 
2250 	status = ll_read_iso_tx_sync(handle, &seq, &timestamp, &offset);
2251 
2252 	rp = hci_cmd_complete(evt, sizeof(*rp));
2253 	rp->status = status;
2254 	rp->handle = handle_le16;
2255 	rp->seq       = sys_cpu_to_le16(seq);
2256 	rp->timestamp = sys_cpu_to_le32(timestamp);
2257 	sys_put_le24(offset, rp->offset);
2258 }
2259 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2260 
2261 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
2262 static void le_iso_receive_test(struct net_buf *buf, struct net_buf **evt)
2263 {
2264 	struct bt_hci_cp_le_iso_receive_test *cmd = (void *)buf->data;
2265 	struct bt_hci_rp_le_iso_receive_test *rp;
2266 	uint16_t handle;
2267 	uint8_t status;
2268 
2269 	handle = sys_le16_to_cpu(cmd->handle);
2270 
2271 	status = ll_iso_receive_test(handle, cmd->payload_type);
2272 
2273 	rp = hci_cmd_complete(evt, sizeof(*rp));
2274 	rp->status = status;
2275 	rp->handle = sys_cpu_to_le16(handle);
2276 }
2277 
2278 static void le_iso_read_test_counters(struct net_buf *buf, struct net_buf **evt)
2279 {
2280 	struct bt_hci_cp_le_read_test_counters *cmd = (void *)buf->data;
2281 	struct bt_hci_rp_le_read_test_counters *rp;
2282 	uint32_t received_cnt;
2283 	uint32_t missed_cnt;
2284 	uint32_t failed_cnt;
2285 	uint16_t handle;
2286 	uint8_t status;
2287 
2288 	handle = sys_le16_to_cpu(cmd->handle);
2289 	status = ll_iso_read_test_counters(handle, &received_cnt,
2290 					   &missed_cnt, &failed_cnt);
2291 
2292 	rp = hci_cmd_complete(evt, sizeof(*rp));
2293 	rp->status = status;
2294 	rp->handle = sys_cpu_to_le16(handle);
2295 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2296 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2297 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2298 }
2299 
2300 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
2301 static void le_read_iso_link_quality(struct net_buf *buf, struct net_buf **evt)
2302 {
2303 	struct bt_hci_cp_le_read_iso_link_quality *cmd = (void *)buf->data;
2304 	struct bt_hci_rp_le_read_iso_link_quality *rp;
2305 	uint32_t tx_last_subevent_packets;
2306 	uint32_t retransmitted_packets;
2307 	uint32_t rx_unreceived_packets;
2308 	uint32_t tx_unacked_packets;
2309 	uint32_t tx_flushed_packets;
2310 	uint32_t crc_error_packets;
2311 	uint32_t duplicate_packets;
2312 	uint16_t handle_le16;
2313 	uint16_t handle;
2314 	uint8_t status;
2315 
2316 	handle_le16 = cmd->handle;
2317 	handle = sys_le16_to_cpu(handle_le16);
2318 	status = ll_read_iso_link_quality(handle, &tx_unacked_packets,
2319 					  &tx_flushed_packets,
2320 					  &tx_last_subevent_packets,
2321 					  &retransmitted_packets,
2322 					  &crc_error_packets,
2323 					  &rx_unreceived_packets,
2324 					  &duplicate_packets);
2325 
2326 	rp = hci_cmd_complete(evt, sizeof(*rp));
2327 	rp->status = status;
2328 	rp->handle = handle_le16;
2329 	rp->tx_unacked_packets = sys_cpu_to_le32(tx_unacked_packets);
2330 	rp->tx_flushed_packets = sys_cpu_to_le32(tx_flushed_packets);
2331 	rp->tx_last_subevent_packets =
2332 		sys_cpu_to_le32(tx_last_subevent_packets);
2333 	rp->retransmitted_packets = sys_cpu_to_le32(retransmitted_packets);
2334 	rp->crc_error_packets     = sys_cpu_to_le32(crc_error_packets);
2335 	rp->rx_unreceived_packets = sys_cpu_to_le32(rx_unreceived_packets);
2336 	rp->duplicate_packets     = sys_cpu_to_le32(duplicate_packets);
2337 }
2338 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
2339 
2340 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
2341 
2342 #if defined(CONFIG_BT_CTLR_ISO)
2343 static void le_setup_iso_path(struct net_buf *buf, struct net_buf **evt)
2344 {
2345 	struct bt_hci_cp_le_setup_iso_path *cmd = (void *)buf->data;
2346 	struct bt_hci_rp_le_setup_iso_path *rp;
2347 	uint32_t controller_delay;
2348 	uint8_t *codec_config;
2349 	uint8_t coding_format;
2350 	uint16_t vs_codec_id;
2351 	uint16_t company_id;
2352 	uint16_t handle;
2353 	uint8_t status;
2354 
2355 	handle = sys_le16_to_cpu(cmd->handle);
2356 	coding_format = cmd->codec_id.coding_format;
2357 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
2358 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
2359 	controller_delay = sys_get_le24(cmd->controller_delay);
2360 	codec_config = &cmd->codec_config[0];
2361 
2362 	status = ll_setup_iso_path(handle, cmd->path_dir, cmd->path_id,
2363 				   coding_format, company_id, vs_codec_id,
2364 				   controller_delay, cmd->codec_config_len,
2365 				   codec_config);
2366 
2367 	rp = hci_cmd_complete(evt, sizeof(*rp));
2368 	rp->status = status;
2369 	rp->handle = sys_cpu_to_le16(handle);
2370 }
2371 
2372 static void le_remove_iso_path(struct net_buf *buf, struct net_buf **evt)
2373 {
2374 	struct bt_hci_cp_le_remove_iso_path *cmd = (void *)buf->data;
2375 	struct bt_hci_rp_le_remove_iso_path *rp;
2376 	uint16_t handle;
2377 	uint8_t status;
2378 
2379 	handle = sys_le16_to_cpu(cmd->handle);
2380 
2381 	status = ll_remove_iso_path(handle, cmd->path_dir);
2382 
2383 	rp = hci_cmd_complete(evt, sizeof(*rp));
2384 	rp->status = status;
2385 	rp->handle = sys_cpu_to_le16(handle);
2386 }
2387 
2388 static void le_iso_test_end(struct net_buf *buf, struct net_buf **evt)
2389 {
2390 	struct bt_hci_cp_le_iso_test_end *cmd = (void *)buf->data;
2391 	struct bt_hci_rp_le_iso_test_end *rp;
2392 	uint32_t received_cnt;
2393 	uint32_t missed_cnt;
2394 	uint32_t failed_cnt;
2395 	uint16_t handle;
2396 	uint8_t status;
2397 
2398 	handle = sys_le16_to_cpu(cmd->handle);
2399 	status = ll_iso_test_end(handle, &received_cnt, &missed_cnt,
2400 				 &failed_cnt);
2401 
2402 	rp = hci_cmd_complete(evt, sizeof(*rp));
2403 	rp->status = status;
2404 	rp->handle = sys_cpu_to_le16(handle);
2405 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2406 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2407 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2408 }
2409 #endif /* CONFIG_BT_CTLR_ISO */
2410 
2411 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
2412 static void le_set_host_feature(struct net_buf *buf, struct net_buf **evt)
2413 {
2414 	struct bt_hci_cp_le_set_host_feature *cmd = (void *)buf->data;
2415 	struct bt_hci_rp_le_set_host_feature *rp;
2416 	uint8_t status;
2417 
2418 	status = ll_set_host_feature(cmd->bit_number, cmd->bit_value);
2419 
2420 	rp = hci_cmd_complete(evt, sizeof(*rp));
2421 	rp->status = status;
2422 }
2423 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
2424 
2425 #if defined(CONFIG_BT_PERIPHERAL)
2426 #if defined(CONFIG_BT_CTLR_LE_ENC)
2427 static void le_ltk_req_reply(struct net_buf *buf, struct net_buf **evt)
2428 {
2429 	struct bt_hci_cp_le_ltk_req_reply *cmd = (void *)buf->data;
2430 	struct bt_hci_rp_le_ltk_req_reply *rp;
2431 	uint16_t handle;
2432 	uint8_t status;
2433 
2434 	handle = sys_le16_to_cpu(cmd->handle);
2435 	status = ll_start_enc_req_send(handle, 0x00, &cmd->ltk[0]);
2436 
2437 	rp = hci_cmd_complete(evt, sizeof(*rp));
2438 	rp->status = status;
2439 	rp->handle = sys_cpu_to_le16(handle);
2440 }
2441 
2442 static void le_ltk_req_neg_reply(struct net_buf *buf, struct net_buf **evt)
2443 {
2444 	struct bt_hci_cp_le_ltk_req_neg_reply *cmd = (void *)buf->data;
2445 	struct bt_hci_rp_le_ltk_req_neg_reply *rp;
2446 	uint16_t handle;
2447 	uint8_t status;
2448 
2449 	handle = sys_le16_to_cpu(cmd->handle);
2450 	status = ll_start_enc_req_send(handle, BT_HCI_ERR_PIN_OR_KEY_MISSING,
2451 				       NULL);
2452 
2453 	rp = hci_cmd_complete(evt, sizeof(*rp));
2454 	rp->status = status;
2455 	rp->handle = sys_cpu_to_le16(handle);
2456 }
2457 #endif /* CONFIG_BT_CTLR_LE_ENC */
2458 
2459 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
2460 static void le_accept_cis(struct net_buf *buf, struct net_buf **evt)
2461 {
2462 	struct bt_hci_cp_le_accept_cis *cmd = (void *)buf->data;
2463 	uint16_t handle;
2464 	uint8_t status;
2465 
2466 	handle = sys_le16_to_cpu(cmd->handle);
2467 	status = ll_cis_accept(handle);
2468 	*evt = cmd_status(status);
2469 }
2470 
2471 static void le_reject_cis(struct net_buf *buf, struct net_buf **evt)
2472 {
2473 	struct bt_hci_cp_le_reject_cis *cmd = (void *)buf->data;
2474 	struct bt_hci_rp_le_reject_cis *rp;
2475 	uint16_t handle;
2476 	uint8_t status;
2477 
2478 	handle = sys_le16_to_cpu(cmd->handle);
2479 	status = ll_cis_reject(handle, cmd->reason);
2480 
2481 	rp = hci_cmd_complete(evt, sizeof(*rp));
2482 	rp->status = status;
2483 	rp->handle = sys_cpu_to_le16(handle);
2484 }
2485 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
2486 
2487 #endif /* CONFIG_BT_PERIPHERAL */
2488 
2489 #if defined(CONFIG_BT_CONN)
2490 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
2491 static void le_req_peer_sca(struct net_buf *buf, struct net_buf **evt)
2492 {
2493 	struct bt_hci_cp_le_req_peer_sca *cmd = (void *)buf->data;
2494 	uint16_t handle;
2495 	uint8_t status;
2496 
2497 	handle = sys_le16_to_cpu(cmd->handle);
2498 	status = ll_req_peer_sca(handle);
2499 
2500 	*evt = cmd_status(status);
2501 }
2502 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
2503 
2504 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
2505 static void le_read_remote_features(struct net_buf *buf, struct net_buf **evt)
2506 {
2507 	struct bt_hci_cp_le_read_remote_features *cmd = (void *)buf->data;
2508 	uint16_t handle;
2509 	uint8_t status;
2510 
2511 	handle = sys_le16_to_cpu(cmd->handle);
2512 	status = ll_feature_req_send(handle);
2513 
2514 	*evt = cmd_status(status);
2515 }
2516 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
2517 
2518 static void le_read_chan_map(struct net_buf *buf, struct net_buf **evt)
2519 {
2520 	struct bt_hci_cp_le_read_chan_map *cmd = (void *)buf->data;
2521 	struct bt_hci_rp_le_read_chan_map *rp;
2522 	uint16_t handle;
2523 	uint8_t status;
2524 
2525 	handle = sys_le16_to_cpu(cmd->handle);
2526 
2527 	rp = hci_cmd_complete(evt, sizeof(*rp));
2528 
2529 	status = ll_chm_get(handle, rp->ch_map);
2530 
2531 	rp->status = status;
2532 	rp->handle = sys_cpu_to_le16(handle);
2533 }
2534 
2535 static void le_conn_update(struct net_buf *buf, struct net_buf **evt)
2536 {
2537 	struct hci_cp_le_conn_update *cmd = (void *)buf->data;
2538 	uint16_t supervision_timeout;
2539 	uint16_t conn_interval_min;
2540 	uint16_t conn_interval_max;
2541 	uint16_t conn_latency;
2542 	uint16_t handle;
2543 	uint8_t status;
2544 
2545 	handle = sys_le16_to_cpu(cmd->handle);
2546 	conn_interval_min = sys_le16_to_cpu(cmd->conn_interval_min);
2547 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
2548 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
2549 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
2550 
2551 	status = ll_conn_update(handle, 0, 0, conn_interval_min,
2552 				conn_interval_max, conn_latency,
2553 				supervision_timeout, NULL);
2554 
2555 	*evt = cmd_status(status);
2556 }
2557 
2558 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2559 static void le_conn_param_req_reply(struct net_buf *buf, struct net_buf **evt)
2560 {
2561 	struct bt_hci_cp_le_conn_param_req_reply *cmd = (void *)buf->data;
2562 	struct bt_hci_rp_le_conn_param_req_reply *rp;
2563 	uint16_t interval_min;
2564 	uint16_t interval_max;
2565 	uint16_t latency;
2566 	uint16_t timeout;
2567 	uint16_t handle;
2568 	uint8_t status;
2569 
2570 	handle = sys_le16_to_cpu(cmd->handle);
2571 	interval_min = sys_le16_to_cpu(cmd->interval_min);
2572 	interval_max = sys_le16_to_cpu(cmd->interval_max);
2573 	latency = sys_le16_to_cpu(cmd->latency);
2574 	timeout = sys_le16_to_cpu(cmd->timeout);
2575 
2576 	status = ll_conn_update(handle, 2, 0, interval_min, interval_max,
2577 				latency, timeout, NULL);
2578 
2579 	rp = hci_cmd_complete(evt, sizeof(*rp));
2580 	rp->status = status;
2581 	rp->handle = sys_cpu_to_le16(handle);
2582 }
2583 
2584 static void le_conn_param_req_neg_reply(struct net_buf *buf,
2585 					struct net_buf **evt)
2586 {
2587 	struct bt_hci_cp_le_conn_param_req_neg_reply *cmd = (void *)buf->data;
2588 	struct bt_hci_rp_le_conn_param_req_neg_reply *rp;
2589 	uint16_t handle;
2590 	uint8_t status;
2591 
2592 	handle = sys_le16_to_cpu(cmd->handle);
2593 	status = ll_conn_update(handle, 2, cmd->reason, 0, 0, 0, 0, NULL);
2594 
2595 	rp = hci_cmd_complete(evt, sizeof(*rp));
2596 	rp->status = status;
2597 	rp->handle = sys_cpu_to_le16(handle);
2598 }
2599 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2600 
2601 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2602 static void le_set_data_len(struct net_buf *buf, struct net_buf **evt)
2603 {
2604 	struct bt_hci_cp_le_set_data_len *cmd = (void *)buf->data;
2605 	struct bt_hci_rp_le_set_data_len *rp;
2606 	uint16_t tx_octets;
2607 	uint16_t tx_time;
2608 	uint16_t handle;
2609 	uint8_t status;
2610 
2611 	handle = sys_le16_to_cpu(cmd->handle);
2612 	tx_octets = sys_le16_to_cpu(cmd->tx_octets);
2613 	tx_time = sys_le16_to_cpu(cmd->tx_time);
2614 	status = ll_length_req_send(handle, tx_octets, tx_time);
2615 
2616 	rp = hci_cmd_complete(evt, sizeof(*rp));
2617 	rp->status = status;
2618 	rp->handle = sys_cpu_to_le16(handle);
2619 }
2620 
2621 static void le_read_default_data_len(struct net_buf *buf, struct net_buf **evt)
2622 {
2623 	struct bt_hci_rp_le_read_default_data_len *rp;
2624 	uint16_t max_tx_octets;
2625 	uint16_t max_tx_time;
2626 
2627 	rp = hci_cmd_complete(evt, sizeof(*rp));
2628 
2629 	ll_length_default_get(&max_tx_octets, &max_tx_time);
2630 
2631 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2632 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2633 	rp->status = 0x00;
2634 }
2635 
2636 static void le_write_default_data_len(struct net_buf *buf,
2637 				      struct net_buf **evt)
2638 {
2639 	struct bt_hci_cp_le_write_default_data_len *cmd = (void *)buf->data;
2640 	uint16_t max_tx_octets;
2641 	uint16_t max_tx_time;
2642 	uint8_t status;
2643 
2644 	max_tx_octets = sys_le16_to_cpu(cmd->max_tx_octets);
2645 	max_tx_time = sys_le16_to_cpu(cmd->max_tx_time);
2646 	status = ll_length_default_set(max_tx_octets, max_tx_time);
2647 
2648 	*evt = cmd_complete_status(status);
2649 }
2650 
2651 static void le_read_max_data_len(struct net_buf *buf, struct net_buf **evt)
2652 {
2653 	struct bt_hci_rp_le_read_max_data_len *rp;
2654 	uint16_t max_tx_octets;
2655 	uint16_t max_tx_time;
2656 	uint16_t max_rx_octets;
2657 	uint16_t max_rx_time;
2658 
2659 	rp = hci_cmd_complete(evt, sizeof(*rp));
2660 
2661 	ll_length_max_get(&max_tx_octets, &max_tx_time,
2662 			  &max_rx_octets, &max_rx_time);
2663 
2664 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2665 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2666 	rp->max_rx_octets = sys_cpu_to_le16(max_rx_octets);
2667 	rp->max_rx_time = sys_cpu_to_le16(max_rx_time);
2668 	rp->status = 0x00;
2669 }
2670 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2671 
2672 #if defined(CONFIG_BT_CTLR_PHY)
2673 static void le_read_phy(struct net_buf *buf, struct net_buf **evt)
2674 {
2675 	struct bt_hci_cp_le_read_phy *cmd = (void *)buf->data;
2676 	struct bt_hci_rp_le_read_phy *rp;
2677 	uint16_t handle;
2678 	uint8_t status;
2679 
2680 	handle = sys_le16_to_cpu(cmd->handle);
2681 
2682 	rp = hci_cmd_complete(evt, sizeof(*rp));
2683 
2684 	status = ll_phy_get(handle, &rp->tx_phy, &rp->rx_phy);
2685 
2686 	rp->status = status;
2687 	rp->handle = sys_cpu_to_le16(handle);
2688 	rp->tx_phy = find_lsb_set(rp->tx_phy);
2689 	rp->rx_phy = find_lsb_set(rp->rx_phy);
2690 }
2691 
2692 static void le_set_default_phy(struct net_buf *buf, struct net_buf **evt)
2693 {
2694 	struct bt_hci_cp_le_set_default_phy *cmd = (void *)buf->data;
2695 	uint8_t status;
2696 
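	/* An all_phys bit set to 1 means the Host has no preference for that
	 * direction; select all three PHY bits (1M, 2M and Coded, 0x07).
	 */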
2697 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2698 		cmd->tx_phys = 0x07;
2699 	}
2700 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2701 		cmd->rx_phys = 0x07;
2702 	}
2703 
2704 	status = ll_phy_default_set(cmd->tx_phys, cmd->rx_phys);
2705 
2706 	*evt = cmd_complete_status(status);
2707 }
2708 
2709 static void le_set_phy(struct net_buf *buf, struct net_buf **evt)
2710 {
2711 	struct bt_hci_cp_le_set_phy *cmd = (void *)buf->data;
2712 	uint16_t phy_opts;
2713 	uint8_t mask_phys;
2714 	uint16_t handle;
2715 	uint8_t status;
2716 
2717 	handle = sys_le16_to_cpu(cmd->handle);
2718 	phy_opts = sys_le16_to_cpu(cmd->phy_opts);
2719 
2720 	mask_phys = BT_HCI_LE_PHY_PREFER_1M;
2721 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
2722 		mask_phys |= BT_HCI_LE_PHY_PREFER_2M;
2723 	}
2724 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
2725 		mask_phys |= BT_HCI_LE_PHY_PREFER_CODED;
2726 	}
2727 
2728 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2729 		cmd->tx_phys |= mask_phys;
2730 	}
2731 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2732 		cmd->rx_phys |= mask_phys;
2733 	}
2734 
2735 	if ((cmd->tx_phys | cmd->rx_phys) & ~mask_phys) {
2736 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
2737 
2738 		return;
2739 	}
2740 
2741 	if (!(cmd->tx_phys & 0x07) ||
2742 	    !(cmd->rx_phys & 0x07)) {
2743 		*evt = cmd_status(BT_HCI_ERR_INVALID_PARAM);
2744 
2745 		return;
2746 	}
2747 
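	/* Map the HCI PHY options field (0 = no preference, 1 = prefer S=2,
	 * 2 = prefer S=8 coding) onto the zero-based coding preference passed
	 * to ll_phy_req_send().
	 */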
2748 	if (phy_opts & 0x03) {
2749 		phy_opts -= 1U;
2750 		phy_opts &= 1;
2751 	} else {
2752 		phy_opts = 0U;
2753 	}
2754 
2755 	status = ll_phy_req_send(handle, cmd->tx_phys, phy_opts,
2756 				 cmd->rx_phys);
2757 
2758 	*evt = cmd_status(status);
2759 }
2760 #endif /* CONFIG_BT_CTLR_PHY */
2761 #endif /* CONFIG_BT_CONN */
2762 
2763 #if defined(CONFIG_BT_CTLR_PRIVACY)
2764 static void le_add_dev_to_rl(struct net_buf *buf, struct net_buf **evt)
2765 {
2766 	struct bt_hci_cp_le_add_dev_to_rl *cmd = (void *)buf->data;
2767 	uint8_t status;
2768 
2769 	status = ll_rl_add(&cmd->peer_id_addr, cmd->peer_irk, cmd->local_irk);
2770 
2771 	*evt = cmd_complete_status(status);
2772 }
2773 
2774 static void le_rem_dev_from_rl(struct net_buf *buf, struct net_buf **evt)
2775 {
2776 	struct bt_hci_cp_le_rem_dev_from_rl *cmd = (void *)buf->data;
2777 	uint8_t status;
2778 
2779 	status = ll_rl_remove(&cmd->peer_id_addr);
2780 
2781 	*evt = cmd_complete_status(status);
2782 }
2783 
2784 static void le_clear_rl(struct net_buf *buf, struct net_buf **evt)
2785 {
2786 	uint8_t status;
2787 
2788 	status = ll_rl_clear();
2789 
2790 	*evt = cmd_complete_status(status);
2791 }
2792 
2793 static void le_read_rl_size(struct net_buf *buf, struct net_buf **evt)
2794 {
2795 	struct bt_hci_rp_le_read_rl_size *rp;
2796 
2797 	rp = hci_cmd_complete(evt, sizeof(*rp));
2798 
2799 	rp->rl_size = ll_rl_size_get();
2800 	rp->status = 0x00;
2801 }
2802 
2803 static void le_read_peer_rpa(struct net_buf *buf, struct net_buf **evt)
2804 {
2805 	struct bt_hci_cp_le_read_peer_rpa *cmd = (void *)buf->data;
2806 	struct bt_hci_rp_le_read_peer_rpa *rp;
2807 	bt_addr_le_t peer_id_addr;
2808 
2809 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2810 	rp = hci_cmd_complete(evt, sizeof(*rp));
2811 
2812 	rp->status = ll_rl_crpa_get(&peer_id_addr, &rp->peer_rpa);
2813 }
2814 
2815 static void le_read_local_rpa(struct net_buf *buf, struct net_buf **evt)
2816 {
2817 	struct bt_hci_cp_le_read_local_rpa *cmd = (void *)buf->data;
2818 	struct bt_hci_rp_le_read_local_rpa *rp;
2819 	bt_addr_le_t peer_id_addr;
2820 
2821 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2822 	rp = hci_cmd_complete(evt, sizeof(*rp));
2823 
2824 	rp->status = ll_rl_lrpa_get(&peer_id_addr, &rp->local_rpa);
2825 }
2826 
2827 static void le_set_addr_res_enable(struct net_buf *buf, struct net_buf **evt)
2828 {
2829 	struct bt_hci_cp_le_set_addr_res_enable *cmd = (void *)buf->data;
2830 	uint8_t status;
2831 
2832 	status = ll_rl_enable(cmd->enable);
2833 
2834 	*evt = cmd_complete_status(status);
2835 }
2836 
2837 static void le_set_rpa_timeout(struct net_buf *buf, struct net_buf **evt)
2838 {
2839 	struct bt_hci_cp_le_set_rpa_timeout *cmd = (void *)buf->data;
2840 	uint16_t timeout = sys_le16_to_cpu(cmd->rpa_timeout);
2841 
2842 	ll_rl_timeout_set(timeout);
2843 
2844 	*evt = cmd_complete_status(0x00);
2845 }
2846 
2847 static void le_set_privacy_mode(struct net_buf *buf, struct net_buf **evt)
2848 {
2849 	struct bt_hci_cp_le_set_privacy_mode *cmd = (void *)buf->data;
2850 	uint8_t status;
2851 
2852 	status = ll_priv_mode_set(&cmd->id_addr, cmd->mode);
2853 
2854 	*evt = cmd_complete_status(status);
2855 }
2856 #endif /* CONFIG_BT_CTLR_PRIVACY */
2857 
2858 static void le_read_tx_power(struct net_buf *buf, struct net_buf **evt)
2859 {
2860 	struct bt_hci_rp_le_read_tx_power *rp;
2861 
2862 	rp = hci_cmd_complete(evt, sizeof(*rp));
2863 	rp->status = 0x00;
2864 	ll_tx_pwr_get(&rp->min_tx_power, &rp->max_tx_power);
2865 }
2866 
2867 #if defined(CONFIG_BT_CTLR_DF)
2868 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
2869 static void le_df_set_cl_cte_tx_params(struct net_buf *buf,
2870 				       struct net_buf **evt)
2871 {
2872 	struct bt_hci_cp_le_set_cl_cte_tx_params *cmd = (void *)buf->data;
2873 	uint8_t adv_handle;
2874 	uint8_t status;
2875 
2876 	if (adv_cmds_ext_check(evt)) {
2877 		return;
2878 	}
2879 
2880 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &adv_handle);
2881 	if (status) {
2882 		*evt = cmd_complete_status(status);
2883 		return;
2884 	}
2885 
2886 	status = ll_df_set_cl_cte_tx_params(adv_handle, cmd->cte_len,
2887 					    cmd->cte_type, cmd->cte_count,
2888 					    cmd->switch_pattern_len,
2889 					    cmd->ant_ids);
2890 
2891 	*evt = cmd_complete_status(status);
2892 }
2893 
2894 static void le_df_set_cl_cte_enable(struct net_buf *buf, struct net_buf **evt)
2895 {
2896 	struct bt_hci_cp_le_set_cl_cte_tx_enable *cmd = (void *)buf->data;
2897 	uint8_t status;
2898 	uint8_t handle;
2899 
2900 	if (adv_cmds_ext_check(evt)) {
2901 		return;
2902 	}
2903 
2904 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
2905 	if (status) {
2906 		*evt = cmd_complete_status(status);
2907 		return;
2908 	}
2909 
2910 	status = ll_df_set_cl_cte_tx_enable(handle, cmd->cte_enable);
2911 
2912 	*evt = cmd_complete_status(status);
2913 }
2914 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2915 
2916 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2917 static void le_df_set_cl_iq_sampling_enable(struct net_buf *buf, struct net_buf **evt)
2918 {
2919 	struct bt_hci_cp_le_set_cl_cte_sampling_enable *cmd = (void *)buf->data;
2920 	struct bt_hci_rp_le_set_cl_cte_sampling_enable *rp;
2921 	uint16_t sync_handle;
2922 	uint8_t status;
2923 
2924 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
2925 
2926 	status = ll_df_set_cl_iq_sampling_enable(sync_handle,
2927 						 cmd->sampling_enable,
2928 						 cmd->slot_durations,
2929 						 cmd->max_sampled_cte,
2930 						 cmd->switch_pattern_len,
2931 						 cmd->ant_ids);
2932 
2933 	rp = hci_cmd_complete(evt, sizeof(*rp));
2934 
2935 	rp->status = status;
2936 	rp->sync_handle = sys_cpu_to_le16(sync_handle);
2937 }
2938 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2939 
2940 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) ||      \
2941 	defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
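/* Convert a 12-bit IQ sample into the 8-bit representation reported over
 * HCI, preserving the saturated-sample marker and saturating values that do
 * not fit into int8_t.
 */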
2942 static int8_t iq_convert_12_to_8_bits(int16_t data)
2943 {
2944 	if (data == IQ_SAMPLE_SATURATED_16_BIT) {
2945 		return IQ_SAMPLE_SATURATED_8_BIT;
2946 	}
2947 
2948 #if defined(CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB)
2949 	return (data > INT8_MAX || data < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2950 						    : IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2951 #else  /* !CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2952 	int16_t data_conv = IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
2953 
2954 	return (data_conv > INT8_MAX || data_conv < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
2955 							      : (int8_t)data_conv;
2956 #endif /* CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
2957 }
2958 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
2959 	* || CONFIG_BT_CTLR_DF_CONN_CTE_RX
2960 	*/
2961 
2962 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
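/* Assemble the HCI LE Connectionless IQ Report meta event from a node_rx IQ
 * report, converting the 12-bit IQ samples to the 8-bit HCI format. The
 * report is dropped if the Host has not enabled the corresponding event
 * masks.
 */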
2963 static void le_df_connectionless_iq_report(struct pdu_data *pdu_rx,
2964 					   struct node_rx_pdu *node_rx,
2965 					   struct net_buf *buf)
2966 {
2967 	struct bt_hci_evt_le_connectionless_iq_report *sep;
2968 	struct node_rx_iq_report *iq_report;
2969 	struct lll_sync *lll;
2970 	uint8_t samples_cnt;
2971 	int16_t rssi;
2972 	uint16_t sync_handle;
2973 	uint16_t per_evt_counter;
2974 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
2975 	struct ll_sync_set *sync = NULL;
2976 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2977 
2978 	iq_report =  (struct node_rx_iq_report *)node_rx;
2979 
2980 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
2981 	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT)) {
2982 		return;
2983 	}
2984 
2985 	lll = iq_report->rx.rx_ftr.param;
2986 
2987 	/* If there is no LLL context and CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT is
2988 	 * enabled, the controller is in Direct Test Mode and may generate
2989 	 * the Connectionless IQ Report.
2990 	 */
2991 	if (!lll && IS_ENABLED(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)) {
2992 		/* Set sync_handle to 0x0FFF according to the BT Core 5.3 specification
2993 		 * Vol 4, Part E, Section 7.7.65.21.
2994 		 */
2995 		sync_handle = 0x0FFF;
2996 		/* Set periodic event counter to 0 since there is no periodic advertising train. */
2997 		per_evt_counter = 0;
2998 	}
2999 
3000 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
3001 	else {
3002 		sync = HDR_LLL2ULL(lll);
3003 
3004 		/* TX LL thread has higher priority than RX thread. It may happen that
3005 		 * the Host successfully disables CTE sampling in the meantime.
3006 		 * It should be verified here, to avoid reporting IQ samples after
3007 		 * the functionality was disabled or if sync was lost.
3008 		 */
3009 		if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) ||
3010 		    !sync->timeout_reload) {
3011 			/* Drop further processing of the event. */
3012 			return;
3013 		}
3014 
3015 		/* Get the sync handle corresponding to the LLL context passed in the
3016 		 * node rx footer field.
3017 		 */
3018 		sync_handle = ull_sync_handle_get(sync);
3019 		per_evt_counter = iq_report->event_counter;
3020 	}
3021 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
3022 
3023 	/* If packet status does not indicate insufficient resources for IQ samples and for
3024 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
3025 	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
3026 	 */
3027 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3028 		samples_cnt = 0U;
3029 	} else {
3030 		samples_cnt = MAX(1, iq_report->sample_count);
3031 	}
3032 
3033 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT,
3034 		       (sizeof(*sep) +
3035 			(samples_cnt * sizeof(struct bt_hci_le_iq_sample))));
3036 
3037 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);
3038 
3039 
3040 	sep->sync_handle = sys_cpu_to_le16(sync_handle);
3041 	sep->rssi = sys_cpu_to_le16(rssi);
3042 	sep->rssi_ant_id = iq_report->rssi_ant_id;
3043 	sep->cte_type = iq_report->cte_info.type;
3044 
3045 	sep->chan_idx = iq_report->chan_idx;
3046 	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);
3047 
3048 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
3049 		sep->slot_durations = iq_report->local_slot_durations;
3050 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
3051 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
3052 	} else {
3053 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
3054 	}
3055 
3056 	sep->packet_status = iq_report->packet_status;
3057 
3058 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3059 		if (iq_report->sample_count == 0U) {
3060 			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3061 			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3062 		} else {
3063 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
3064 				sep->sample[idx].i =
3065 					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
3066 				sep->sample[idx].q =
3067 					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
3068 			}
3069 		}
3070 	}
3071 
3072 	sep->sample_count = samples_cnt;
3073 }
3074 #endif /* defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) */
3075 
3076 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
3077 static void le_df_set_conn_cte_tx_params(struct net_buf *buf,
3078 					 struct net_buf **evt)
3079 {
3080 	struct bt_hci_cp_le_set_conn_cte_tx_params *cmd = (void *)buf->data;
3081 	struct bt_hci_rp_le_set_conn_cte_tx_params *rp;
3082 	uint16_t handle, handle_le16;
3083 	uint8_t status;
3084 
3085 	handle_le16 = cmd->handle;
3086 	handle = sys_le16_to_cpu(handle_le16);
3087 
3088 	status = ll_df_set_conn_cte_tx_params(handle, cmd->cte_types,
3089 					      cmd->switch_pattern_len,
3090 					      cmd->ant_ids);
3091 
3092 	rp = hci_cmd_complete(evt, sizeof(*rp));
3093 
3094 	rp->status = status;
3095 	rp->handle = handle_le16;
3096 }
3097 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
3098 
3099 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
3100 static void le_df_set_conn_cte_rx_params(struct net_buf *buf, struct net_buf **evt)
3101 {
3102 	struct bt_hci_cp_le_set_conn_cte_rx_params *cmd = (void *)buf->data;
3103 	struct bt_hci_rp_le_set_conn_cte_rx_params *rp;
3104 	uint16_t handle, handle_le16;
3105 	uint8_t status;
3106 
3107 	handle_le16 = cmd->handle;
3108 	handle = sys_le16_to_cpu(handle_le16);
3109 
3110 	status = ll_df_set_conn_cte_rx_params(handle, cmd->sampling_enable, cmd->slot_durations,
3111 					      cmd->switch_pattern_len, cmd->ant_ids);
3112 
3113 	rp = hci_cmd_complete(evt, sizeof(*rp));
3114 
3115 	rp->status = status;
3116 	rp->handle = handle_le16;
3117 }
3118 
3119 static void le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
3120 {
3121 	struct bt_hci_evt_le_connection_iq_report *sep;
3122 	struct node_rx_iq_report *iq_report;
3123 	struct lll_conn *lll;
3124 	uint8_t samples_cnt;
3125 	uint8_t phy_rx;
3126 	int16_t rssi;
3127 
3128 	iq_report = (struct node_rx_iq_report *)node_rx;
3129 
3130 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
3131 	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTION_IQ_REPORT)) {
3132 		return;
3133 	}
3134 
3135 	lll = iq_report->rx.rx_ftr.param;
3136 
3137 #if defined(CONFIG_BT_CTLR_PHY)
3138 	phy_rx = lll->phy_rx;
3139 
3140 	/* Make sure the report is generated for connection on PHY UNCODED */
3141 	LL_ASSERT(phy_rx != PHY_CODED);
3142 #else
3143 	phy_rx = PHY_1M;
3144 #endif /* CONFIG_BT_CTLR_PHY */
3145 
3146 	/* TX LL thread has higher priority than RX thread. It may happen that the Host
3147 	 * successfully disables CTE sampling in the meantime. It should be verified here,
3148 	 * to avoid reporting IQ samples after the functionality was disabled.
3149 	 */
3150 	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
3151 		/* Drop further processing of the event. */
3152 		return;
3153 	}
3154 
3155 	/* If packet status does not indicate insufficient resources for IQ samples and for
3156 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
3157 	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
3158 	 */
3159 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3160 		samples_cnt = 0;
3161 	} else {
3162 		samples_cnt = MAX(1, iq_report->sample_count);
3163 	}
3164 
3165 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTION_IQ_REPORT,
3166 		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample))));
3167 
3168 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);
3169 
3170 	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
3171 	sep->rx_phy = phy_rx;
3172 	sep->rssi = sys_cpu_to_le16(rssi);
3173 	sep->rssi_ant_id = iq_report->rssi_ant_id;
3174 	sep->cte_type = iq_report->cte_info.type;
3175 
3176 	sep->data_chan_idx = iq_report->chan_idx;
3177 	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);
3178 
3179 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
3180 		sep->slot_durations = iq_report->local_slot_durations;
3181 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
3182 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
3183 	} else {
3184 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
3185 	}
3186 
3187 	sep->packet_status = iq_report->packet_status;
3188 
3189 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
3190 		if (iq_report->sample_count == 0U) {
3191 			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3192 			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
3193 		} else {
3194 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
3195 				sep->sample[idx].i =
3196 					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
3197 				sep->sample[idx].q =
3198 					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
3199 			}
3200 		}
3201 	}
3202 
3203 	sep->sample_count = samples_cnt;
3204 }
3205 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
3206 
3207 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
3208 static void le_df_set_conn_cte_req_enable(struct net_buf *buf, struct net_buf **evt)
3209 {
3210 	struct bt_hci_cp_le_conn_cte_req_enable *cmd = (void *)buf->data;
3211 	struct bt_hci_rp_le_conn_cte_req_enable *rp;
3212 	uint16_t handle, handle_le16;
3213 	uint8_t status;
3214 
3215 	handle_le16 = cmd->handle;
3216 	handle = sys_le16_to_cpu(handle_le16);
3217 
3218 	status = ll_df_set_conn_cte_req_enable(handle, cmd->enable,
3219 					       sys_le16_to_cpu(cmd->cte_request_interval),
3220 					       cmd->requested_cte_length, cmd->requested_cte_type);
3221 	rp = hci_cmd_complete(evt, sizeof(*rp));
3222 
3223 	rp->status = status;
3224 	rp->handle = handle_le16;
3225 }
3226 
3227 static void le_df_cte_req_failed(uint8_t error_code, uint16_t handle, struct net_buf *buf)
3228 {
3229 	struct bt_hci_evt_le_cte_req_failed *sep;
3230 
3231 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
3232 	    !(le_event_mask & BT_EVT_MASK_LE_CTE_REQUEST_FAILED)) {
3233 		return;
3234 	}
3235 
3236 	sep = meta_evt(buf, BT_HCI_EVT_LE_CTE_REQUEST_FAILED, sizeof(*sep));
3237 
3238 	sep->status = error_code;
3239 	sep->conn_handle = sys_cpu_to_le16(handle);
3240 }
3241 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
3242 
3243 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
3244 static void le_df_set_conn_cte_rsp_enable(struct net_buf *buf, struct net_buf **evt)
3245 {
3246 	struct bt_hci_cp_le_conn_cte_rsp_enable *cmd = (void *)buf->data;
3247 	struct bt_hci_rp_le_conn_cte_rsp_enable *rp;
3248 	uint16_t handle, handle_le16;
3249 	uint8_t status;
3250 
3251 	handle_le16 = cmd->handle;
3252 	handle = sys_le16_to_cpu(handle_le16);
3253 
3254 	status = ll_df_set_conn_cte_rsp_enable(handle, cmd->enable);
3255 	rp = hci_cmd_complete(evt, sizeof(*rp));
3256 
3257 	rp->status = status;
3258 	rp->handle = handle_le16;
3259 }
3260 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
3261 
3262 static void le_df_read_ant_inf(struct net_buf *buf, struct net_buf **evt)
3263 {
3264 	struct bt_hci_rp_le_read_ant_info *rp;
3265 	uint8_t max_switch_pattern_len;
3266 	uint8_t switch_sample_rates;
3267 	uint8_t max_cte_len;
3268 	uint8_t num_ant;
3269 
3270 	ll_df_read_ant_inf(&switch_sample_rates, &num_ant,
3271 			   &max_switch_pattern_len, &max_cte_len);
3272 
3273 	rp = hci_cmd_complete(evt, sizeof(*rp));
3274 
3275 	rp->max_switch_pattern_len = max_switch_pattern_len;
3276 	rp->switch_sample_rates = switch_sample_rates;
3277 	rp->max_cte_len = max_cte_len;
3278 	rp->num_ant = num_ant;
3279 	rp->status = 0x00;
3280 }
3281 #endif /* CONFIG_BT_CTLR_DF */
3282 
3283 #if defined(CONFIG_BT_CTLR_DTM_HCI)
3284 static void le_rx_test(struct net_buf *buf, struct net_buf **evt)
3285 {
3286 	struct bt_hci_cp_le_rx_test *cmd = (void *)buf->data;
3287 	uint8_t status;
3288 
3289 	status = ll_test_rx(cmd->rx_ch, BT_HCI_LE_RX_PHY_1M, BT_HCI_LE_MOD_INDEX_STANDARD,
3290 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3291 			    BT_HCI_LE_TEST_SLOT_DURATION_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3292 			    NULL);
3293 
3294 	*evt = cmd_complete_status(status);
3295 }
3296 
3297 static void le_tx_test(struct net_buf *buf, struct net_buf **evt)
3298 {
3299 	struct bt_hci_cp_le_tx_test *cmd = (void *)buf->data;
3300 	uint8_t status;
3301 
3302 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload,
3303 			    BT_HCI_LE_TX_PHY_1M, BT_HCI_LE_TEST_CTE_DISABLED,
3304 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3305 			    NULL, BT_HCI_TX_TEST_POWER_MAX_SET);
3306 
3307 	*evt = cmd_complete_status(status);
3308 }
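/* Informational sketch: the legacy receiver/transmitter test handlers above
 * reuse the same ll_test_rx()/ll_test_tx() entry points as the enhanced and
 * v3/v4 variants below, with the parameters the legacy commands lack pinned
 * to defaults, i.e. approximately:
 *
 *   LE Receiver Test (v1)    -> ll_test_rx(rx_ch, 1M PHY, standard MI,
 *                                          CTE disabled, ...)
 *   LE Transmitter Test (v1) -> ll_test_tx(tx_ch, len, payload, 1M PHY,
 *                                          CTE disabled, ...,
 *                                          BT_HCI_TX_TEST_POWER_MAX_SET)
 */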
3309 
3310 static void le_test_end(struct net_buf *buf, struct net_buf **evt)
3311 {
3312 	struct bt_hci_rp_le_test_end *rp;
3313 	uint16_t rx_pkt_count;
3314 	uint8_t status;
3315 
3316 	status = ll_test_end(&rx_pkt_count);
3317 
3318 	rp = hci_cmd_complete(evt, sizeof(*rp));
3319 	rp->status = status;
3320 	rp->rx_pkt_count = sys_cpu_to_le16(rx_pkt_count);
3321 }
3322 
3323 static void le_enh_rx_test(struct net_buf *buf, struct net_buf **evt)
3324 {
3325 	struct bt_hci_cp_le_enh_rx_test *cmd = (void *)buf->data;
3326 	uint8_t status;
3327 
3328 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, BT_HCI_LE_TEST_CTE_DISABLED,
3329 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SLOT_DURATION_ANY,
3330 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL);
3331 
3332 	*evt = cmd_complete_status(status);
3333 }
3334 
3335 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
3336 static void le_rx_test_v3(struct net_buf *buf, struct net_buf **evt)
3337 {
3338 	struct bt_hci_cp_le_rx_test_v3 *cmd = (void *)buf->data;
3339 	uint8_t status;
3340 
3341 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, cmd->expected_cte_len,
3342 			    cmd->expected_cte_type, cmd->slot_durations, cmd->switch_pattern_len,
3343 			    cmd->ant_ids);
3344 
3345 	*evt = cmd_complete_status(status);
3346 }
3347 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
3348 
3349 static void le_enh_tx_test(struct net_buf *buf, struct net_buf **evt)
3350 {
3351 	struct bt_hci_cp_le_enh_tx_test *cmd = (void *)buf->data;
3352 	uint8_t status;
3353 
3354 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3355 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3356 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL,
3357 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3358 
3359 	*evt = cmd_complete_status(status);
3360 }
3361 
3362 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
3363 static void le_tx_test_v3(struct net_buf *buf, struct net_buf **evt)
3364 {
3365 	struct bt_hci_cp_le_tx_test_v3 *cmd = (void *)buf->data;
3366 	uint8_t status;
3367 
3368 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3369 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3370 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3371 
3372 	*evt = cmd_complete_status(status);
3373 }
3374 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
3375 
3376 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
3377 static void le_tx_test_v4(struct net_buf *buf, struct net_buf **evt)
3378 {
3379 	struct bt_hci_cp_le_tx_test_v4 *cmd = (void *)buf->data;
3380 	struct bt_hci_cp_le_tx_test_v4_tx_power *tx_power = (void *)(buf->data +
3381 			sizeof(struct bt_hci_cp_le_tx_test_v4) + cmd->switch_pattern_len);
3382 	uint8_t status;
3383 
3384 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3385 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3386 			    tx_power->tx_power);
3387 
3388 	*evt = cmd_complete_status(status);
3389 }
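/* Note on the v4 command layout (informational): the Transmit_Power_Level
 * octet follows the variable-length Antenna_IDs array on the wire, which is
 * why it is read via a separate pointer computed above as
 *
 *   buf->data + sizeof(struct bt_hci_cp_le_tx_test_v4) + switch_pattern_len
 */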
3390 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
3391 #endif /* CONFIG_BT_CTLR_DTM_HCI */
3392 
3393 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3394 #if defined(CONFIG_BT_BROADCASTER)
3395 
3396 static void le_set_adv_set_random_addr(struct net_buf *buf,
3397 				       struct net_buf **evt)
3398 {
3399 	struct bt_hci_cp_le_set_adv_set_random_addr *cmd = (void *)buf->data;
3400 	uint8_t status;
3401 	uint8_t handle;
3402 
3403 	if (adv_cmds_ext_check(evt)) {
3404 		return;
3405 	}
3406 
3407 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3408 	if (status) {
3409 		*evt = cmd_complete_status(status);
3410 		return;
3411 	}
3412 
3413 	status = ll_adv_aux_random_addr_set(handle, &cmd->bdaddr.val[0]);
3414 
3415 	*evt = cmd_complete_status(status);
3416 }
3417 
3418 static void le_set_ext_adv_param(struct net_buf *buf, struct net_buf **evt)
3419 {
3420 	struct bt_hci_cp_le_set_ext_adv_param *cmd = (void *)buf->data;
3421 	struct bt_hci_rp_le_set_ext_adv_param *rp;
3422 	uint32_t min_interval;
3423 	uint16_t evt_prop;
3424 	uint8_t tx_pwr;
3425 	uint8_t status;
3426 	uint8_t phy_p;
3427 	uint8_t phy_s;
3428 	uint8_t handle;
3429 
3430 	if (adv_cmds_ext_check(evt)) {
3431 		return;
3432 	}
3433 
3434 	if (cmd->handle > BT_HCI_LE_ADV_HANDLE_MAX) {
3435 		*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3436 		return;
3437 	}
3438 
3439 	evt_prop = sys_le16_to_cpu(cmd->props);
3440 	min_interval = sys_get_le24(cmd->prim_min_interval);
3441 
3442 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
3443 		const uint32_t max_interval =
3444 					sys_get_le24(cmd->prim_max_interval);
3445 
3446 		/* Compare advertising interval maximum with implementation
3447 		 * supported advertising interval maximum value defined in the
3448 		 * Kconfig CONFIG_BT_CTLR_ADV_INTERVAL_MAX.
3449 		 */
3450 		if ((!(evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) ||
3451 		     !(evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) &&
3452 		    ((min_interval > max_interval) ||
3453 		     (min_interval < BT_HCI_LE_PRIM_ADV_INTERVAL_MIN) ||
3454 		     (max_interval > CONFIG_BT_CTLR_ADV_INTERVAL_MAX))) {
3455 			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3456 			return;
3457 		}
3458 
3459 		if ((cmd->prim_adv_phy > BT_HCI_LE_PHY_CODED) ||
3460 		    (cmd->sec_adv_phy > BT_HCI_LE_PHY_CODED) ||
3461 		    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
3462 		     ((cmd->prim_adv_phy == BT_HCI_LE_PHY_CODED) ||
3463 		      (cmd->sec_adv_phy == BT_HCI_LE_PHY_CODED)))) {
3464 			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3465 			return;
3466 		}
3467 	}
3468 
3469 	status = ll_adv_set_by_hci_handle_get_or_new(cmd->handle, &handle);
3470 	if (status) {
3471 		*evt = cmd_complete_status(status);
3472 		return;
3473 	}
3474 
3475 	tx_pwr = cmd->tx_power;
3476 	phy_p = BIT(cmd->prim_adv_phy - 1);
3477 	phy_s = BIT(cmd->sec_adv_phy - 1);
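	/* HCI encodes the primary/secondary advertising PHY as an index
	 * (0x01 = 1M, 0x02 = 2M, 0x03 = Coded) whereas the LL API takes a bit
	 * mask, hence the BIT(phy - 1) conversions above, e.g. Coded (0x03)
	 * becomes BIT(2).
	 */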
3478 
3479 	status = ll_adv_params_set(handle, evt_prop, min_interval,
3480 				   PDU_ADV_TYPE_EXT_IND, cmd->own_addr_type,
3481 				   cmd->peer_addr.type, cmd->peer_addr.a.val,
3482 				   cmd->prim_channel_map, cmd->filter_policy,
3483 				   &tx_pwr, phy_p, cmd->sec_adv_max_skip, phy_s,
3484 				   cmd->sid, cmd->scan_req_notify_enable);
3485 
3486 	rp = hci_cmd_complete(evt, sizeof(*rp));
3487 	rp->status = status;
3488 	rp->tx_power = tx_pwr;
3489 }
3490 
3491 static void le_set_ext_adv_data(struct net_buf *buf, struct net_buf **evt)
3492 {
3493 	struct bt_hci_cp_le_set_ext_adv_data *cmd = (void *)buf->data;
3494 	uint8_t status;
3495 	uint8_t handle;
3496 
3497 	if (adv_cmds_ext_check(evt)) {
3498 		return;
3499 	}
3500 
3501 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3502 	if (status) {
3503 		*evt = cmd_complete_status(status);
3504 		return;
3505 	}
3506 
3507 	status = ll_adv_aux_ad_data_set(handle, cmd->op, cmd->frag_pref,
3508 					cmd->len, cmd->data);
3509 
3510 	*evt = cmd_complete_status(status);
3511 }
3512 
3513 static void le_set_ext_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
3514 {
3515 	struct bt_hci_cp_le_set_ext_scan_rsp_data *cmd = (void *)buf->data;
3516 	uint8_t status;
3517 	uint8_t handle;
3518 
3519 	if (adv_cmds_ext_check(evt)) {
3520 		return;
3521 	}
3522 
3523 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3524 	if (status) {
3525 		*evt = cmd_complete_status(status);
3526 		return;
3527 	}
3528 
3529 	status = ll_adv_aux_sr_data_set(handle, cmd->op, cmd->frag_pref,
3530 					cmd->len, cmd->data);
3531 
3532 	*evt = cmd_complete_status(status);
3533 }
3534 
3535 static void le_set_ext_adv_enable(struct net_buf *buf, struct net_buf **evt)
3536 {
3537 	struct bt_hci_cp_le_set_ext_adv_enable *cmd = (void *)buf->data;
3538 	struct bt_hci_ext_adv_set *s;
3539 	uint8_t set_num;
3540 	uint8_t status;
3541 	uint8_t handle;
3542 
3543 	if (adv_cmds_ext_check(evt)) {
3544 		return;
3545 	}
3546 
3547 	set_num = cmd->set_num;
3548 	if (!set_num) {
3549 		if (cmd->enable) {
3550 			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3551 			return;
3552 		}
3553 
3554 		status = ll_adv_disable_all();
3555 
3556 		*evt = cmd_complete_status(status);
3557 
3558 		return;
3559 	}
3560 
3561 	/* Check for duplicate handles */
3562 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
3563 		for (uint8_t i = 0U; i < set_num - 1; i++) {
3564 			for (uint8_t j = i + 1U; j < set_num; j++) {
3565 				if (cmd->s[i].handle == cmd->s[j].handle) {
3566 					*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3567 					return;
3568 				}
3569 			}
3570 		}
3571 	}
3572 
3573 	s = (void *) cmd->s;
3574 	do {
3575 		status = ll_adv_set_by_hci_handle_get(s->handle, &handle);
3576 		if (status) {
3577 			break;
3578 		}
3579 
3580 		/* TODO: duration and events parameter use. */
3581 #if defined(CONFIG_BT_HCI_MESH_EXT)
3582 		status = ll_adv_enable(handle, cmd->enable, 0, 0, 0, 0, 0);
3583 #else /* !CONFIG_BT_HCI_MESH_EXT */
3584 		status = ll_adv_enable(handle, cmd->enable,
3585 				       sys_le16_to_cpu(s->duration), s->max_ext_adv_evts);
3586 #endif /* !CONFIG_BT_HCI_MESH_EXT */
3587 		if (status) {
3588 			/* TODO: how to handle the sets that succeeded before
3589 			 *       this error.
3590 			 */
3591 			break;
3592 		}
3593 
3594 		s++;
3595 	} while (--set_num);
3596 
3597 	*evt = cmd_complete_status(status);
3598 }
3599 
3600 static void le_read_max_adv_data_len(struct net_buf *buf, struct net_buf **evt)
3601 {
3602 	struct bt_hci_rp_le_read_max_adv_data_len *rp;
3603 	uint16_t max_adv_data_len;
3604 
3605 	if (adv_cmds_ext_check(evt)) {
3606 		return;
3607 	}
3608 
3609 	rp = hci_cmd_complete(evt, sizeof(*rp));
3610 
3611 	max_adv_data_len = ll_adv_aux_max_data_length_get();
3612 
3613 	rp->max_adv_data_len = sys_cpu_to_le16(max_adv_data_len);
3614 	rp->status = 0x00;
3615 }
3616 
3617 static void le_read_num_adv_sets(struct net_buf *buf, struct net_buf **evt)
3618 {
3619 	struct bt_hci_rp_le_read_num_adv_sets *rp;
3620 
3621 	if (adv_cmds_ext_check(evt)) {
3622 		return;
3623 	}
3624 
3625 	rp = hci_cmd_complete(evt, sizeof(*rp));
3626 
3627 	rp->num_sets = ll_adv_aux_set_count_get();
3628 	rp->status = 0x00;
3629 }
3630 
3631 static void le_remove_adv_set(struct net_buf *buf, struct net_buf **evt)
3632 {
3633 	struct bt_hci_cp_le_remove_adv_set *cmd = (void *)buf->data;
3634 	uint8_t status;
3635 	uint8_t handle;
3636 
3637 	if (adv_cmds_ext_check(evt)) {
3638 		return;
3639 	}
3640 
3641 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3642 	if (status) {
3643 		*evt = cmd_complete_status(status);
3644 		return;
3645 	}
3646 
3647 	status = ll_adv_aux_set_remove(handle);
3648 
3649 	*evt = cmd_complete_status(status);
3650 }
3651 
3652 static void le_clear_adv_sets(struct net_buf *buf, struct net_buf **evt)
3653 {
3654 	uint8_t status;
3655 
3656 	if (adv_cmds_ext_check(evt)) {
3657 		return;
3658 	}
3659 
3660 	status = ll_adv_aux_set_clear();
3661 
3662 	*evt = cmd_complete_status(status);
3663 }
3664 
3665 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
3666 static void le_set_per_adv_param(struct net_buf *buf, struct net_buf **evt)
3667 {
3668 	struct bt_hci_cp_le_set_per_adv_param *cmd = (void *)buf->data;
3669 	uint16_t max_interval;
3670 	uint16_t flags;
3671 	uint8_t status;
3672 	uint8_t handle;
3673 
3674 	if (adv_cmds_ext_check(evt)) {
3675 		return;
3676 	}
3677 
3678 	max_interval = sys_le16_to_cpu(cmd->max_interval);
3679 
3680 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
3681 		const uint32_t min_interval =
3682 					sys_le16_to_cpu(cmd->min_interval);
3683 
3684 		if ((min_interval > max_interval) ||
3685 		    (min_interval < BT_HCI_LE_PER_ADV_INTERVAL_MIN)) {
3686 			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
3687 			return;
3688 		}
3689 
3690 		/* Compare periodic advertising interval with
3691 		 * implementation supported periodic advertising interval
3692 		 * maximum value defined in the Kconfig
3693 		 * CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX.
3694 		 */
3695 		if (min_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
3696 			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3697 			return;
3698 		}
3699 
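		/* The requested maximum is silently clamped to the
		 * implementation supported periodic advertising interval
		 * maximum below.
		 */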
3700 		if (max_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
3701 			max_interval = CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX;
3702 		}
3703 	}
3704 
3705 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3706 	if (status) {
3707 		*evt = cmd_complete_status(status);
3708 		return;
3709 	}
3710 
3711 	flags = sys_le16_to_cpu(cmd->props);
3712 
3713 	status = ll_adv_sync_param_set(handle, max_interval, flags);
3714 
3715 	*evt = cmd_complete_status(status);
3716 }
3717 
3718 static void le_set_per_adv_data(struct net_buf *buf, struct net_buf **evt)
3719 {
3720 	struct bt_hci_cp_le_set_per_adv_data *cmd = (void *)buf->data;
3721 	uint8_t status;
3722 	uint8_t handle;
3723 
3724 	if (adv_cmds_ext_check(evt)) {
3725 		return;
3726 	}
3727 
3728 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3729 	if (status) {
3730 		*evt = cmd_complete_status(status);
3731 		return;
3732 	}
3733 
3734 	status = ll_adv_sync_ad_data_set(handle, cmd->op, cmd->len,
3735 					 cmd->data);
3736 
3737 	*evt = cmd_complete_status(status);
3738 }
3739 
3740 static void le_set_per_adv_enable(struct net_buf *buf, struct net_buf **evt)
3741 {
3742 	struct bt_hci_cp_le_set_per_adv_enable *cmd = (void *)buf->data;
3743 	uint8_t status;
3744 	uint8_t handle;
3745 
3746 	if (adv_cmds_ext_check(evt)) {
3747 		return;
3748 	}
3749 
3750 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3751 	if (status) {
3752 		*evt = cmd_complete_status(status);
3753 		return;
3754 	}
3755 
3756 	status = ll_adv_sync_enable(handle, cmd->enable);
3757 
3758 	*evt = cmd_complete_status(status);
3759 }
3760 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
3761 #endif /* CONFIG_BT_BROADCASTER */
3762 
3763 #if defined(CONFIG_BT_OBSERVER)
3764 static void le_set_ext_scan_param(struct net_buf *buf, struct net_buf **evt)
3765 {
3766 	struct bt_hci_cp_le_set_ext_scan_param *cmd = (void *)buf->data;
3767 	struct bt_hci_ext_scan_phy *p;
3768 	uint8_t own_addr_type;
3769 	uint8_t filter_policy;
3770 	uint8_t phys_bitmask;
3771 	uint8_t status;
3772 	uint8_t phys;
3773 
3774 	if (adv_cmds_ext_check(evt)) {
3775 		return;
3776 	}
3777 
3778 	/* The number of bits set indicates the scan sets to be configured by
3779 	 * calling the ll_scan_params_set function.
3780 	 */
3781 	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
3782 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
3783 		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
3784 	}
3785 
3786 	phys = cmd->phys;
3787 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
3788 	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
3789 		*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3790 
3791 		return;
3792 	}
3793 
3794 	own_addr_type = cmd->own_addr_type;
3795 	filter_policy = cmd->filter_policy;
3796 	p = cmd->p;
3797 
3798 	/* Irrespective of the PHYs enabled for scanning, ll_scan_params_set
3799 	 * needs to be called to initialise the scan sets.
3800 	 * Passing interval and window as 0 disables the particular scan set
3801 	 * from being enabled.
3802 	 */
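	/* Informational example of the loop below, assuming Coded PHY support
	 * and the HCI bit values BT_HCI_LE_EXT_SCAN_PHY_1M = BIT(0) and
	 * BT_HCI_LE_EXT_SCAN_PHY_CODED = BIT(2): with phys_bitmask = 0x05 the
	 * first pass picks phy = BIT(0) and the second pass, after
	 * phys_bitmask &= (phys_bitmask - 1), picks phy = BIT(2); an active
	 * scan requested on that PHY yields type = (BIT(2) << 1) | 0x01 = 0x09
	 * passed to ll_scan_params_set().
	 */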
3803 	do {
3804 		uint16_t interval;
3805 		uint16_t window;
3806 		uint8_t type;
3807 		uint8_t phy;
3808 
3809 		/* Get single PHY bit from the loop bitmask */
3810 		phy = BIT(find_lsb_set(phys_bitmask) - 1);
3811 
3812 		/* Pass the PHY (1M or Coded) of scan set in MSbits of type
3813 		 * parameter
3814 		 */
3815 		type = (phy << 1);
3816 
3817 		/* If the current PHY is one of the PHYs in the Scanning_PHYs,
3818 		 * pick the supplied scan type, interval and window.
3819 		 */
3820 		if (phys & phy) {
3821 			type |= (p->type & 0x01);
3822 			interval = sys_le16_to_cpu(p->interval);
3823 			window = sys_le16_to_cpu(p->window);
3824 			p++;
3825 		} else {
3826 			interval = 0U;
3827 			window = 0U;
3828 		}
3829 
3830 		status = ll_scan_params_set(type, interval, window,
3831 					    own_addr_type, filter_policy);
3832 		if (status) {
3833 			break;
3834 		}
3835 
3836 		phys_bitmask &= (phys_bitmask - 1);
3837 	} while (phys_bitmask);
3838 
3839 	*evt = cmd_complete_status(status);
3840 }
3841 
3842 static void le_set_ext_scan_enable(struct net_buf *buf, struct net_buf **evt)
3843 {
3844 	struct bt_hci_cp_le_set_ext_scan_enable *cmd = (void *)buf->data;
3845 	uint8_t status;
3846 
3847 	if (adv_cmds_ext_check(evt)) {
3848 		return;
3849 	}
3850 
3851 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
3852 	/* Initialize duplicate filtering */
3853 	if (cmd->enable && cmd->filter_dup) {
3854 		if (0) {
3855 
3856 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
3857 		} else if (dup_count == DUP_FILTER_DISABLED) {
3858 			dup_scan = true;
3859 
3860 			/* All entries reset */
3861 			dup_count = 0;
3862 			dup_curr = 0U;
3863 		} else if (!dup_scan) {
3864 			dup_scan = true;
3865 			dup_ext_adv_reset();
3866 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3867 
3868 		} else {
3869 			/* All entries reset */
3870 			dup_count = 0;
3871 			dup_curr = 0U;
3872 		}
3873 	} else {
3874 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
3875 		dup_scan = false;
3876 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3877 		dup_count = DUP_FILTER_DISABLED;
3878 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
3879 	}
3880 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
3881 
3882 	status = ll_scan_enable(cmd->enable, sys_le16_to_cpu(cmd->duration),
3883 				sys_le16_to_cpu(cmd->period));
3884 
3885 	/* NOTE: As duplicate filtering is implemented here in the HCI source
3886 	 *       code, enabling of already enabled scanning shall succeed after
3887 	 *       the duplicate filtering updates are handled in the statements
3888 	 *       above. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
3889 	 */
3890 	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
3891 	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
3892 		status = BT_HCI_ERR_SUCCESS;
3893 	}
3894 
3895 	*evt = cmd_complete_status(status);
3896 }
3897 
3898 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
3899 static void le_per_adv_create_sync(struct net_buf *buf, struct net_buf **evt)
3900 {
3901 	struct bt_hci_cp_le_per_adv_create_sync *cmd = (void *)buf->data;
3902 	uint16_t sync_timeout;
3903 	uint8_t status;
3904 	uint16_t skip;
3905 
3906 	if (adv_cmds_ext_check(NULL)) {
3907 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
3908 		return;
3909 	}
3910 
3911 	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
3912 	    (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST)) {
3913 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3914 		return;
3915 	}
3916 
3917 	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
3918 	    (cmd->options &
3919 	     (BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED |
3920 	      BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE)) ==
3921 	    BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
3922 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
3923 		return;
3924 	}
3925 
3926 	/* FIXME: Check for HCI LE Set Periodic Advertising Receive Enable
3927 	 * command support and if reporting is initially disabled then
3928 	 * return error code Connection Failed to be Established /
3929 	 * Synchronization Timeout (0x3E).
3930 	 */
3931 
3932 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
3933 	/* Initialize duplicate filtering */
3934 	if (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
3935 		if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
3936 			dup_count = 0;
3937 			dup_curr = 0U;
3938 		} else {
3939 			/* NOTE: Invalidation of dup_ext_adv_mode array entries
3940 			 *       is done when sync is established.
3941 			 */
3942 		}
3943 	} else if (!dup_scan) {
3944 		dup_count = DUP_FILTER_DISABLED;
3945 	}
3946 #endif
3947 
3948 	skip = sys_le16_to_cpu(cmd->skip);
3949 	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
3950 
3951 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
3952 	if ((cmd->cte_type & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_INVALID_VALUE) != 0) {
3953 		status = BT_HCI_ERR_CMD_DISALLOWED;
3954 #else
3955 	if (cmd->cte_type != BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
3956 		status = BT_HCI_ERR_INVALID_PARAM;
3957 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
3958 	} else {
3959 		status = ll_sync_create(cmd->options, cmd->sid, cmd->addr.type, cmd->addr.a.val,
3960 					skip, sync_timeout, cmd->cte_type);
3961 	}
3962 	*evt = cmd_status(status);
3963 }
3964 
3965 static void le_per_adv_create_sync_cancel(struct net_buf *buf,
3966 					  struct net_buf **evt, void **node_rx)
3967 {
3968 	struct bt_hci_evt_cc_status *ccst;
3969 	uint8_t status;
3970 
3971 	if (adv_cmds_ext_check(evt)) {
3972 		return;
3973 	}
3974 
3975 	status = ll_sync_create_cancel(node_rx);
3976 
3977 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
3978 	ccst->status = status;
3979 }
3980 
3981 static void le_per_adv_terminate_sync(struct net_buf *buf, struct net_buf **evt)
3982 {
3983 	struct bt_hci_cp_le_per_adv_terminate_sync *cmd = (void *)buf->data;
3984 	struct bt_hci_evt_cc_status *ccst;
3985 	uint16_t handle;
3986 	uint8_t status;
3987 
3988 	if (adv_cmds_ext_check(evt)) {
3989 		return;
3990 	}
3991 
3992 	handle = sys_le16_to_cpu(cmd->handle);
3993 
3994 	status = ll_sync_terminate(handle);
3995 
3996 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
3997 	ccst->status = status;
3998 }
3999 
4000 static void le_per_adv_recv_enable(struct net_buf *buf, struct net_buf **evt)
4001 {
4002 	struct bt_hci_cp_le_set_per_adv_recv_enable *cmd = (void *)buf->data;
4003 	struct bt_hci_evt_cc_status *ccst;
4004 	uint16_t handle;
4005 	uint8_t status;
4006 
4007 	if (adv_cmds_ext_check(evt)) {
4008 		return;
4009 	}
4010 
4011 	handle = sys_le16_to_cpu(cmd->handle);
4012 
4013 	status = ll_sync_recv_enable(handle, cmd->enable);
4014 
4015 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
4016 	if (!status) {
4017 		if (cmd->enable &
4018 		    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) {
4019 			if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
4020 				dup_count = 0;
4021 				dup_curr = 0U;
4022 			} else {
4023 				/* NOTE: Invalidation of dup_ext_adv_mode array
4024 				 *       entries is done when sync is
4025 				 *       established.
4026 				 */
4027 			}
4028 		} else if (!dup_scan) {
4029 			dup_count = DUP_FILTER_DISABLED;
4030 		}
4031 	}
4032 #endif
4033 
4034 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
4035 	ccst->status = status;
4036 }
4037 
4038 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4039 static void le_add_dev_to_pal(struct net_buf *buf, struct net_buf **evt)
4040 {
4041 	struct bt_hci_cp_le_add_dev_to_per_adv_list *cmd = (void *)buf->data;
4042 	uint8_t status;
4043 
4044 	if (adv_cmds_ext_check(evt)) {
4045 		return;
4046 	}
4047 
4048 	status = ll_pal_add(&cmd->addr, cmd->sid);
4049 
4050 	*evt = cmd_complete_status(status);
4051 }
4052 
4053 static void le_rem_dev_from_pal(struct net_buf *buf, struct net_buf **evt)
4054 {
4055 	struct bt_hci_cp_le_rem_dev_from_per_adv_list *cmd = (void *)buf->data;
4056 	uint8_t status;
4057 
4058 	if (adv_cmds_ext_check(evt)) {
4059 		return;
4060 	}
4061 
4062 	status = ll_pal_remove(&cmd->addr, cmd->sid);
4063 
4064 	*evt = cmd_complete_status(status);
4065 }
4066 
4067 static void le_clear_pal(struct net_buf *buf, struct net_buf **evt)
4068 {
4069 	uint8_t status;
4070 
4071 	if (adv_cmds_ext_check(evt)) {
4072 		return;
4073 	}
4074 
4075 	status = ll_pal_clear();
4076 
4077 	*evt = cmd_complete_status(status);
4078 }
4079 
4080 static void le_read_pal_size(struct net_buf *buf, struct net_buf **evt)
4081 {
4082 	struct bt_hci_rp_le_read_per_adv_list_size *rp;
4083 
4084 	if (adv_cmds_ext_check(evt)) {
4085 		return;
4086 	}
4087 
4088 	rp = hci_cmd_complete(evt, sizeof(*rp));
4089 	rp->status = 0x00;
4090 
4091 	rp->list_size = ll_pal_size_get();
4092 }
4093 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4094 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4095 #endif /* CONFIG_BT_OBSERVER */
4096 
4097 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
4098 static void le_per_adv_sync_transfer(struct net_buf *buf, struct net_buf **evt)
4099 {
4100 	struct bt_hci_cp_le_per_adv_sync_transfer *cmd = (void *)buf->data;
4101 	struct bt_hci_rp_le_per_adv_sync_transfer *rp;
4102 	uint16_t conn_handle, conn_handle_le16;
4103 	uint16_t service_data;
4104 	uint16_t sync_handle;
4105 	uint8_t status;
4106 
4107 	conn_handle_le16 = cmd->conn_handle;
4108 
4109 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4110 	service_data = sys_le16_to_cpu(cmd->service_data);
4111 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
4112 
4113 	status = ll_sync_transfer(conn_handle, service_data, sync_handle);
4114 
4115 	rp = hci_cmd_complete(evt, sizeof(*rp));
4116 	rp->conn_handle = conn_handle_le16;
4117 	rp->status = status;
4118 }
4119 
4120 static void le_per_adv_set_info_transfer(struct net_buf *buf, struct net_buf **evt)
4121 {
4122 	struct bt_hci_cp_le_per_adv_set_info_transfer *cmd = (void *)buf->data;
4123 	struct bt_hci_rp_le_per_adv_set_info_transfer *rp;
4124 	uint16_t conn_handle, conn_handle_le16;
4125 	uint16_t service_data;
4126 	uint8_t adv_handle;
4127 	uint8_t status;
4128 
4129 	conn_handle_le16 = cmd->conn_handle;
4130 
4131 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4132 	service_data = sys_le16_to_cpu(cmd->service_data);
4133 	adv_handle = cmd->adv_handle;
4134 
4135 	status = ll_adv_sync_set_info_transfer(conn_handle, service_data, adv_handle);
4136 
4137 	rp = hci_cmd_complete(evt, sizeof(*rp));
4138 	rp->conn_handle = conn_handle_le16;
4139 	rp->status = status;
4140 }
4141 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
4142 
4143 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
4144 static void le_past_param(struct net_buf *buf, struct net_buf **evt)
4145 {
4146 	struct bt_hci_cp_le_past_param *cmd = (void *)buf->data;
4147 	struct bt_hci_rp_le_past_param *rp;
4148 	uint16_t conn_handle_le16;
4149 	uint16_t conn_handle;
4150 	uint16_t timeout;
4151 	uint8_t cte_type;
4152 	uint8_t status;
4153 	uint16_t skip;
4154 	uint8_t mode;
4155 
4156 	if (adv_cmds_ext_check(evt)) {
4157 		return;
4158 	}
4159 
4160 	conn_handle_le16 = cmd->conn_handle;
4161 
4162 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4163 	mode = cmd->mode;
4164 	skip = sys_le16_to_cpu(cmd->skip);
4165 	timeout = sys_le16_to_cpu(cmd->timeout);
4166 	cte_type = cmd->cte_type;
4167 
4168 	status = ll_past_param(conn_handle, mode, skip, timeout, cte_type);
4169 
4170 	rp = hci_cmd_complete(evt, sizeof(*rp));
4171 	rp->conn_handle = conn_handle_le16;
4172 	rp->status = status;
4173 }
4174 
4175 static void le_default_past_param(struct net_buf *buf, struct net_buf **evt)
4176 {
4177 	struct bt_hci_cp_le_default_past_param *cmd = (void *)buf->data;
4178 	struct bt_hci_rp_le_default_past_param *rp;
4179 	uint16_t timeout;
4180 	uint8_t cte_type;
4181 	uint8_t status;
4182 	uint16_t skip;
4183 	uint8_t mode;
4184 
4185 	if (adv_cmds_ext_check(evt)) {
4186 		return;
4187 	}
4188 
4189 	mode = cmd->mode;
4190 	skip = sys_le16_to_cpu(cmd->skip);
4191 	timeout = sys_le16_to_cpu(cmd->timeout);
4192 	cte_type = cmd->cte_type;
4193 
4194 	status = ll_default_past_param(mode, skip, timeout, cte_type);
4195 
4196 	rp = hci_cmd_complete(evt, sizeof(*rp));
4197 	rp->status = status;
4198 }
4199 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4200 
4201 #if defined(CONFIG_BT_CENTRAL)
4202 static void le_ext_create_connection(struct net_buf *buf, struct net_buf **evt)
4203 {
4204 	struct bt_hci_cp_le_ext_create_conn *cmd = (void *)buf->data;
4205 	struct bt_hci_ext_conn_phy *p;
4206 	uint8_t peer_addr_type;
4207 	uint8_t own_addr_type;
4208 	uint8_t filter_policy;
4209 	uint8_t phys_bitmask;
4210 	uint8_t *peer_addr;
4211 	uint8_t status;
4212 	uint8_t phys;
4213 
4214 	if (adv_cmds_ext_check(NULL)) {
4215 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
4216 		return;
4217 	}
4218 
4219 	/* The number of bits set indicates the scan sets to be configured by
4220 	 * calling the ll_create_connection function.
4221 	 */
4222 	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
4223 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
4224 		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
4225 	}
4226 
4227 	phys = cmd->phys;
4228 
4229 	/* Ignore Scan Interval and Scan Window, and ignore scanning if
4230 	 * Initiating PHY is set for LE 2M PHY
4231 	 * Refer to Bluetooth Core Specification Version 5.4 Vol 4, Part E
4232 	 * 7.8.66 LE Extended Create Connection command
4233 	 */
4234 	phys &= ~BT_HCI_LE_EXT_SCAN_PHY_2M;
4235 
4236 	/* Check if unsupported PHY requested for scanning */
4237 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
4238 	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
4239 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
4240 
4241 		return;
4242 	}
4243 
4244 	filter_policy = cmd->filter_policy;
4245 	own_addr_type = cmd->own_addr_type;
4246 	peer_addr_type = cmd->peer_addr.type;
4247 	peer_addr = cmd->peer_addr.a.val;
4248 	p = cmd->p;
4249 
4250 	do {
4251 		uint16_t supervision_timeout;
4252 		uint16_t conn_interval_max;
4253 		uint16_t scan_interval;
4254 		uint16_t conn_latency;
4255 		uint16_t scan_window;
4256 		uint8_t phy;
4257 
4258 		phy = BIT(find_lsb_set(phys_bitmask) - 1);
4259 
4260 		if (phys & phy) {
4261 			scan_interval = sys_le16_to_cpu(p->scan_interval);
4262 			scan_window = sys_le16_to_cpu(p->scan_window);
4263 			conn_interval_max =
4264 				sys_le16_to_cpu(p->conn_interval_max);
4265 			conn_latency = sys_le16_to_cpu(p->conn_latency);
4266 			supervision_timeout =
4267 				sys_le16_to_cpu(p->supervision_timeout);
4268 
4269 			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
4270 				status = check_cconn_params(true, scan_interval,
4271 							    scan_window,
4272 							    conn_interval_max,
4273 							    conn_latency,
4274 							    supervision_timeout);
4275 				if (status) {
4276 					*evt = cmd_status(status);
4277 					return;
4278 				}
4279 			}
4280 
4281 			status = ll_create_connection(scan_interval,
4282 						      scan_window,
4283 						      filter_policy,
4284 						      peer_addr_type,
4285 						      peer_addr,
4286 						      own_addr_type,
4287 						      conn_interval_max,
4288 						      conn_latency,
4289 						      supervision_timeout,
4290 						      phy);
4291 			p++;
4292 		} else {
4293 			uint8_t type;
4294 
4295 			type = (phy << 1);
4296 			/* NOTE: Pass an invalid interval value to reset the PHY
4297 			 *       value in the scan instance so as not to start
4298 			 *       scanning on the unselected PHY.
4299 			 */
4300 			status = ll_scan_params_set(type, 0, 0, 0, 0);
4301 		}
4302 
4303 		if (status) {
4304 			*evt = cmd_status(status);
4305 			return;
4306 		}
4307 
4308 		phys_bitmask &= (phys_bitmask - 1);
4309 	} while (phys_bitmask);
4310 
4311 	status = ll_connect_enable(phys & BT_HCI_LE_EXT_SCAN_PHY_CODED);
4312 
4313 	*evt = cmd_status(status);
4314 }
4315 #endif /* CONFIG_BT_CENTRAL */
4316 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4317 
4318 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4319 static void le_cis_request(struct pdu_data *pdu_data,
4320 			   struct node_rx_pdu *node_rx,
4321 			   struct net_buf *buf)
4322 {
4323 	struct bt_hci_evt_le_cis_req *sep;
4324 	struct node_rx_conn_iso_req *req;
4325 	void *node;
4326 
4327 	/* Check for pdu field being aligned before accessing CIS established
4328 	 * event.
4329 	 */
4330 	node = pdu_data;
4331 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4332 
4333 	req = node;
4334 	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS)) ||
4335 	    !(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4336 	    !(le_event_mask & BT_EVT_MASK_LE_CIS_REQ)) {
4337 		ll_cis_reject(req->cis_handle, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE);
4338 		return;
4339 	}
4340 
4341 	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_REQ, sizeof(*sep));
4342 	sep->acl_handle = sys_cpu_to_le16(node_rx->hdr.handle);
4343 	sep->cis_handle = sys_cpu_to_le16(req->cis_handle);
4344 	sep->cig_id = req->cig_id;
4345 	sep->cis_id = req->cis_id;
4346 }
4347 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4348 
4349 #if defined(CONFIG_BT_CTLR_CONN_ISO)
4350 static void le_cis_established(struct pdu_data *pdu_data,
4351 			       struct node_rx_pdu *node_rx,
4352 			       struct net_buf *buf)
4353 {
4354 	struct lll_conn_iso_stream_rxtx *lll_cis_c;
4355 	struct lll_conn_iso_stream_rxtx *lll_cis_p;
4356 	struct bt_hci_evt_le_cis_established *sep;
4357 	struct lll_conn_iso_stream *lll_cis;
4358 	struct node_rx_conn_iso_estab *est;
4359 	struct ll_conn_iso_stream *cis;
4360 	struct ll_conn_iso_group *cig;
4361 	bool is_central;
4362 	void *node;
4363 
4364 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4365 	    !(le_event_mask & BT_EVT_MASK_LE_CIS_ESTABLISHED)) {
4366 		return;
4367 	}
4368 
4369 	cis = node_rx->rx_ftr.param;
4370 	cig = cis->group;
4371 
4372 	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_ESTABLISHED, sizeof(*sep));
4373 
4374 	/* Check for pdu field being aligned before accessing CIS established
4375 	 * event.
4376 	 */
4377 	node = pdu_data;
4378 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));
4379 
4380 	est = node;
4381 	sep->status = est->status;
4382 	sep->conn_handle = sys_cpu_to_le16(est->cis_handle);
4383 
4384 	if (!cig) {
4385 		/* CIS was not established and instance was released */
4386 		return;
4387 	}
4388 
4389 	lll_cis = &cis->lll;
4390 	is_central = cig->lll.role == BT_CONN_ROLE_CENTRAL;
4391 	lll_cis_c = is_central ? &lll_cis->tx : &lll_cis->rx;
4392 	lll_cis_p = is_central ? &lll_cis->rx : &lll_cis->tx;
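	/* The c_* (central to peripheral) event fields are filled from the
	 * local TX direction on the central and the RX direction on the
	 * peripheral, and vice versa for the p_* fields, as selected above.
	 */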
4393 
4394 	sys_put_le24(cig->sync_delay, sep->cig_sync_delay);
4395 	sys_put_le24(cis->sync_delay, sep->cis_sync_delay);
4396 	sys_put_le24(cig->c_latency, sep->c_latency);
4397 	sys_put_le24(cig->p_latency, sep->p_latency);
4398 	sep->c_phy = find_lsb_set(lll_cis_c->phy);
4399 	sep->p_phy = find_lsb_set(lll_cis_p->phy);
4400 	sep->nse = lll_cis->nse;
4401 	sep->c_bn = lll_cis_c->bn;
4402 	sep->p_bn = lll_cis_p->bn;
4403 	sep->c_ft = lll_cis_c->ft;
4404 	sep->p_ft = lll_cis_p->ft;
4405 	sep->c_max_pdu = sys_cpu_to_le16(lll_cis_c->max_pdu);
4406 	sep->p_max_pdu = sys_cpu_to_le16(lll_cis_p->max_pdu);
4407 	sep->interval = sys_cpu_to_le16(cig->iso_interval);
4408 
4409 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
4410 	if (is_central) {
4411 		cis_pending_count--;
4412 	}
4413 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
4414 }
4415 #endif /* CONFIG_BT_CTLR_CONN_ISO */
4416 
4417 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
4418 static void le_per_adv_sync_transfer_received(struct pdu_data *pdu_data_rx,
4419 					      struct node_rx_pdu *node_rx, struct net_buf *buf)
4420 {
4421 	struct bt_hci_evt_le_past_received *sep;
4422 	struct node_rx_past_received *se;
4423 	struct ll_sync_set *sync;
4424 	void *node;
4425 
4426 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
4427 	    !(le_event_mask & BT_EVT_MASK_LE_PAST_RECEIVED)) {
4428 		return;
4429 	}
4430 
4431 	sep = meta_evt(buf, BT_HCI_EVT_LE_PAST_RECEIVED, sizeof(*sep));
4432 
4433 	/* Check for pdu field being aligned before accessing PAST received
4434 	 * event.
4435 	 */
4436 	node = pdu_data_rx;
4437 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_past_received));
4438 
4439 	se = node;
4440 	sep->status = se->rx_sync.status;
4441 
4442 	sync = node_rx->rx_ftr.param;
4443 
4444 	/* Resolved address, if private, has been populated in ULL */
4445 	sep->addr.type = sync->peer_id_addr_type;
4446 	if (sync->peer_addr_resolved) {
4447 		/* Mark it as identity address from RPA (0x02, 0x03) */
4448 		MARK_AS_IDENTITY_ADDR(sep->addr.type);
4449 	}
4450 	(void)memcpy(sep->addr.a.val, sync->peer_id_addr, BDADDR_SIZE);
4451 
4452 	sep->adv_sid = sync->sid;
4453 	sep->phy = find_lsb_set(se->rx_sync.phy);
4454 	sep->interval = sys_cpu_to_le16(se->rx_sync.interval);
4455 	sep->clock_accuracy = se->rx_sync.sca;
4456 	sep->conn_handle = sys_cpu_to_le16(se->conn_handle);
4457 	sep->service_data = sys_cpu_to_le16(se->service_data);
4458 	sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);
4459 }
4460 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4461 
4462 static int controller_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
4463 				 struct net_buf **evt, void **node_rx)
4464 {
4465 	switch (ocf) {
4466 	case BT_OCF(BT_HCI_OP_LE_SET_EVENT_MASK):
4467 		le_set_event_mask(cmd, evt);
4468 		break;
4469 
4470 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE):
4471 		le_read_buffer_size(cmd, evt);
4472 		break;
4473 
4474 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4475 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2):
4476 		le_read_buffer_size_v2(cmd, evt);
4477 		break;
4478 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4479 
4480 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_FEATURES):
4481 		le_read_local_features(cmd, evt);
4482 		break;
4483 
4484 	case BT_OCF(BT_HCI_OP_LE_SET_RANDOM_ADDRESS):
4485 		le_set_random_address(cmd, evt);
4486 		break;
4487 
4488 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
4489 	case BT_OCF(BT_HCI_OP_LE_READ_FAL_SIZE):
4490 		le_read_fal_size(cmd, evt);
4491 		break;
4492 
4493 	case BT_OCF(BT_HCI_OP_LE_CLEAR_FAL):
4494 		le_clear_fal(cmd, evt);
4495 		break;
4496 
4497 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_FAL):
4498 		le_add_dev_to_fal(cmd, evt);
4499 		break;
4500 
4501 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_FAL):
4502 		le_rem_dev_from_fal(cmd, evt);
4503 		break;
4504 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
4505 
4506 #if defined(CONFIG_BT_CTLR_CRYPTO)
4507 	case BT_OCF(BT_HCI_OP_LE_ENCRYPT):
4508 		le_encrypt(cmd, evt);
4509 		break;
4510 #endif /* CONFIG_BT_CTLR_CRYPTO */
4511 
4512 	case BT_OCF(BT_HCI_OP_LE_RAND):
4513 		le_rand(cmd, evt);
4514 		break;
4515 
4516 	case BT_OCF(BT_HCI_OP_LE_READ_SUPP_STATES):
4517 		le_read_supp_states(cmd, evt);
4518 		break;
4519 
4520 #if defined(CONFIG_BT_BROADCASTER)
4521 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_PARAM):
4522 		le_set_adv_param(cmd, evt);
4523 		break;
4524 
4525 	case BT_OCF(BT_HCI_OP_LE_READ_ADV_CHAN_TX_POWER):
4526 		le_read_adv_chan_tx_power(cmd, evt);
4527 		break;
4528 
4529 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_DATA):
4530 		le_set_adv_data(cmd, evt);
4531 		break;
4532 
4533 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_RSP_DATA):
4534 		le_set_scan_rsp_data(cmd, evt);
4535 		break;
4536 
4537 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_ENABLE):
4538 		le_set_adv_enable(cmd, evt);
4539 		break;
4540 
4541 #if defined(CONFIG_BT_CTLR_ADV_ISO)
4542 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG):
4543 		le_create_big(cmd, evt);
4544 		break;
4545 
4546 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG_TEST):
4547 		le_create_big_test(cmd, evt);
4548 		break;
4549 
4550 	case BT_OCF(BT_HCI_OP_LE_TERMINATE_BIG):
4551 		le_terminate_big(cmd, evt);
4552 		break;
4553 #endif /* CONFIG_BT_CTLR_ADV_ISO */
4554 #endif /* CONFIG_BT_BROADCASTER */
4555 
4556 #if defined(CONFIG_BT_OBSERVER)
4557 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_PARAM):
4558 		le_set_scan_param(cmd, evt);
4559 		break;
4560 
4561 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_ENABLE):
4562 		le_set_scan_enable(cmd, evt);
4563 		break;
4564 
4565 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
4566 	case BT_OCF(BT_HCI_OP_LE_BIG_CREATE_SYNC):
4567 		le_big_create_sync(cmd, evt);
4568 		break;
4569 
4570 	case BT_OCF(BT_HCI_OP_LE_BIG_TERMINATE_SYNC):
4571 		le_big_terminate_sync(cmd, evt, node_rx);
4572 		break;
4573 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
4574 #endif /* CONFIG_BT_OBSERVER */
4575 
4576 #if defined(CONFIG_BT_CENTRAL)
4577 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN):
4578 		le_create_connection(cmd, evt);
4579 		break;
4580 
4581 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN_CANCEL):
4582 		le_create_conn_cancel(cmd, evt, node_rx);
4583 		break;
4584 
4585 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF):
4586 		le_set_host_chan_classif(cmd, evt);
4587 		break;
4588 
4589 #if defined(CONFIG_BT_CTLR_LE_ENC)
4590 	case BT_OCF(BT_HCI_OP_LE_START_ENCRYPTION):
4591 		le_start_encryption(cmd, evt);
4592 		break;
4593 #endif /* CONFIG_BT_CTLR_LE_ENC */
4594 
4595 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
4596 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS):
4597 		le_set_cig_parameters(cmd, evt);
4598 		break;
4599 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS_TEST):
4600 		le_set_cig_params_test(cmd, evt);
4601 		break;
4602 	case BT_OCF(BT_HCI_OP_LE_CREATE_CIS):
4603 		le_create_cis(cmd, evt);
4604 		break;
4605 	case BT_OCF(BT_HCI_OP_LE_REMOVE_CIG):
4606 		le_remove_cig(cmd, evt);
4607 		break;
4608 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
4609 #endif /* CONFIG_BT_CENTRAL */
4610 
4611 #if defined(CONFIG_BT_PERIPHERAL)
4612 #if defined(CONFIG_BT_CTLR_LE_ENC)
4613 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_REPLY):
4614 		le_ltk_req_reply(cmd, evt);
4615 		break;
4616 
4617 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY):
4618 		le_ltk_req_neg_reply(cmd, evt);
4619 		break;
4620 #endif /* CONFIG_BT_CTLR_LE_ENC */
4621 
4622 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4623 	case BT_OCF(BT_HCI_OP_LE_ACCEPT_CIS):
4624 		le_accept_cis(cmd, evt);
4625 		break;
4626 	case BT_OCF(BT_HCI_OP_LE_REJECT_CIS):
4627 		le_reject_cis(cmd, evt);
4628 		break;
4629 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4630 #endif /* CONFIG_BT_PERIPHERAL */
4631 
4632 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
4633 	case BT_OCF(BT_HCI_OP_LE_REQ_PEER_SC):
4634 		le_req_peer_sca(cmd, evt);
4635 		break;
4636 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
4637 
4638 #if defined(CONFIG_BT_CTLR_ISO)
4639 	case BT_OCF(BT_HCI_OP_LE_SETUP_ISO_PATH):
4640 		le_setup_iso_path(cmd, evt);
4641 		break;
4642 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ISO_PATH):
4643 		le_remove_iso_path(cmd, evt);
4644 		break;
4645 	case BT_OCF(BT_HCI_OP_LE_ISO_TEST_END):
4646 		le_iso_test_end(cmd, evt);
4647 		break;
4648 #endif /* CONFIG_BT_CTLR_ISO */
4649 
4650 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4651 	case BT_OCF(BT_HCI_OP_LE_ISO_TRANSMIT_TEST):
4652 		le_iso_transmit_test(cmd, evt);
4653 		break;
4654 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_TX_SYNC):
4655 		le_read_iso_tx_sync(cmd, evt);
4656 		break;
4657 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4658 
4659 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4660 	case BT_OCF(BT_HCI_OP_LE_ISO_RECEIVE_TEST):
4661 		le_iso_receive_test(cmd, evt);
4662 		break;
4663 	case BT_OCF(BT_HCI_OP_LE_ISO_READ_TEST_COUNTERS):
4664 		le_iso_read_test_counters(cmd, evt);
4665 		break;
4666 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
4667 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_LINK_QUALITY):
4668 		le_read_iso_link_quality(cmd, evt);
4669 		break;
4670 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
4671 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
4672 
4673 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
4674 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_FEATURE):
4675 		le_set_host_feature(cmd, evt);
4676 		break;
4677 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
4678 
4679 #if defined(CONFIG_BT_CONN)
4680 	case BT_OCF(BT_HCI_OP_LE_READ_CHAN_MAP):
4681 		le_read_chan_map(cmd, evt);
4682 		break;
4683 
4684 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
4685 	case BT_OCF(BT_HCI_OP_LE_READ_REMOTE_FEATURES):
4686 		le_read_remote_features(cmd, evt);
4687 		break;
4688 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
4689 
4690 	case BT_OCF(BT_HCI_OP_LE_CONN_UPDATE):
4691 		le_conn_update(cmd, evt);
4692 		break;
4693 
4694 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
4695 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY):
4696 		le_conn_param_req_reply(cmd, evt);
4697 		break;
4698 
4699 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY):
4700 		le_conn_param_req_neg_reply(cmd, evt);
4701 		break;
4702 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4703 
4704 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4705 	case BT_OCF(BT_HCI_OP_LE_SET_DATA_LEN):
4706 		le_set_data_len(cmd, evt);
4707 		break;
4708 
4709 	case BT_OCF(BT_HCI_OP_LE_READ_DEFAULT_DATA_LEN):
4710 		le_read_default_data_len(cmd, evt);
4711 		break;
4712 
4713 	case BT_OCF(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN):
4714 		le_write_default_data_len(cmd, evt);
4715 		break;
4716 
4717 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_DATA_LEN):
4718 		le_read_max_data_len(cmd, evt);
4719 		break;
4720 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4721 
4722 #if defined(CONFIG_BT_CTLR_PHY)
4723 	case BT_OCF(BT_HCI_OP_LE_READ_PHY):
4724 		le_read_phy(cmd, evt);
4725 		break;
4726 
4727 	case BT_OCF(BT_HCI_OP_LE_SET_DEFAULT_PHY):
4728 		le_set_default_phy(cmd, evt);
4729 		break;
4730 
4731 	case BT_OCF(BT_HCI_OP_LE_SET_PHY):
4732 		le_set_phy(cmd, evt);
4733 		break;
4734 #endif /* CONFIG_BT_CTLR_PHY */
4735 #endif /* CONFIG_BT_CONN */
4736 
4737 #if defined(CONFIG_BT_CTLR_ADV_EXT)
4738 #if defined(CONFIG_BT_BROADCASTER)
4739 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR):
4740 		le_set_adv_set_random_addr(cmd, evt);
4741 		break;
4742 
4743 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_PARAM):
4744 		le_set_ext_adv_param(cmd, evt);
4745 		break;
4746 
4747 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_DATA):
4748 		le_set_ext_adv_data(cmd, evt);
4749 		break;
4750 
4751 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_RSP_DATA):
4752 		le_set_ext_scan_rsp_data(cmd, evt);
4753 		break;
4754 
4755 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE):
4756 		le_set_ext_adv_enable(cmd, evt);
4757 		break;
4758 
4759 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN):
4760 		le_read_max_adv_data_len(cmd, evt);
4761 		break;
4762 
4763 	case BT_OCF(BT_HCI_OP_LE_READ_NUM_ADV_SETS):
4764 		le_read_num_adv_sets(cmd, evt);
4765 		break;
4766 
4767 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ADV_SET):
4768 		le_remove_adv_set(cmd, evt);
4769 		break;
4770 
4771 	case BT_OCF(BT_HCI_OP_CLEAR_ADV_SETS):
4772 		le_clear_adv_sets(cmd, evt);
4773 		break;
4774 
4775 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
4776 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_PARAM):
4777 		le_set_per_adv_param(cmd, evt);
4778 		break;
4779 
4780 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_DATA):
4781 		le_set_per_adv_data(cmd, evt);
4782 		break;
4783 
4784 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_ENABLE):
4785 		le_set_per_adv_enable(cmd, evt);
4786 		break;
4787 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
4788 #endif /* CONFIG_BT_BROADCASTER */
4789 
4790 #if defined(CONFIG_BT_OBSERVER)
4791 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM):
4792 		le_set_ext_scan_param(cmd, evt);
4793 		break;
4794 
4795 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE):
4796 		le_set_ext_scan_enable(cmd, evt);
4797 		break;
4798 
4799 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
4800 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC):
4801 		le_per_adv_create_sync(cmd, evt);
4802 		break;
4803 
4804 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL):
4805 		le_per_adv_create_sync_cancel(cmd, evt, node_rx);
4806 		break;
4807 
4808 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC):
4809 		le_per_adv_terminate_sync(cmd, evt);
4810 		break;
4811 
4812 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE):
4813 		le_per_adv_recv_enable(cmd, evt);
4814 		break;
4815 
4816 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4817 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST):
4818 		le_add_dev_to_pal(cmd, evt);
4819 		break;
4820 
4821 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST):
4822 		le_rem_dev_from_pal(cmd, evt);
4823 		break;
4824 
4825 	case BT_OCF(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST):
4826 		le_clear_pal(cmd, evt);
4827 		break;
4828 
4829 	case BT_OCF(BT_HCI_OP_LE_READ_PER_ADV_LIST_SIZE):
4830 		le_read_pal_size(cmd, evt);
4831 		break;
4832 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4833 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4834 #endif /* CONFIG_BT_OBSERVER */
4835 
4836 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
4837 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_SYNC_TRANSFER):
4838 		le_per_adv_sync_transfer(cmd, evt);
4839 		break;
4840 
4841 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_SET_INFO_TRANSFER):
4842 		le_per_adv_set_info_transfer(cmd, evt);
4843 		break;
4844 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
4845 
4846 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
4847 	case BT_OCF(BT_HCI_OP_LE_PAST_PARAM):
4848 		le_past_param(cmd, evt);
4849 		break;
4850 
4851 	case BT_OCF(BT_HCI_OP_LE_DEFAULT_PAST_PARAM):
4852 		le_default_past_param(cmd, evt);
4853 		break;
4854 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4855 
4856 #if defined(CONFIG_BT_CONN)
4857 #if defined(CONFIG_BT_CENTRAL)
4858 	case BT_OCF(BT_HCI_OP_LE_EXT_CREATE_CONN):
4859 		le_ext_create_connection(cmd, evt);
4860 		break;
4861 #endif /* CONFIG_BT_CENTRAL */
4862 #endif /* CONFIG_BT_CONN */
4863 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4864 
4865 #if defined(CONFIG_BT_CTLR_PRIVACY)
4866 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_RL):
4867 		le_add_dev_to_rl(cmd, evt);
4868 		break;
4869 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_RL):
4870 		le_rem_dev_from_rl(cmd, evt);
4871 		break;
4872 	case BT_OCF(BT_HCI_OP_LE_CLEAR_RL):
4873 		le_clear_rl(cmd, evt);
4874 		break;
4875 	case BT_OCF(BT_HCI_OP_LE_READ_RL_SIZE):
4876 		le_read_rl_size(cmd, evt);
4877 		break;
4878 	case BT_OCF(BT_HCI_OP_LE_READ_PEER_RPA):
4879 		le_read_peer_rpa(cmd, evt);
4880 		break;
4881 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_RPA):
4882 		le_read_local_rpa(cmd, evt);
4883 		break;
4884 	case BT_OCF(BT_HCI_OP_LE_SET_ADDR_RES_ENABLE):
4885 		le_set_addr_res_enable(cmd, evt);
4886 		break;
4887 	case BT_OCF(BT_HCI_OP_LE_SET_RPA_TIMEOUT):
4888 		le_set_rpa_timeout(cmd, evt);
4889 		break;
4890 	case BT_OCF(BT_HCI_OP_LE_SET_PRIVACY_MODE):
4891 		le_set_privacy_mode(cmd, evt);
4892 		break;
4893 #endif /* CONFIG_BT_CTLR_PRIVACY */
4894 
4895 	case BT_OCF(BT_HCI_OP_LE_READ_TX_POWER):
4896 		le_read_tx_power(cmd, evt);
4897 		break;
4898 
4899 #if defined(CONFIG_BT_CTLR_DF)
4900 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
4901 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_PARAMS):
4902 		le_df_set_cl_cte_tx_params(cmd, evt);
4903 		break;
4904 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_ENABLE):
4905 		le_df_set_cl_cte_enable(cmd, evt);
4906 		break;
4907 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
4908 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
4909 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_SAMPLING_ENABLE):
4910 		le_df_set_cl_iq_sampling_enable(cmd, evt);
4911 		break;
4912 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
4913 	case BT_OCF(BT_HCI_OP_LE_READ_ANT_INFO):
4914 		le_df_read_ant_inf(cmd, evt);
4915 		break;
4916 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
4917 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_TX_PARAMS):
4918 		le_df_set_conn_cte_tx_params(cmd, evt);
4919 		break;
4920 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
4921 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
4922 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_RX_PARAMS):
4923 		le_df_set_conn_cte_rx_params(cmd, evt);
4924 		break;
4925 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
4926 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
4927 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_REQ_ENABLE):
4928 		le_df_set_conn_cte_req_enable(cmd, evt);
4929 		break;
4930 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
4931 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
4932 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_RSP_ENABLE):
4933 		le_df_set_conn_cte_rsp_enable(cmd, evt);
4934 		break;
4935 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
4936 #endif /* CONFIG_BT_CTLR_DF */
4937 
4938 #if defined(CONFIG_BT_CTLR_DTM_HCI)
4939 	case BT_OCF(BT_HCI_OP_LE_RX_TEST):
4940 		le_rx_test(cmd, evt);
4941 		break;
4942 	case BT_OCF(BT_HCI_OP_LE_TX_TEST):
4943 		le_tx_test(cmd, evt);
4944 		break;
4945 	case BT_OCF(BT_HCI_OP_LE_TEST_END):
4946 		le_test_end(cmd, evt);
4947 		break;
4948 	case BT_OCF(BT_HCI_OP_LE_ENH_RX_TEST):
4949 		le_enh_rx_test(cmd, evt);
4950 		break;
4951 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
4952 	case BT_OCF(BT_HCI_OP_LE_RX_TEST_V3):
4953 		le_rx_test_v3(cmd, evt);
4954 		break;
4955 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
4956 	case BT_OCF(BT_HCI_OP_LE_ENH_TX_TEST):
4957 		le_enh_tx_test(cmd, evt);
4958 		break;
4959 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
4960 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V3):
4961 		le_tx_test_v3(cmd, evt);
4962 		break;
4963 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
4964 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
4965 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V4):
4966 		le_tx_test_v4(cmd, evt);
4967 		break;
4968 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
4969 #endif /* CONFIG_BT_CTLR_DTM_HCI */
4970 
4971 	default:
4972 		return -EINVAL;
4973 	}
4974 
4975 	return 0;
4976 }
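/* Any LE OCF not handled in the switch above returns -EINVAL, which the
 * command dispatcher is expected to translate into an Unknown HCI Command
 * response (assumption based on the usual handling elsewhere in this file).
 */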
4977 
4978 #if defined(CONFIG_BT_HCI_VS)
4979 static void vs_read_version_info(struct net_buf *buf, struct net_buf **evt)
4980 {
4981 	struct bt_hci_rp_vs_read_version_info *rp;
4982 
4983 	rp = hci_cmd_complete(evt, sizeof(*rp));
4984 
4985 	rp->status = 0x00;
4986 	rp->hw_platform = sys_cpu_to_le16(BT_HCI_VS_HW_PLAT);
4987 	rp->hw_variant = sys_cpu_to_le16(BT_HCI_VS_HW_VAR);
4988 
4989 	rp->fw_variant = 0U;
4990 	rp->fw_version = (KERNEL_VERSION_MAJOR & 0xff);
4991 	rp->fw_revision = sys_cpu_to_le16(KERNEL_VERSION_MINOR);
4992 	rp->fw_build = sys_cpu_to_le32(KERNEL_PATCHLEVEL & 0xffff);
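	/* For example, a build against Zephyr v3.6.0 (version chosen purely for
	 * illustration) reports fw_version = 3, fw_revision = 6 and
	 * fw_build = 0 in the fields above.
	 */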
4993 }
4994 
4995 static void vs_read_supported_commands(struct net_buf *buf,
4996 				       struct net_buf **evt)
4997 {
4998 	struct bt_hci_rp_vs_read_supported_commands *rp;
4999 
5000 	rp = hci_cmd_complete(evt, sizeof(*rp));
5001 
5002 	rp->status = 0x00;
5003 	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));
5004 
5005 	/* Set Version Information, Supported Commands, Supported Features. */
5006 	rp->commands[0] |= BIT(0) | BIT(1) | BIT(2);
5007 	/* Write BD_ADDR, Read Build Info */
5008 	rp->commands[0] |= BIT(5) | BIT(7);
5009 	/* Read Static Addresses, Read Key Hierarchy Roots */
5010 	rp->commands[1] |= BIT(0) | BIT(1);
5011 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
5012 	/* Set Scan Request Reports */
5013 	rp->commands[1] |= BIT(4);
5014 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
5015 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
5016 	/* Write Tx Power, Read Tx Power */
5017 	rp->commands[1] |= BIT(5) | BIT(6);
5018 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
5019 #if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
5020 	/* Read Supported USB Transport Modes */
5021 	rp->commands[1] |= BIT(7);
5022 	/* Set USB Transport Mode */
5023 	rp->commands[2] |= BIT(0);
#endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */
5025 }
5026 
5027 static void vs_read_supported_features(struct net_buf *buf,
5028 				       struct net_buf **evt)
5029 {
5030 	struct bt_hci_rp_vs_read_supported_features *rp;
5031 
5032 	rp = hci_cmd_complete(evt, sizeof(*rp));
5033 
5034 	rp->status = 0x00;
5035 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
5036 }
5037 
5038 uint8_t __weak hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[],
5039 					uint8_t size)
5040 {
5041 	ARG_UNUSED(addrs);
5042 	ARG_UNUSED(size);
5043 
5044 	return 0;
5045 }
5046 
5047 static void vs_write_bd_addr(struct net_buf *buf, struct net_buf **evt)
5048 {
5049 	struct bt_hci_cp_vs_write_bd_addr *cmd = (void *)buf->data;
5050 
5051 	ll_addr_set(0, &cmd->bdaddr.val[0]);
5052 
5053 	*evt = cmd_complete_status(0x00);
5054 }
5055 
5056 static void vs_read_build_info(struct net_buf *buf, struct net_buf **evt)
5057 {
5058 	struct bt_hci_rp_vs_read_build_info *rp;
5059 
5060 #define HCI_VS_BUILD_INFO "Zephyr OS v" \
5061 	KERNEL_VERSION_STRING CONFIG_BT_CTLR_HCI_VS_BUILD_INFO
5062 
5063 	const char build_info[] = HCI_VS_BUILD_INFO;
5064 
5065 #define BUILD_INFO_EVT_LEN (sizeof(struct bt_hci_evt_hdr) + \
5066 			    sizeof(struct bt_hci_evt_cmd_complete) + \
5067 			    sizeof(struct bt_hci_rp_vs_read_build_info) + \
5068 			    sizeof(build_info))
5069 
5070 	BUILD_ASSERT(CONFIG_BT_BUF_EVT_RX_SIZE >= BUILD_INFO_EVT_LEN);
5071 
5072 	rp = hci_cmd_complete(evt, sizeof(*rp) + sizeof(build_info));
5073 	rp->status = 0x00;
5074 	memcpy(rp->info, build_info, sizeof(build_info));
5075 }
5076 
5077 void __weak hci_vendor_read_key_hierarchy_roots(uint8_t ir[16], uint8_t er[16])
5078 {
5079 	/* Mark IR as invalid */
5080 	(void)memset(ir, 0x00, 16);
5081 
5082 	/* Mark ER as invalid */
5083 	(void)memset(er, 0x00, 16);
5084 }
5085 
5086 static void vs_read_static_addrs(struct net_buf *buf, struct net_buf **evt)
5087 {
5088 	struct bt_hci_rp_vs_read_static_addrs *rp;
5089 
5090 	rp = hci_cmd_complete(evt, sizeof(*rp) +
5091 				   sizeof(struct bt_hci_vs_static_addr));
5092 	rp->status = 0x00;
5093 	rp->num_addrs = hci_vendor_read_static_addr(rp->a, 1);
5094 }
5095 
5096 static void vs_read_key_hierarchy_roots(struct net_buf *buf,
5097 					struct net_buf **evt)
5098 {
5099 	struct bt_hci_rp_vs_read_key_hierarchy_roots *rp;
5100 
5101 	rp = hci_cmd_complete(evt, sizeof(*rp));
5102 	rp->status = 0x00;
5103 	hci_vendor_read_key_hierarchy_roots(rp->ir, rp->er);
5104 }
5105 
5106 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
5107 static void vs_set_min_used_chans(struct net_buf *buf, struct net_buf **evt)
5108 {
5109 	struct bt_hci_cp_vs_set_min_num_used_chans *cmd = (void *)buf->data;
5110 	uint16_t handle = sys_le16_to_cpu(cmd->handle);
5111 	uint8_t status;
5112 
5113 	status = ll_set_min_used_chans(handle, cmd->phys, cmd->min_used_chans);
5114 
5115 	*evt = cmd_complete_status(status);
5116 }
5117 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
5118 
5119 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
5120 static void vs_set_scan_req_reports(struct net_buf *buf, struct net_buf **evt)
5121 {
5122 	struct bt_hci_cp_vs_set_scan_req_reports *cmd = (void *)buf->data;
5123 
5124 	if (cmd->enable) {
5125 		vs_events_mask |= BT_EVT_MASK_VS_SCAN_REQ_RX;
5126 	} else {
5127 		vs_events_mask &= ~BT_EVT_MASK_VS_SCAN_REQ_RX;
5128 	}
5129 	*evt = cmd_complete_status(0x00);
5130 }
5131 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
5132 
5133 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
5134 static void vs_write_tx_power_level(struct net_buf *buf, struct net_buf **evt)
5135 {
5136 	struct bt_hci_cp_vs_write_tx_power_level *cmd = (void *)buf->data;
5137 	struct bt_hci_rp_vs_write_tx_power_level *rp;
5138 	uint8_t handle_type;
5139 	uint16_t handle;
5140 	uint8_t status;
5141 
5142 	handle_type = cmd->handle_type;
5143 	handle = sys_le16_to_cpu(cmd->handle);
5144 
5145 	rp = hci_cmd_complete(evt, sizeof(*rp));
5146 	rp->selected_tx_power = cmd->tx_power_level;
5147 
5148 	status = ll_tx_pwr_lvl_set(handle_type, handle, &rp->selected_tx_power);
5149 
5150 	rp->status = status;
5151 	rp->handle_type = handle_type;
5152 	rp->handle = sys_cpu_to_le16(handle);
5153 }
5154 
5155 static void vs_read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
5156 {
5157 	struct bt_hci_cp_vs_read_tx_power_level *cmd = (void *)buf->data;
5158 	struct bt_hci_rp_vs_read_tx_power_level *rp;
5159 	uint8_t handle_type;
5160 	uint16_t handle;
5161 	uint8_t status;
5162 
5163 	handle_type = cmd->handle_type;
5164 	handle = sys_le16_to_cpu(cmd->handle);
5165 
5166 	rp = hci_cmd_complete(evt, sizeof(*rp));
5167 
5168 	status = ll_tx_pwr_lvl_get(handle_type, handle, 0, &rp->tx_power_level);
5169 
5170 	rp->status = status;
5171 	rp->handle_type = handle_type;
5172 	rp->handle = sys_cpu_to_le16(handle);
5173 }
5174 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
5175 
5176 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* A memory pool for vendor specific events for fatal error reporting purposes. */
5178 NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE,
5179 			  sizeof(struct bt_buf_data), NULL);
5180 
/* An alias for convenience of the Controller HCI implementation. The Controller is built
 * for a particular architecture, hence the alias avoids conditional compilation at its
 * users. The Host may not be aware of the hardware architecture the Controller runs on,
 * hence all CPU data types for the supported architectures should be available during the
 * build, and the alias is therefore defined here.
 */
5187 #if defined(CONFIG_CPU_CORTEX_M)
typedef struct bt_hci_vs_fatal_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;
5189 
5190 static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
5191 				       const struct arch_esf *esf)
5192 {
5193 	cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
5194 	cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
5195 	cpu_data->a3 = sys_cpu_to_le32(esf->basic.a3);
5196 	cpu_data->a4 = sys_cpu_to_le32(esf->basic.a4);
5197 	cpu_data->ip = sys_cpu_to_le32(esf->basic.ip);
5198 	cpu_data->lr = sys_cpu_to_le32(esf->basic.lr);
5199 	cpu_data->xpsr = sys_cpu_to_le32(esf->basic.xpsr);
5200 }
5201 #endif /* CONFIG_CPU_CORTEX_M */
5202 
5203 static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
5204 {
5205 	struct net_buf *buf;
5206 
5207 	buf = net_buf_alloc(&vs_err_tx_pool, K_FOREVER);
5208 	if (buf) {
5209 		struct bt_hci_evt_le_meta_event *me;
5210 		struct bt_hci_evt_hdr *hdr;
5211 
5212 		net_buf_reserve(buf, BT_BUF_RESERVE);
5213 		bt_buf_set_type(buf, BT_BUF_EVT);
5214 
5215 		hdr = net_buf_add(buf, sizeof(*hdr));
5216 		hdr->evt = BT_HCI_EVT_VENDOR;
5217 		hdr->len = len + sizeof(*me);
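		/* The resulting event layout is: HCI event header with
		 * evt = BT_HCI_EVT_VENDOR (0xFF) and len covering the subevent
		 * code plus payload, followed by the subevent code set below;
		 * the payload itself is appended by the caller.
		 */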
5218 
5219 		me = net_buf_add(buf, sizeof(*me));
5220 		me->subevent = subevt;
5221 	}
5222 
5223 	return buf;
5224 }
5225 
5226 struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf)
5227 {
5228 	/* Prepare vendor specific HCI Fatal Error event */
5229 	struct bt_hci_vs_fatal_error_stack_frame *sf;
5230 	bt_hci_vs_fatal_error_cpu_data *cpu_data;
5231 	struct net_buf *buf;
5232 
5233 	buf = vs_err_evt_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_STACK_FRAME,
5234 				sizeof(*sf) + sizeof(*cpu_data));
5235 	if (buf != NULL) {
5236 		sf = net_buf_add(buf, (sizeof(*sf) + sizeof(*cpu_data)));
5237 		sf->reason = sys_cpu_to_le32(reason);
5238 		sf->cpu_type = BT_HCI_EVT_VS_ERROR_CPU_TYPE_CORTEX_M;
5239 
5240 		vs_err_fatal_cpu_data_fill(
5241 			(bt_hci_vs_fatal_error_cpu_data *)sf->cpu_data, esf);
5242 	} else {
5243 		LOG_ERR("Can't create HCI Fatal Error event");
5244 	}
5245 
5246 	return buf;
5247 }
5248 
5249 static struct net_buf *hci_vs_err_trace_create(uint8_t data_type,
5250 					       const char *file_path,
5251 					       uint32_t line, uint64_t pc)
5252 {
5253 	uint32_t file_name_len = 0U, pos = 0U;
5254 	struct net_buf *buf = NULL;
5255 
5256 	if (file_path) {
5257 		/* Extract file name from a path */
5258 		while (file_path[file_name_len] != '\0') {
5259 			if (file_path[file_name_len] == '/') {
5260 				pos = file_name_len + 1;
5261 			}
5262 			file_name_len++;
5263 		}
5264 		file_path += pos;
5265 		file_name_len -= pos;
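		/* For example, a file_path of "subsys/bluetooth/controller/hci/hci.c"
		 * leaves file_path pointing at "hci.c" with file_name_len == 5.
		 */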
5266 
		/* Proceed only if a file name was found in file_path, in other words:
		 * file_path is not an empty string and does not end in a path separator
		 * as in `foo/bar/`.
		 */
5270 		if (file_name_len) {
5271 			/* Total data length: len = file name strlen + \0 + sizeof(line number)
5272 			 * Maximum length of an HCI event data is BT_BUF_EVT_RX_SIZE. If total data
5273 			 * length exceeds this maximum, truncate file name.
5274 			 */
5275 			uint32_t data_len = 1 + sizeof(line);
5276 
			/* If the buffer is created for TRACE data, include sizeof(pc) in the
			 * total length.
			 */
5280 			if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5281 				data_len += sizeof(pc);
5282 			}
5283 
5284 			if (data_len + file_name_len > BT_BUF_EVT_RX_SIZE) {
5285 				uint32_t overflow_len =
5286 					file_name_len + data_len - BT_BUF_EVT_RX_SIZE;
5287 
5288 				/* Truncate the file name length by number of overflow bytes */
5289 				file_name_len -= overflow_len;
5290 			}
5291 
5292 			/* Get total event data length including file name length */
5293 			data_len += file_name_len;
5294 
5295 			/* Prepare vendor specific HCI Fatal Error event */
5296 			buf = vs_err_evt_create(data_type, data_len);
5297 			if (buf != NULL) {
5298 				if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
5299 					net_buf_add_le64(buf, pc);
5300 				}
5301 				net_buf_add_mem(buf, file_path, file_name_len);
5302 				net_buf_add_u8(buf, STR_NULL_TERMINATOR);
5303 				net_buf_add_le32(buf, line);
5304 			} else {
5305 				LOG_ERR("Can't create HCI Fatal Error event");
5306 			}
5307 		}
5308 	}
5309 
5310 	return buf;
5311 }
5312 
5313 struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc)
5314 {
5315 	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE, file, line, pc);
5316 }
5317 
5318 struct net_buf *hci_vs_err_assert(const char *file, uint32_t line)
5319 {
	/* ASSERT data does not contain a PC value, hence a zero constant is used */
5321 	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_CTRL_ASSERT, file, line, 0U);
5322 }
5323 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
5324 
5325 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
5326 static void vs_le_df_connectionless_iq_report(struct pdu_data *pdu_rx, struct node_rx_pdu *node_rx,
5327 					      struct net_buf *buf)
5328 {
5329 	struct bt_hci_evt_vs_le_connectionless_iq_report *sep;
5330 	struct node_rx_iq_report *iq_report;
5331 	struct lll_sync *lll;
5332 	uint8_t samples_cnt;
5333 	int16_t rssi;
5334 	uint16_t sync_handle;
5335 	uint16_t per_evt_counter;
5336 	struct ll_sync_set *sync = NULL;
5337 
5338 	iq_report = (struct node_rx_iq_report *)node_rx;
5339 
5340 	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTIONLESS_IQ_REPORT)) {
5341 		return;
5342 	}
5343 
5344 	lll = iq_report->rx.rx_ftr.param;
5345 
5346 	sync = HDR_LLL2ULL(lll);
5347 
	/* The TX LL thread has higher priority than the RX thread. It may happen
	 * that the host successfully disables CTE sampling in the meantime.
	 * That is verified here, to avoid reporting IQ samples after the
	 * functionality was disabled or after sync was lost.
	 */
5353 	if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) || !sync->timeout_reload) {
5354 		/* Drop further processing of the event. */
5355 		return;
5356 	}
5357 
5358 	/* Get the sync handle corresponding to the LLL context passed in the
5359 	 * node rx footer field.
5360 	 */
5361 	sync_handle = ull_sync_handle_get(sync);
5362 	per_evt_counter = iq_report->event_counter;
5363 
5364 	/* If packet status does not indicate insufficient resources for IQ samples and for
5365 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
5366 	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE
5367 	 * value.
5368 	 */
5369 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5370 		samples_cnt = 0U;
5371 	} else {
5372 		samples_cnt = MAX(1, iq_report->sample_count);
5373 	}
5374 
5375 	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
5376 		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));
5377 
5378 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);
5379 
5380 	sep->sync_handle = sys_cpu_to_le16(sync_handle);
5381 	sep->rssi = sys_cpu_to_le16(rssi);
5382 	sep->rssi_ant_id = iq_report->rssi_ant_id;
5383 	sep->cte_type = iq_report->cte_info.type;
5384 
5385 	sep->chan_idx = iq_report->chan_idx;
5386 	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);
5387 
5388 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
5389 		sep->slot_durations = iq_report->local_slot_durations;
5390 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
5391 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
5392 	} else {
5393 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
5394 	}
5395 
5396 	sep->packet_status = iq_report->packet_status;
5397 
5398 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5399 		if (iq_report->sample_count == 0U) {
5400 			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5401 			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5402 		} else {
5403 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
5404 				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
5405 				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
5406 			}
5407 		}
5408 	}
5409 
5410 	sep->sample_count = samples_cnt;
5411 }
5412 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
5413 
5414 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
5415 static void vs_le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
5416 {
5417 	struct bt_hci_evt_vs_le_connection_iq_report *sep;
5418 	struct node_rx_iq_report *iq_report;
5419 	struct lll_conn *lll;
5420 	uint8_t samples_cnt;
5421 	uint8_t phy_rx;
5422 	int16_t rssi;
5423 
5424 	iq_report = (struct node_rx_iq_report *)node_rx;
5425 
5426 	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTION_IQ_REPORT)) {
5427 		return;
5428 	}
5429 
5430 	lll = iq_report->rx.rx_ftr.param;
5431 
5432 #if defined(CONFIG_BT_CTLR_PHY)
5433 	phy_rx = lll->phy_rx;
5434 
5435 	/* Make sure the report is generated for connection on PHY UNCODED */
5436 	LL_ASSERT(phy_rx != PHY_CODED);
5437 #else
5438 	phy_rx = PHY_1M;
5439 #endif /* CONFIG_BT_CTLR_PHY */
5440 
	/* The TX LL thread has higher priority than the RX thread. It may happen that the host
	 * successfully disables CTE sampling in the meantime. That is verified here, to avoid
	 * reporting IQ samples after the functionality was disabled.
	 */
5445 	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
5447 		return;
5448 	}
5449 
5450 	/* If packet status does not indicate insufficient resources for IQ samples and for
5451 	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
5452 	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE value.
5453 	 */
5454 	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5455 		samples_cnt = 0U;
5456 	} else {
5457 		samples_cnt = MAX(1, iq_report->sample_count);
5458 	}
5459 
5460 	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT,
5461 			(sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));
5462 
5463 	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);
5464 
5465 	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
5466 	sep->rx_phy = phy_rx;
5467 	sep->rssi = sys_cpu_to_le16(rssi);
5468 	sep->rssi_ant_id = iq_report->rssi_ant_id;
5469 	sep->cte_type = iq_report->cte_info.type;
5470 
5471 	sep->data_chan_idx = iq_report->chan_idx;
5472 	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);
5473 
5474 	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
5475 		sep->slot_durations = iq_report->local_slot_durations;
5476 	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
5477 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
5478 	} else {
5479 		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
5480 	}
5481 
5482 	sep->packet_status = iq_report->packet_status;
5483 
5484 	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
5485 		if (iq_report->sample_count == 0U) {
5486 			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5487 			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
5488 		} else {
5489 			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
5490 				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
5491 				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
5492 			}
5493 		}
5494 	}
5495 
5496 	sep->sample_count = samples_cnt;
5497 }
5498 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
5499 
5500 #if defined(CONFIG_BT_HCI_MESH_EXT)
5501 static void mesh_get_opts(struct net_buf *buf, struct net_buf **evt)
5502 {
5503 	struct bt_hci_rp_mesh_get_opts *rp;
5504 
5505 	rp = hci_cmd_complete(evt, sizeof(*rp));
5506 
5507 	rp->status = 0x00;
5508 	rp->opcode = BT_HCI_OC_MESH_GET_OPTS;
5509 
5510 	rp->revision = BT_HCI_MESH_REVISION;
5511 	rp->ch_map = 0x7;
5512 	/*@todo: nRF51 only */
5513 	rp->min_tx_power = -30;
5514 	/*@todo: nRF51 only */
5515 	rp->max_tx_power = 4;
5516 	rp->max_scan_filter = CONFIG_BT_CTLR_MESH_SCAN_FILTERS;
5517 	rp->max_filter_pattern = CONFIG_BT_CTLR_MESH_SF_PATTERNS;
5518 	rp->max_adv_slot = 1U;
5519 	rp->evt_prefix_len = 0x01;
5520 	rp->evt_prefix = BT_HCI_MESH_EVT_PREFIX;
5521 }
5522 
5523 static void mesh_set_scan_filter(struct net_buf *buf, struct net_buf **evt)
5524 {
5525 	struct bt_hci_cp_mesh_set_scan_filter *cmd = (void *)buf->data;
5526 	struct bt_hci_rp_mesh_set_scan_filter *rp;
5527 	uint8_t filter = cmd->scan_filter - 1;
5528 	struct scan_filter *f;
5529 	uint8_t status = 0x00;
5530 	uint8_t i;
5531 
	if (filter >= ARRAY_SIZE(scan_filters) ||
5533 	    cmd->num_patterns > CONFIG_BT_CTLR_MESH_SF_PATTERNS) {
5534 		status = BT_HCI_ERR_INVALID_PARAM;
5535 		goto exit;
5536 	}
5537 
5538 	if (filter == sf_curr) {
5539 		status = BT_HCI_ERR_CMD_DISALLOWED;
5540 		goto exit;
5541 	}
5542 
5543 	/* duplicate filtering not supported yet */
5544 	if (cmd->filter_dup) {
5545 		status = BT_HCI_ERR_INVALID_PARAM;
5546 		goto exit;
5547 	}
5548 
5549 	f = &scan_filters[filter];
5550 	for (i = 0U; i < cmd->num_patterns; i++) {
5551 		if (!cmd->patterns[i].pattern_len ||
5552 		    cmd->patterns[i].pattern_len >
5553 		    BT_HCI_MESH_PATTERN_LEN_MAX) {
5554 			status = BT_HCI_ERR_INVALID_PARAM;
5555 			goto exit;
5556 		}
5557 		f->lengths[i] = cmd->patterns[i].pattern_len;
5558 		memcpy(f->patterns[i], cmd->patterns[i].pattern, f->lengths[i]);
5559 	}
5560 
5561 	f->count = cmd->num_patterns;
5562 
5563 exit:
5564 	rp = hci_cmd_complete(evt, sizeof(*rp));
5565 	rp->status = status;
5566 	rp->opcode = BT_HCI_OC_MESH_SET_SCAN_FILTER;
5567 	rp->scan_filter = filter + 1;
5568 }
5569 
5570 static void mesh_advertise(struct net_buf *buf, struct net_buf **evt)
5571 {
5572 	struct bt_hci_cp_mesh_advertise *cmd = (void *)buf->data;
5573 	struct bt_hci_rp_mesh_advertise *rp;
5574 	uint8_t adv_slot = cmd->adv_slot;
5575 	uint8_t status;
5576 
5577 	status = ll_mesh_advertise(adv_slot,
5578 				   cmd->own_addr_type, cmd->random_addr.val,
5579 				   cmd->ch_map, cmd->tx_power,
5580 				   cmd->min_tx_delay, cmd->max_tx_delay,
5581 				   cmd->retx_count, cmd->retx_interval,
5582 				   cmd->scan_duration, cmd->scan_delay,
5583 				   cmd->scan_filter, cmd->data_len, cmd->data);
5584 	if (!status) {
5585 		/* Yields 0xFF if no scan filter selected */
5586 		sf_curr = cmd->scan_filter - 1;
5587 	}
5588 
5589 	rp = hci_cmd_complete(evt, sizeof(*rp));
5590 	rp->status = status;
5591 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE;
5592 	rp->adv_slot = adv_slot;
5593 }
5594 
5595 static void mesh_advertise_cancel(struct net_buf *buf, struct net_buf **evt)
5596 {
5597 	struct bt_hci_cp_mesh_advertise_cancel *cmd = (void *)buf->data;
5598 	struct bt_hci_rp_mesh_advertise_cancel *rp;
5599 	uint8_t adv_slot = cmd->adv_slot;
5600 	uint8_t status;
5601 
5602 	status = ll_mesh_advertise_cancel(adv_slot);
5603 	if (!status) {
		/* 0xFF marks that no scan filter is currently selected */
5605 		sf_curr = 0xFF;
5606 	}
5607 
5608 	rp = hci_cmd_complete(evt, sizeof(*rp));
5609 	rp->status = status;
5610 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE_CANCEL;
5611 	rp->adv_slot = adv_slot;
5612 }
5613 
5614 static int mesh_cmd_handle(struct net_buf *cmd, struct net_buf **evt)
5615 {
5616 	struct bt_hci_cp_mesh *cp_mesh;
5617 	uint8_t mesh_op;
5618 
5619 	if (cmd->len < sizeof(*cp_mesh)) {
5620 		LOG_ERR("No HCI VSD Command header");
5621 		return -EINVAL;
5622 	}
5623 
5624 	cp_mesh = net_buf_pull_mem(cmd, sizeof(*cp_mesh));
5625 	mesh_op = cp_mesh->opcode;
5626 
5627 	switch (mesh_op) {
5628 	case BT_HCI_OC_MESH_GET_OPTS:
5629 		mesh_get_opts(cmd, evt);
5630 		break;
5631 
5632 	case BT_HCI_OC_MESH_SET_SCAN_FILTER:
5633 		mesh_set_scan_filter(cmd, evt);
5634 		break;
5635 
5636 	case BT_HCI_OC_MESH_ADVERTISE:
5637 		mesh_advertise(cmd, evt);
5638 		break;
5639 
5640 	case BT_HCI_OC_MESH_ADVERTISE_CANCEL:
5641 		mesh_advertise_cancel(cmd, evt);
5642 		break;
5643 
5644 	default:
5645 		return -EINVAL;
5646 	}
5647 
5648 	return 0;
5649 }
5650 #endif /* CONFIG_BT_HCI_MESH_EXT */
5651 
5652 int hci_vendor_cmd_handle_common(uint16_t ocf, struct net_buf *cmd,
5653 				 struct net_buf **evt)
5654 {
5655 	switch (ocf) {
5656 	case BT_OCF(BT_HCI_OP_VS_READ_VERSION_INFO):
5657 		vs_read_version_info(cmd, evt);
5658 		break;
5659 
5660 	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS):
5661 		vs_read_supported_commands(cmd, evt);
5662 		break;
5663 
5664 	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES):
5665 		vs_read_supported_features(cmd, evt);
5666 		break;
5667 
5668 #if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
5669 	case BT_OCF(BT_HCI_OP_VS_READ_USB_TRANSPORT_MODE):
5670 		break;
5671 	case BT_OCF(BT_HCI_OP_VS_SET_USB_TRANSPORT_MODE):
5672 		reset(cmd, evt);
5673 		break;
5674 #endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */
5675 
5676 	case BT_OCF(BT_HCI_OP_VS_READ_BUILD_INFO):
5677 		vs_read_build_info(cmd, evt);
5678 		break;
5679 
5680 	case BT_OCF(BT_HCI_OP_VS_WRITE_BD_ADDR):
5681 		vs_write_bd_addr(cmd, evt);
5682 		break;
5683 
5684 	case BT_OCF(BT_HCI_OP_VS_READ_STATIC_ADDRS):
5685 		vs_read_static_addrs(cmd, evt);
5686 		break;
5687 
5688 	case BT_OCF(BT_HCI_OP_VS_READ_KEY_HIERARCHY_ROOTS):
5689 		vs_read_key_hierarchy_roots(cmd, evt);
5690 		break;
5691 
5692 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
5693 	case BT_OCF(BT_HCI_OP_VS_SET_SCAN_REQ_REPORTS):
5694 		vs_set_scan_req_reports(cmd, evt);
5695 		break;
5696 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
5697 
5698 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
5699 	case BT_OCF(BT_HCI_OP_VS_WRITE_TX_POWER_LEVEL):
5700 		vs_write_tx_power_level(cmd, evt);
5701 		break;
5702 
5703 	case BT_OCF(BT_HCI_OP_VS_READ_TX_POWER_LEVEL):
5704 		vs_read_tx_power_level(cmd, evt);
5705 		break;
5706 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
5707 
5708 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
5709 	case BT_OCF(BT_HCI_OP_VS_SET_MIN_NUM_USED_CHANS):
5710 		vs_set_min_used_chans(cmd, evt);
5711 		break;
5712 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
5713 
5714 #if defined(CONFIG_BT_HCI_MESH_EXT)
5715 	case BT_OCF(BT_HCI_OP_VS_MESH):
5716 		mesh_cmd_handle(cmd, evt);
5717 		break;
5718 #endif /* CONFIG_BT_HCI_MESH_EXT */
5719 
5720 	default:
5721 		return -EINVAL;
5722 	}
5723 
5724 	return 0;
5725 }
#endif /* CONFIG_BT_HCI_VS */
5727 
5728 struct net_buf *hci_cmd_handle(struct net_buf *cmd, void **node_rx)
5729 {
5730 	struct bt_hci_cmd_hdr *chdr;
5731 	struct net_buf *evt = NULL;
5732 	uint16_t ocf;
5733 	int err;
5734 
5735 	if (cmd->len < sizeof(*chdr)) {
5736 		LOG_ERR("No HCI Command header");
5737 		return NULL;
5738 	}
5739 
5740 	chdr = net_buf_pull_mem(cmd, sizeof(*chdr));
5741 	if (cmd->len < chdr->param_len) {
5742 		LOG_ERR("Invalid HCI CMD packet length");
5743 		return NULL;
5744 	}
5745 
5746 	/* store in a global for later CC/CS event creation */
5747 	_opcode = sys_le16_to_cpu(chdr->opcode);
5748 
5749 	ocf = BT_OCF(_opcode);
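	/* The 16-bit opcode packs the OGF in the upper 6 bits and the OCF in the
	 * lower 10 bits, e.g. opcode 0x200B (LE Set Scan Parameters) splits into
	 * OGF 0x08 (BT_OGF_LE) and OCF 0x000B.
	 */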
5750 
5751 	switch (BT_OGF(_opcode)) {
5752 	case BT_OGF_LINK_CTRL:
5753 		err = link_control_cmd_handle(ocf, cmd, &evt);
5754 		break;
5755 	case BT_OGF_BASEBAND:
5756 		err = ctrl_bb_cmd_handle(ocf, cmd, &evt);
5757 		break;
5758 	case BT_OGF_INFO:
5759 		err = info_cmd_handle(ocf, cmd, &evt);
5760 		break;
5761 	case BT_OGF_STATUS:
5762 		err = status_cmd_handle(ocf, cmd, &evt);
5763 		break;
5764 	case BT_OGF_LE:
5765 		err = controller_cmd_handle(ocf, cmd, &evt, node_rx);
5766 		break;
5767 #if defined(CONFIG_BT_HCI_VS)
5768 	case BT_OGF_VS:
5769 		err = hci_vendor_cmd_handle(ocf, cmd, &evt);
5770 		break;
5771 #endif
5772 	default:
5773 		err = -EINVAL;
5774 		break;
5775 	}
5776 
5777 	if (err == -EINVAL) {
5778 		evt = cmd_status(BT_HCI_ERR_UNKNOWN_CMD);
5779 	}
5780 
5781 	return evt;
5782 }
5783 
5784 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
5785 	defined(CONFIG_BT_CTLR_CONN_ISO)
5786 static void data_buf_overflow(struct net_buf **buf, uint8_t link_type)
5787 {
5788 	struct bt_hci_evt_data_buf_overflow *ep;
5789 
5790 	if (!(event_mask & BT_EVT_MASK_DATA_BUFFER_OVERFLOW)) {
5791 		return;
5792 	}
5793 
5794 	*buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
5795 	hci_evt_create(*buf, BT_HCI_EVT_DATA_BUF_OVERFLOW, sizeof(*ep));
5796 	ep = net_buf_add(*buf, sizeof(*ep));
5797 
5798 	ep->link_type = link_type;
5799 }
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO ||
5801 	* CONFIG_BT_CTLR_CONN_ISO
5802 	*/
5803 
5804 #if defined(CONFIG_BT_CONN)
5805 int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
5806 {
5807 	struct node_tx *node_tx;
5808 	struct bt_hci_acl_hdr *acl;
5809 	struct pdu_data *pdu_data;
5810 	uint16_t handle;
5811 	uint8_t flags;
5812 	uint16_t len;
5813 
5814 	*evt = NULL;
5815 
5816 	if (buf->len < sizeof(*acl)) {
5817 		LOG_ERR("No HCI ACL header");
5818 		return -EINVAL;
5819 	}
5820 
5821 	acl = net_buf_pull_mem(buf, sizeof(*acl));
5822 	len = sys_le16_to_cpu(acl->len);
5823 	handle = sys_le16_to_cpu(acl->handle);
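	/* The 16-bit handle field carries the connection handle in bits 0..11,
	 * the Packet Boundary flag in bits 12..13 and the Broadcast flag in
	 * bits 14..15; bt_acl_handle() and bt_acl_flags() below separate them.
	 */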
5824 
5825 	if (buf->len < len) {
5826 		LOG_ERR("Invalid HCI ACL packet length");
5827 		return -EINVAL;
5828 	}
5829 
5830 	if (len > LL_LENGTH_OCTETS_TX_MAX) {
5831 		LOG_ERR("Invalid HCI ACL Data length");
5832 		return -EINVAL;
5833 	}
5834 
5835 	/* assigning flags first because handle will be overwritten */
5836 	flags = bt_acl_flags(handle);
5837 	handle = bt_acl_handle(handle);
5838 
5839 	node_tx = ll_tx_mem_acquire();
5840 	if (!node_tx) {
5841 		LOG_ERR("Tx Buffer Overflow");
5842 		data_buf_overflow(evt, BT_OVERFLOW_LINK_ACL);
5843 		return -ENOBUFS;
5844 	}
5845 
5846 	pdu_data = (void *)node_tx->pdu;
5847 
5848 	if (bt_acl_flags_bc(flags) != BT_ACL_POINT_TO_POINT) {
5849 		return -EINVAL;
5850 	}
5851 
5852 	switch (bt_acl_flags_pb(flags)) {
5853 	case BT_ACL_START_NO_FLUSH:
5854 		pdu_data->ll_id = PDU_DATA_LLID_DATA_START;
5855 		break;
5856 	case BT_ACL_CONT:
5857 		pdu_data->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
5858 		break;
5859 	default:
5860 		/* BT_ACL_START and BT_ACL_COMPLETE not allowed on LE-U
5861 		 * from Host to Controller
5862 		 */
5863 		return -EINVAL;
5864 	}
5865 
5866 	pdu_data->len = len;
5867 	memcpy(&pdu_data->lldata[0], buf->data, len);
5868 
5869 	if (ll_tx_mem_enqueue(handle, node_tx)) {
5870 		LOG_ERR("Invalid Tx Enqueue");
5871 		ll_tx_mem_release(node_tx);
5872 		return -EINVAL;
5873 	}
5874 
5875 	return 0;
5876 }
5877 #endif /* CONFIG_BT_CONN */
5878 
5879 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
5880 int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
5881 {
5882 	struct bt_hci_iso_sdu_hdr *iso_sdu_hdr;
5883 	struct isoal_sdu_tx sdu_frag_tx;
5884 	struct bt_hci_iso_hdr *iso_hdr;
5885 	uint32_t *time_stamp;
5886 	uint16_t handle;
5887 	uint8_t pb_flag;
5888 	uint8_t ts_flag;
5889 	uint8_t flags;
5890 	uint16_t len;
5891 
5892 	iso_sdu_hdr = NULL;
5893 	*evt  = NULL;
5894 
5895 	if (buf->len < sizeof(*iso_hdr)) {
5896 		LOG_ERR("No HCI ISO header");
5897 		return -EINVAL;
5898 	}
5899 
5900 	iso_hdr = net_buf_pull_mem(buf, sizeof(*iso_hdr));
5901 	handle = sys_le16_to_cpu(iso_hdr->handle);
5902 	len = bt_iso_hdr_len(sys_le16_to_cpu(iso_hdr->len));
5903 
5904 	if (buf->len < len) {
5905 		LOG_ERR("Invalid HCI ISO packet length");
5906 		return -EINVAL;
5907 	}
5908 
5909 	/* Assigning flags first because handle will be overwritten */
5910 	flags = bt_iso_flags(handle);
5911 	pb_flag = bt_iso_flags_pb(flags);
5912 	ts_flag = bt_iso_flags_ts(flags);
5913 	handle = bt_iso_handle(handle);
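	/* HCI ISO Data packet layout being parsed: handle plus PB/TS flags
	 * (2 octets), ISO data load length (2 octets), an optional time stamp
	 * (4 octets) when TS_Flag is set, an optional SDU header with packet
	 * sequence number (2 octets) and SDU length/status (2 octets) for the
	 * first fragment, followed by the SDU fragment itself.
	 */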
5914 
5915 	/* Extract time stamp */
5916 	/* Set default to current time
	 * BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
5918 	 * 3.1 Time_Offset in framed PDUs :
5919 	 * The Controller transmitting a SDU may use any of the following
5920 	 * methods to determine the value of the SDU reference time:
5921 	 * -- A captured time stamp of the SDU
5922 	 * -- A time stamp provided by the higher layer
5923 	 * -- A computed time stamp based on a sequence counter provided by the
5924 	 *    higher layer
5925 	 * -- Any other method of determining Time_Offset
5926 	 *    (Uses a timestamp computed from the difference in provided
5927 	 *    timestamps, if the timestamp is deemed not based on the
5928 	 *    controller's clock)
5929 	 */
5930 	sdu_frag_tx.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
5931 	if (ts_flag) {
5932 		/* Use HCI provided time stamp */
5933 		time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
5934 		len -= sizeof(*time_stamp);
5935 		sdu_frag_tx.time_stamp = sys_le32_to_cpu(*time_stamp);
5936 	} else {
5937 		/* Use controller's capture time */
5938 		sdu_frag_tx.time_stamp = sdu_frag_tx.cntr_time_stamp;
5939 	}
5940 
5941 	/* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
5942 	if ((pb_flag & 0x01) == 0) {
5943 		iso_sdu_hdr = net_buf_pull_mem(buf, sizeof(*iso_sdu_hdr));
5944 		len -= sizeof(*iso_sdu_hdr);
5945 		sdu_frag_tx.packet_sn = sys_le16_to_cpu(iso_sdu_hdr->sn);
5946 		sdu_frag_tx.iso_sdu_length =
5947 			sys_le16_to_cpu(bt_iso_pkt_len(iso_sdu_hdr->slen));
5948 	} else {
5949 		sdu_frag_tx.packet_sn = 0;
5950 		sdu_frag_tx.iso_sdu_length = 0;
5951 	}
5952 
5953 	/* Packet boundary flags should be bitwise identical to the SDU state
5954 	 * 0b00 BT_ISO_START
5955 	 * 0b01 BT_ISO_CONT
5956 	 * 0b10 BT_ISO_SINGLE
5957 	 * 0b11 BT_ISO_END
5958 	 */
5959 	sdu_frag_tx.sdu_state = pb_flag;
5960 	/* Fill in SDU buffer fields */
5961 	sdu_frag_tx.dbuf = buf->data;
5962 	sdu_frag_tx.size = len;
5963 
5964 	if (false) {
5965 
5966 #if defined(CONFIG_BT_CTLR_CONN_ISO)
5967 	/* Extract source handle from CIS or BIS handle by way of header and
5968 	 * data path
5969 	 */
5970 	} else if (IS_CIS_HANDLE(handle)) {
5971 		struct ll_conn_iso_stream *cis;
5972 		struct ll_conn_iso_group *cig;
5973 		struct ll_iso_stream_hdr *hdr;
5974 		struct ll_iso_datapath *dp_in;
5975 		uint8_t event_offset;
5976 
5977 		cis = ll_iso_stream_connected_get(handle);
5978 		if (!cis) {
5979 			return -EINVAL;
5980 		}
5981 
5982 		cig = cis->group;
5983 
5984 		/* We must ensure sufficient time for ISO-AL to fragment SDU and
5985 		 * deliver PDUs to the TX queue. By checking ull_ref_get, we
5986 		 * know if we are within the subevents of an ISO event. If so,
5987 		 * we can assume that we have enough time to deliver in the next
5988 		 * ISO event. If we're not active within the ISO event, we don't
5989 		 * know if there is enough time to deliver in the next event,
5990 		 * and for safety we set the target to current event + 2.
5991 		 *
5992 		 * For FT > 1, we have the opportunity to retransmit in later
5993 		 * event(s), in which case we have the option to target an
5994 		 * earlier event (this or next) because being late does not
5995 		 * instantly flush the payload.
5996 		 */
5997 
5998 		event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
5999 
6000 		if (cis->lll.tx.ft > 1) {
6001 			/* FT > 1, target an earlier event */
6002 			event_offset -= 1;
6003 		}
6004 
6005 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
6006 		uint64_t event_count;
6007 		uint64_t pkt_seq_num;
6008 
6009 		/* Catch up local pkt_seq_num with internal pkt_seq_num */
6010 		event_count = cis->lll.event_count + event_offset;
6011 		pkt_seq_num = event_count + 1U;
6012 
		/* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10),
		 * i.e. pb_flag is an even value, then check that pkt_seq_num is
		 * a future sequence number value compared to the last recorded
		 * number in cis->pkt_seq_num.
		 *
		 * When (pkt_seq_num - cis->pkt_seq_num) is negative then
		 * BIT64(39) will be set (2's complement value). A diff value
		 * less than or equal to BIT64_MASK(38) means the diff value is
		 * positive and hence pkt_seq_num is greater than
		 * cis->pkt_seq_num. This calculation is also valid when the
		 * value rolls over.
		 */
6025 		if (!(pb_flag & 0x01) &&
6026 		    (((pkt_seq_num - cis->pkt_seq_num) &
6027 		      BIT64_MASK(39)) <= BIT64_MASK(38))) {
6028 			cis->pkt_seq_num = pkt_seq_num;
6029 		} else {
6030 			pkt_seq_num = cis->pkt_seq_num;
6031 		}
6032 
		/* Pre-increment when pb_flag is BT_ISO_SINGLE (0b10) or
		 * BT_ISO_END (0b11), i.e. when bit 1 of pb_flag is set, for the
		 * next ISO data packet sequence number comparison.
		 */
		if (pb_flag & BIT(1)) {
6038 			cis->pkt_seq_num++;
6039 		}
6040 
6041 		/* Target next ISO event to avoid overlapping with, if any,
6042 		 * current ISO event
6043 		 */
6044 		pkt_seq_num++;
6045 		sdu_frag_tx.target_event = pkt_seq_num;
6046 		sdu_frag_tx.grp_ref_point =
6047 			isoal_get_wrapped_time_us(cig->cig_ref_point,
6048 						  ((pkt_seq_num - event_count) *
6049 						   cig->iso_interval *
6050 						   ISO_INT_UNIT_US));
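		/* For example (assumed values): with iso_interval == 8, i.e. 10 ms,
		 * and pkt_seq_num one ahead of event_count, grp_ref_point is the
		 * CIG reference point advanced by 10000 us, wrapped as needed.
		 */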
6051 
6052 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6053 		sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
6054 		sdu_frag_tx.grp_ref_point =
6055 			isoal_get_wrapped_time_us(cig->cig_ref_point,
6056 						  (event_offset *
6057 						   cig->iso_interval *
6058 						   ISO_INT_UNIT_US));
6059 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6060 
6061 		/* Get controller's input data path for CIS */
6062 		hdr = &cis->hdr;
6063 		dp_in = hdr->datapath_in;
6064 		if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
6065 			LOG_ERR("Input data path not set for HCI");
6066 			return -EINVAL;
6067 		}
6068 
6069 		/* Get input data path's source handle */
6070 		isoal_source_handle_t source = dp_in->source_hdl;
6071 
6072 		/* Start Fragmentation */
6073 		isoal_status_t isoal_status =
6074 			isoal_tx_sdu_fragment(source, &sdu_frag_tx);
6075 
6076 		if (isoal_status) {
6077 			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
6078 				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
6079 				return -ENOBUFS;
6080 			}
6081 
6082 			return -EINVAL;
6083 		}
6084 
6085 		/* TODO: Assign *evt if an immediate response is required */
6086 		return 0;
6087 #endif /* CONFIG_BT_CTLR_CONN_ISO */
6088 
6089 #if defined(CONFIG_BT_CTLR_ADV_ISO)
6090 	} else if (IS_ADV_ISO_HANDLE(handle)) {
6091 		struct lll_adv_iso_stream *stream;
6092 		struct ll_adv_iso_set *adv_iso;
6093 		struct lll_adv_iso *lll_iso;
6094 		uint16_t latency_prepare;
6095 		uint16_t stream_handle;
6096 		uint64_t target_event;
6097 		uint8_t event_offset;
6098 
6099 		/* Get BIS stream handle and stream context */
6100 		stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
6101 		stream = ull_adv_iso_stream_get(stream_handle);
6102 		if (!stream || !stream->dp) {
6103 			LOG_ERR("Invalid BIS stream");
6104 			return -EINVAL;
6105 		}
6106 
6107 		adv_iso = ull_adv_iso_by_stream_get(stream_handle);
6108 		if (!adv_iso) {
6109 			LOG_ERR("No BIG associated with stream handle");
6110 			return -EINVAL;
6111 		}
6112 
6113 		lll_iso = &adv_iso->lll;
6114 
		/* Determine the target event and the first event offset after
		 * datapath setup.
		 * event_offset mitigates the possibility of the first SDU being
		 * late on the datapath, which would otherwise cause all
		 * subsequent SDUs to be dropped for that SDU interval. I.e. when
		 * the upper layer is not drifting and the first SDU is dropped,
		 * the resulting drop of all subsequent SDUs is mitigated by
		 * offsetting the grp_ref_point.
		 *
		 * It is ok to do the below for every received ISO data packet;
		 * ISOAL will not consider subsequent skewed target_event values
		 * after the first use of a target_event value.
		 *
		 * In the BIG implementation in LLL, payload_count corresponds to
		 * the next BIG event, hence calculate grp_ref_point for the next
		 * BIG event by incrementing the previously elapsed big_ref_point
		 * by one additional ISO interval.
		 */
6132 		target_event = lll_iso->payload_count / lll_iso->bn;
6133 		latency_prepare = lll_iso->latency_prepare;
6134 		if (latency_prepare) {
6135 			/* big_ref_point has been updated, but payload_count
6136 			 * hasn't been updated yet - increment target_event to
6137 			 * compensate
6138 			 */
6139 			target_event += latency_prepare;
6140 		}
6141 		event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;
6142 
6143 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
6144 		uint64_t event_count;
6145 		uint64_t pkt_seq_num;
6146 
6147 		/* Catch up local pkt_seq_num with internal pkt_seq_num */
6148 		event_count = target_event + event_offset;
6149 		pkt_seq_num = event_count + 1U;
6150 
		/* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10),
		 * i.e. pb_flag is an even value, then check that pkt_seq_num is
		 * a future sequence number value compared to the last recorded
		 * number in stream->pkt_seq_num.
		 *
		 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
		 * BIT64(39) will be set (2's complement value). A diff value
		 * less than or equal to BIT64_MASK(38) means the diff value is
		 * positive and hence pkt_seq_num is greater than
		 * stream->pkt_seq_num. This calculation is also valid when the
		 * value rolls over.
		 */
6163 		if (!(pb_flag & 0x01) &&
6164 		    (((pkt_seq_num - stream->pkt_seq_num) &
6165 		      BIT64_MASK(39)) <= BIT64_MASK(38))) {
6166 			stream->pkt_seq_num = pkt_seq_num;
6167 		} else {
6168 			pkt_seq_num = stream->pkt_seq_num;
6169 		}
6170 
		/* Pre-increment when pb_flag is BT_ISO_SINGLE (0b10) or
		 * BT_ISO_END (0b11), i.e. when bit 1 of pb_flag is set, for the
		 * next ISO data packet sequence number comparison.
		 */
		if (pb_flag & BIT(1)) {
6176 			stream->pkt_seq_num++;
6177 		}
6178 
6179 		/* Target next ISO event to avoid overlapping with, if any,
6180 		 * current ISO event
6181 		 */
6182 		/* FIXME: Implement ISO Tx ack generation early in done compared
6183 		 *        to currently only in prepare. I.e. to ensure upper
6184 		 *        layer has the number of completed packet before the
6185 		 *        next BIG event, so as to supply new ISO data packets.
6186 		 *        Without which upper layers need extra buffers to
6187 		 *        buffer next ISO data packet.
6188 		 *
6189 		 *        Enable below increment once early Tx ack is
6190 		 *        implemented.
6191 		 *
6192 		 * pkt_seq_num++;
6193 		 */
6194 		sdu_frag_tx.target_event = pkt_seq_num;
6195 		sdu_frag_tx.grp_ref_point =
6196 			isoal_get_wrapped_time_us(adv_iso->big_ref_point,
6197 						  (((pkt_seq_num + 1U) -
6198 						    event_count) *
6199 						   lll_iso->iso_interval *
6200 						   ISO_INT_UNIT_US));
6201 
6202 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6203 		sdu_frag_tx.target_event = target_event + event_offset;
6204 		sdu_frag_tx.grp_ref_point =
6205 			isoal_get_wrapped_time_us(adv_iso->big_ref_point,
6206 						  ((event_offset + 1U) *
6207 						   lll_iso->iso_interval *
6208 						   ISO_INT_UNIT_US));
6209 #endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
6210 
6211 		/* Start Fragmentation */
6212 		/* FIXME: need to ensure ISO-AL returns proper isoal_status.
6213 		 * Currently there are cases where ISO-AL calls LL_ASSERT.
6214 		 */
6215 		isoal_status_t isoal_status =
6216 			isoal_tx_sdu_fragment(stream->dp->source_hdl, &sdu_frag_tx);
6217 
6218 		if (isoal_status) {
6219 			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
6220 				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
6221 				return -ENOBUFS;
6222 			}
6223 
6224 			return -EINVAL;
6225 		}
6226 
6227 		return 0;
6228 #endif /* CONFIG_BT_CTLR_ADV_ISO */
6229 
6230 	}
6231 
6232 	return -EINVAL;
6233 }
6234 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
6235 
6236 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6237 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6238 static void dup_ext_adv_adi_store(struct dup_ext_adv_mode *dup_mode,
6239 				  const struct pdu_adv_adi *adi,
6240 				  uint8_t data_status)
6241 {
6242 	struct dup_ext_adv_set *adv_set;
6243 
6244 	adv_set = &dup_mode->set[dup_mode->set_curr];
6245 
6246 	adv_set->data_cmplt = (data_status ==
6247 			       BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) ?
6248 			      1U : 0U;
6249 
6250 	if (adi) {
6251 		(void)memcpy(&adv_set->adi, adi, sizeof(*adi));
6252 	} else {
6253 		(void)memset(&adv_set->adi, 0U, sizeof(*adi));
6254 	}
6255 
6256 	if (dup_mode->set_count < CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6257 		dup_mode->set_count++;
6258 		dup_mode->set_curr = dup_mode->set_count;
6259 	} else {
6260 		dup_mode->set_curr++;
6261 	}
6262 
6263 	if (dup_mode->set_curr == CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6264 		dup_mode->set_curr = 0U;
6265 	}
6266 }
6267 
6268 static void dup_ext_adv_mode_reset(struct dup_ext_adv_mode *dup_adv_mode)
6269 {
6270 	uint8_t adv_mode;
6271 
6272 	for (adv_mode = 0U; adv_mode < DUP_EXT_ADV_MODE_COUNT;
6273 	     adv_mode++) {
6274 		struct dup_ext_adv_mode *dup_mode;
6275 
6276 		dup_mode = &dup_adv_mode[adv_mode];
6277 		dup_mode->set_count = 0U;
6278 		dup_mode->set_curr = 0U;
6279 	}
6280 }
6281 
6282 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
6283 static void dup_ext_adv_reset(void)
6284 {
6285 	for (int32_t i = 0; i < dup_count; i++) {
6286 		struct dup_entry *dup;
6287 
6288 		dup = &dup_filter[i];
6289 		dup->mask = 0U;
6290 		dup_ext_adv_mode_reset(dup->adv_mode);
6291 	}
6292 }
6293 
6294 static void dup_periodic_adv_reset(uint8_t addr_type, const uint8_t *addr,
6295 				   uint8_t sid)
6296 {
6297 	for (int32_t addr_idx = 0; addr_idx < dup_count; addr_idx++) {
6298 		struct dup_ext_adv_mode *dup_mode;
6299 		struct dup_entry *dup;
6300 
6301 		dup = &dup_filter[addr_idx];
6302 		if (memcmp(addr, dup->addr.a.val, sizeof(bt_addr_t)) ||
6303 		    (addr_type != dup->addr.type)) {
6304 			continue;
6305 		}
6306 
6307 		dup_mode = &dup->adv_mode[DUP_EXT_ADV_MODE_PERIODIC];
6308 		for (uint16_t set_idx = 0; set_idx < dup_mode->set_count;
6309 		     set_idx++) {
6310 			struct dup_ext_adv_set *adv_set;
6311 
6312 			adv_set = &dup_mode->set[set_idx];
6313 			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != sid) {
6314 				continue;
6315 			}
6316 
6317 			/* reset data complete state */
6318 			adv_set->data_cmplt = 0U;
6319 
6320 			return;
6321 		}
6322 
6323 		return;
6324 	}
6325 }
6326 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
6327 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6328 
6329 static inline bool is_dup_or_update(struct dup_entry *dup, uint8_t adv_type,
6330 				    uint8_t adv_mode,
6331 				    const struct pdu_adv_adi *adi,
6332 				    uint8_t data_status)
6333 {
6334 	if (!(dup->mask & BIT(adv_type))) {
6335 		/* report different adv types */
6336 		dup->mask |= BIT(adv_type);
6337 
6338 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6339 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6340 				      data_status);
6341 
6342 		return false;
6343 	} else if (adv_type != PDU_ADV_TYPE_EXT_IND) {
6344 		/* drop duplicate legacy advertising */
6345 		return true;
6346 	} else if (dup->adv_mode[adv_mode].set_count == 0U) {
6347 		/* report different extended adv mode */
6348 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6349 				      data_status);
6350 		return false;
6351 	} else if (adi) {
6352 		struct dup_ext_adv_mode *dup_mode;
6353 		uint8_t j;
6354 
6355 		dup_mode = &dup->adv_mode[adv_mode];
6356 		for (j = 0; j < dup_mode->set_count; j++) {
6357 			struct dup_ext_adv_set *adv_set;
6358 
6359 			adv_set = &dup_mode->set[j];
6360 			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != PDU_ADV_ADI_SID_GET(adi)) {
6361 				continue;
6362 			}
6363 
6364 			if (PDU_ADV_ADI_DID_GET(&adv_set->adi) != PDU_ADV_ADI_DID_GET(adi)) {
6365 				/* report different DID */
6366 				adv_set->adi.did_sid_packed[0] = adi->did_sid_packed[0];
6367 				adv_set->adi.did_sid_packed[1] = adi->did_sid_packed[1];
6368 				/* set new data status */
6369 				if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
6370 					adv_set->data_cmplt = 1U;
6371 				} else {
6372 					adv_set->data_cmplt = 0U;
6373 				}
6374 
6375 				return false;
6376 			} else if (!adv_set->data_cmplt &&
6377 				   (data_status ==
6378 				    BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE)) {
6379 				/* report data complete */
6380 				adv_set->data_cmplt = 1U;
6381 				return false;
6382 			} else if (!adv_set->data_cmplt) {
6383 				/* report partial and incomplete data */
6384 				return false;
6385 			}
6386 
6387 			return true;
6388 		}
6389 
6390 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6391 				      data_status);
6392 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6393 
6394 		return false;
6395 	}
6396 
6397 	return true;
6398 }
6399 
6400 static bool dup_found(uint8_t adv_type, uint8_t addr_type, const uint8_t *addr,
6401 		      uint8_t adv_mode, const struct pdu_adv_adi *adi,
6402 		      uint8_t data_status)
6403 {
6404 	/* check for duplicate filtering */
6405 	if (dup_count >= 0) {
6406 		struct dup_entry *dup;
6407 
6408 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6409 		__ASSERT((adv_mode < ARRAY_SIZE(dup_filter[0].adv_mode)),
6410 			 "adv_mode index out-of-bound");
6411 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6412 
		/* Find an existing entry and update it if changed */
6414 		for (int32_t i = 0; i < dup_count; i++) {
6415 			dup = &dup_filter[i];
6416 			if (memcmp(addr, &dup->addr.a.val[0],
6417 				   sizeof(bt_addr_t)) ||
6418 			    (addr_type != dup->addr.type)) {
6419 				continue;
6420 			}
6421 
6422 			/* still duplicate or update entry with change */
6423 			return is_dup_or_update(dup, adv_type, adv_mode, adi,
6424 						data_status);
6425 		}
6426 
6427 		/* insert into the duplicate filter */
6428 		dup = &dup_filter[dup_curr];
6429 		(void)memcpy(&dup->addr.a.val[0], addr, sizeof(bt_addr_t));
6430 		dup->addr.type = addr_type;
6431 		dup->mask = BIT(adv_type);
6432 
6433 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6434 		dup_ext_adv_mode_reset(dup->adv_mode);
6435 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6436 				      data_status);
6437 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6438 
6439 		if (dup_count < CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6440 			dup_count++;
6441 			dup_curr = dup_count;
6442 		} else {
6443 			dup_curr++;
6444 		}
6445 
6446 		if (dup_curr == CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6447 			dup_curr = 0U;
6448 		}
6449 	}
6450 
6451 	return false;
6452 }
6453 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6454 
6455 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6456 static inline void le_dir_adv_report(struct pdu_adv *adv, struct net_buf *buf,
6457 				     int8_t rssi, uint8_t rl_idx)
6458 {
6459 	struct bt_hci_evt_le_direct_adv_report *drp;
6460 	struct bt_hci_evt_le_direct_adv_info *dir_info;
6461 
6462 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6463 	    !(le_event_mask & BT_EVT_MASK_LE_DIRECT_ADV_REPORT)) {
6464 		return;
6465 	}
6466 
6467 	LL_ASSERT(adv->type == PDU_ADV_TYPE_DIRECT_IND);
6468 
6469 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6470 	if (dup_scan &&
6471 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6472 		return;
6473 	}
6474 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6475 
6476 	drp = meta_evt(buf, BT_HCI_EVT_LE_DIRECT_ADV_REPORT,
6477 		       sizeof(*drp) + sizeof(*dir_info));
6478 
6479 	drp->num_reports = 1U;
6480 	dir_info = (void *)(((uint8_t *)drp) + sizeof(*drp));
6481 
6482 	/* Directed Advertising */
6483 	dir_info->evt_type = BT_HCI_ADV_DIRECT_IND;
6484 
6485 #if defined(CONFIG_BT_CTLR_PRIVACY)
6486 	if (rl_idx < ll_rl_size_get()) {
6487 		/* Store identity address */
6488 		ll_rl_id_addr_get(rl_idx, &dir_info->addr.type,
6489 				  &dir_info->addr.a.val[0]);
6490 		/* Mark it as identity address from RPA (0x02, 0x03) */
6491 		MARK_AS_IDENTITY_ADDR(dir_info->addr.type);
6492 	} else {
6493 #else
6494 	if (1) {
6495 #endif /* CONFIG_BT_CTLR_PRIVACY */
6496 		dir_info->addr.type = adv->tx_addr;
6497 		memcpy(&dir_info->addr.a.val[0], &adv->direct_ind.adv_addr[0],
6498 		       sizeof(bt_addr_t));
6499 	}
6500 
6501 	dir_info->dir_addr.type = adv->rx_addr;
6502 	memcpy(&dir_info->dir_addr.a.val[0],
6503 	       &adv->direct_ind.tgt_addr[0], sizeof(bt_addr_t));
6504 
6505 	dir_info->rssi = rssi;
6506 }
6507 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6508 
6509 #if defined(CONFIG_BT_OBSERVER)
6510 #if defined(CONFIG_BT_HCI_MESH_EXT)
6511 static inline bool scan_filter_apply(uint8_t filter, uint8_t *data, uint8_t len)
6512 {
6513 	struct scan_filter *f = &scan_filters[filter];
6514 
6515 	/* No patterns means filter out all advertising packets */
6516 	for (uint8_t i = 0; i < f->count; i++) {
6517 		/* Require at least the length of the pattern */
6518 		if (len >= f->lengths[i] &&
6519 		    !memcmp(data, f->patterns[i], f->lengths[i])) {
6520 			return true;
6521 		}
6522 	}
6523 
6524 	return false;
6525 }
6526 
6527 static inline void le_mesh_scan_report(struct pdu_adv *adv,
6528 				       struct node_rx_pdu *node_rx,
6529 				       struct net_buf *buf, int8_t rssi)
6530 {
6531 	uint8_t data_len = (adv->len - BDADDR_SIZE);
6532 	struct bt_hci_evt_mesh_scanning_report *mep;
6533 	struct bt_hci_evt_mesh_scan_report *sr;
6534 	uint32_t instant;
6535 	uint8_t chan;
6536 
6537 	LL_ASSERT(adv->type == PDU_ADV_TYPE_NONCONN_IND);
6538 
6539 	/* Filter based on currently active Scan Filter */
6540 	if (sf_curr < ARRAY_SIZE(scan_filters) &&
6541 	    !scan_filter_apply(sf_curr, &adv->adv_ind.data[0], data_len)) {
6542 		/* Drop the report */
6543 		return;
6544 	}
6545 
6546 	chan = node_rx->rx_ftr.chan;
6547 	instant = node_rx->rx_ftr.anchor_ticks;
6548 
6549 	mep = mesh_evt(buf, BT_HCI_EVT_MESH_SCANNING_REPORT,
6550 			    sizeof(*mep) + sizeof(*sr));
6551 
6552 	mep->num_reports = 1U;
6553 	sr = (void *)(((uint8_t *)mep) + sizeof(*mep));
6554 	sr->addr.type = adv->tx_addr;
6555 	memcpy(&sr->addr.a.val[0], &adv->adv_ind.addr[0], sizeof(bt_addr_t));
6556 	sr->chan = chan;
6557 	sr->rssi = rssi;
6558 	sys_put_le32(instant, (uint8_t *)&sr->instant);
6559 
6560 	sr->data_len = data_len;
6561 	memcpy(&sr->data[0], &adv->adv_ind.data[0], data_len);
6562 }
6563 #endif /* CONFIG_BT_HCI_MESH_EXT */
6564 
6565 static void le_advertising_report(struct pdu_data *pdu_data,
6566 				  struct node_rx_pdu *node_rx,
6567 				  struct net_buf *buf)
6568 {
6569 	const uint8_t c_adv_type[] = { 0x00, 0x01, 0x03, 0xff, 0x04,
6570 				    0xff, 0x02 };
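	/* Indexed by the PDU advertising type: ADV_IND -> 0x00,
	 * ADV_DIRECT_IND -> 0x01, ADV_NONCONN_IND -> 0x03, SCAN_RSP -> 0x04,
	 * ADV_SCAN_IND -> 0x02; 0xff marks PDU types that never generate a
	 * legacy advertising report (SCAN_REQ, CONNECT_IND).
	 */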
6571 	struct bt_hci_evt_le_advertising_report *sep;
6572 	struct pdu_adv *adv = (void *)pdu_data;
6573 	struct bt_hci_evt_le_advertising_info *adv_info;
6574 	uint8_t data_len;
6575 	uint8_t info_len;
6576 	int8_t rssi;
6577 #if defined(CONFIG_BT_CTLR_PRIVACY)
6578 	uint8_t rl_idx;
6579 #endif /* CONFIG_BT_CTLR_PRIVACY */
6580 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6581 	uint8_t direct_report;
6582 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6583 	int8_t *prssi;
6584 
6585 	rssi = -(node_rx->rx_ftr.rssi);
6586 #if defined(CONFIG_BT_CTLR_PRIVACY)
6587 	rl_idx = node_rx->rx_ftr.rl_idx;
6588 #endif /* CONFIG_BT_CTLR_PRIVACY */
6589 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6590 	direct_report = node_rx->rx_ftr.direct;
6591 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6592 
6593 #if defined(CONFIG_BT_CTLR_PRIVACY)
6594 	if (adv->tx_addr) {
6595 		/* Update current RPA */
6596 		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
6597 	}
6598 #endif /* CONFIG_BT_CTLR_PRIVACY */
6599 
6600 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
6601 	if (direct_report) {
6602 #if defined(CONFIG_BT_CTLR_PRIVACY)
6603 		le_dir_adv_report(adv, buf, rssi, rl_idx);
6604 #else
6605 		le_dir_adv_report(adv, buf, rssi, 0xFF);
6606 #endif /* CONFIG_BT_CTLR_PRIVACY */
6607 		return;
6608 	}
6609 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6610 
6611 #if defined(CONFIG_BT_HCI_MESH_EXT)
6612 	if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) {
6613 		le_mesh_scan_report(adv, node_rx, buf, rssi);
6614 		return;
6615 	}
6616 #endif /* CONFIG_BT_HCI_MESH_EXT */
6617 
6618 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6619 	    !(le_event_mask & BT_EVT_MASK_LE_ADVERTISING_REPORT)) {
6620 		return;
6621 	}
6622 
6623 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6624 	if (dup_scan &&
6625 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6626 		return;
6627 	}
6628 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6629 
6630 	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
6631 		data_len = (adv->len - BDADDR_SIZE);
6632 	} else {
6633 		data_len = 0U;
6634 	}
6635 	info_len = sizeof(struct bt_hci_evt_le_advertising_info) + data_len +
6636 		   sizeof(*prssi);
6637 	sep = meta_evt(buf, BT_HCI_EVT_LE_ADVERTISING_REPORT,
6638 		       sizeof(*sep) + info_len);
6639 
6640 	sep->num_reports = 1U;
6641 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6642 
6643 	adv_info->evt_type = c_adv_type[adv->type];
6644 
6645 #if defined(CONFIG_BT_CTLR_PRIVACY)
6646 	if (rl_idx < ll_rl_size_get()) {
6647 		/* Store identity address */
6648 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6649 				  &adv_info->addr.a.val[0]);
6650 		/* Mark it as identity address from RPA (0x02, 0x03) */
6651 		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
6652 	} else {
6653 #else
6654 	if (1) {
6655 #endif /* CONFIG_BT_CTLR_PRIVACY */
6656 
6657 		adv_info->addr.type = adv->tx_addr;
6658 		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
6659 		       sizeof(bt_addr_t));
6660 	}
6661 
6662 	adv_info->length = data_len;
6663 	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
6664 	/* RSSI */
6665 	prssi = &adv_info->data[0] + data_len;
6666 	*prssi = rssi;
6667 }
6668 
6669 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6670 static void le_ext_adv_legacy_report(struct pdu_data *pdu_data,
6671 				     struct node_rx_pdu *node_rx,
6672 				     struct net_buf *buf)
6673 {
6674 	/* Lookup event type based on pdu_adv_type set by LLL */
6675 	const uint8_t evt_type_lookup[] = {
6676 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
6677 		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* ADV_IND */
6678 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
6679 		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* DIRECT_IND */
6680 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY), /* NONCONN_IND */
6681 		0xff,                            /* Invalid index lookup */
6682 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6683 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
6684 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN),   /* SCAN_RSP to an ADV_SCAN_IND
6685 						  */
6686 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6687 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
6688 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
6689 		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* SCAN_RSP to an ADV_IND,
6690 						* NOTE: LLL explicitly sets
6691 						* adv_type to
6692 						* PDU_ADV_TYPE_ADV_IND_SCAN_RSP
6693 						*/
6694 		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
6695 		 BT_HCI_LE_ADV_EVT_TYPE_SCAN)    /* SCAN_IND */
6696 	};
6697 	struct bt_hci_evt_le_ext_advertising_info *adv_info;
6698 	struct bt_hci_evt_le_ext_advertising_report *sep;
6699 	struct pdu_adv *adv = (void *)pdu_data;
6700 	uint8_t data_len;
6701 	uint8_t info_len;
6702 	int8_t rssi;
6703 
6704 #if defined(CONFIG_BT_CTLR_PRIVACY)
6705 	uint8_t rl_idx;
6706 #endif /* CONFIG_BT_CTLR_PRIVACY */
6707 
6708 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
6709 	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
6710 		return;
6711 	}
6712 
6713 	/* The Link Layer currently returns RSSI as an absolute value */
6714 	rssi = -(node_rx->rx_ftr.rssi);
6715 
6716 #if defined(CONFIG_BT_CTLR_PRIVACY)
6717 	rl_idx = node_rx->rx_ftr.rl_idx;
6718 #endif /* CONFIG_BT_CTLR_PRIVACY */
6719 
6720 #if defined(CONFIG_BT_CTLR_PRIVACY)
6721 	if (adv->tx_addr) {
6722 		/* Update current RPA */
6723 		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
6724 	}
6725 #endif /* CONFIG_BT_CTLR_PRIVACY */
6726 
6727 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6728 	if (dup_scan &&
6729 	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
6730 		return;
6731 	}
6732 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6733 
6734 	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
6735 		data_len = (adv->len - BDADDR_SIZE);
6736 	} else {
6737 		data_len = 0U;
6738 	}
6739 
6740 	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
6741 		   data_len;
6742 	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
6743 		       sizeof(*sep) + info_len);
6744 
6745 	sep->num_reports = 1U;
6746 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6747 
6748 	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type_lookup[adv->type]);
6749 
6750 #if defined(CONFIG_BT_CTLR_PRIVACY)
6751 	if (rl_idx < ll_rl_size_get()) {
6752 		/* Store identity address */
6753 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6754 				  &adv_info->addr.a.val[0]);
6755 		/* Mark it as identity address from RPA (0x02, 0x03) */
6756 		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
6757 	} else
6758 #endif /* CONFIG_BT_CTLR_PRIVACY */
6759 	{
6760 		adv_info->addr.type = adv->tx_addr;
6761 		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
6762 		       sizeof(bt_addr_t));
6763 	}
6764 
6765 	adv_info->prim_phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
6766 	adv_info->sec_phy = 0U;
6767 	adv_info->sid = 0xff;
6768 	adv_info->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6769 	adv_info->rssi = rssi;
6770 	adv_info->interval = 0U;
6771 
6772 	if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
6773 		adv_info->direct_addr.type = adv->rx_addr;
6774 		bt_addr_copy(&adv_info->direct_addr.a,
6775 			     (void *)adv->direct_ind.tgt_addr);
6776 	} else {
6777 		adv_info->direct_addr.type = 0U;
6778 		(void)memset(adv_info->direct_addr.a.val, 0U,
6779 			     sizeof(adv_info->direct_addr.a.val));
6780 	}
6781 
6782 	adv_info->length = data_len;
6783 	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
6784 }
6785 
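/* Determine the direct address type to be reported for a directed extended
 * advertising PDU, taking into account whether the Controller resolved the
 * peer's directed (resolvable private) address.
 */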
6786 static uint8_t ext_adv_direct_addr_type(struct lll_scan *lll,
6787 					bool peer_resolved, bool direct_report,
6788 					uint8_t rx_addr_type,
6789 					const uint8_t *const rx_addr)
6790 {
6791 	/* The directed address is a resolvable private address, but the
6792 	 * Controller could not resolve it.
6793 	 */
6794 	if (direct_report) {
6795 		return BT_ADDR_LE_UNRESOLVED;
6796 	}
6797 
6798 	if (0) {
6799 #if defined(CONFIG_BT_CTLR_PRIVACY)
6800 	/* Peer directed advertiser's address was resolved */
6801 	} else if (peer_resolved) {
6802 		struct ll_scan_set *scan;
6803 
6804 		scan = HDR_LLL2ULL(lll);
6805 		if ((rx_addr_type == lll->init_addr_type) &&
6806 		    !memcmp(lll->init_addr, rx_addr, BDADDR_SIZE)) {
6807 			/* Peer directed advertiser used local scanner's
6808 			 * initiator address.
6809 			 */
6810 			return scan->own_addr_type;
6811 		}
6812 
6813 		/* Peer directed advertiser used directed resolvable
6814 		 * private address generated from the local scanner's
6815 		 * Identity Resolution Key.
6816 		 */
6817 		return scan->own_addr_type | BIT(1);
6818 #endif /* CONFIG_BT_CTLR_PRIVACY */
6819 	} else {
6820 		struct ll_scan_set *scan;
6821 
6822 		scan = HDR_LLL2ULL(lll);
6823 
6824 		/* Peer directed advertiser used local scanner's
6825 		 * initiator address.
6826 		 */
6827 		return scan->own_addr_type;
6828 	}
6829 }
6830 
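/* Parse the extended header of an advertising PDU in the node_rx chain and
 * return the AD data length, setting *data to point at the AD data. The
 * secondary PHY (from any AuxPtr) and the Tx Power field are extracted when
 * present.
 */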
6831 static uint8_t ext_adv_data_get(const struct node_rx_pdu *node_rx_data,
6832 				uint8_t *const sec_phy, int8_t *const tx_pwr,
6833 				const uint8_t **const data)
6834 {
6835 	const struct pdu_adv *adv = (void *)node_rx_data->pdu;
6836 	const struct pdu_adv_com_ext_adv *p;
6837 	const struct pdu_adv_ext_hdr *h;
6838 	uint8_t hdr_buf_len;
6839 	const uint8_t *ptr;
6840 	uint8_t hdr_len;
6841 
6842 	*tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6843 
6844 	p = (void *)&adv->adv_ext_ind;
6845 	h = (void *)p->ext_hdr_adv_data;
6846 	ptr = (void *)h;
6847 
6848 	if (!p->ext_hdr_len) {
6849 		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
6850 
6851 		goto no_ext_hdr;
6852 	}
6853 
6854 	ptr = h->data;
6855 
6856 	if (h->adv_addr) {
6857 		ptr += BDADDR_SIZE;
6858 	}
6859 
6860 	if (h->tgt_addr) {
6861 		ptr += BDADDR_SIZE;
6862 	}
6863 
6864 	if (h->adi) {
6865 		ptr += sizeof(struct pdu_adv_adi);
6866 	}
6867 
6868 	if (h->aux_ptr) {
6869 		struct pdu_adv_aux_ptr *aux_ptr;
6870 
6871 		aux_ptr = (void *)ptr;
6872 		ptr += sizeof(*aux_ptr);
6873 
6874 		*sec_phy = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
6875 	}
6876 
6877 	if (h->sync_info) {
6878 		ptr += sizeof(struct pdu_adv_sync_info);
6879 	}
6880 
6881 	if (h->tx_pwr) {
6882 		*tx_pwr = *(int8_t *)ptr;
6883 		ptr++;
6884 	}
6885 
6886 	hdr_len = ptr - (uint8_t *)p;
6887 	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
6888 	if (hdr_len < hdr_buf_len) {
6889 		uint8_t acad_len = hdr_buf_len - hdr_len;
6890 
6891 		ptr += acad_len;
6892 		hdr_len += acad_len;
6893 	}
6894 
6895 no_ext_hdr:
6896 	if (hdr_len < adv->len) {
6897 		*data = ptr;
6898 
6899 		return adv->len - hdr_len;
6900 	}
6901 
6902 	return 0;
6903 }
6904 
6905 static void node_rx_extra_list_release(struct node_rx_pdu *node_rx_extra)
6906 {
6907 	while (node_rx_extra) {
6908 		struct node_rx_pdu *node_rx_curr;
6909 
6910 		node_rx_curr = node_rx_extra;
6911 		node_rx_extra = node_rx_curr->rx_ftr.extra;
6912 
6913 		node_rx_curr->hdr.next = NULL;
6914 		ll_rx_mem_release((void **)&node_rx_curr);
6915 	}
6916 }
6917 
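/* Fill a single LE Extended Advertising Report event, containing one report
 * entry, into the given event buffer.
 */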
6918 static void ext_adv_info_fill(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
6919 			      uint8_t adv_addr_type, const uint8_t *adv_addr,
6920 			      uint8_t direct_addr_type,
6921 			      const uint8_t *direct_addr, uint8_t rl_idx,
6922 			      int8_t tx_pwr, int8_t rssi,
6923 			      uint16_t interval_le16,
6924 			      const struct pdu_adv_adi *adi, uint8_t data_len,
6925 			      const uint8_t *data, struct net_buf *buf)
6926 {
6927 	struct bt_hci_evt_le_ext_advertising_info *adv_info;
6928 	struct bt_hci_evt_le_ext_advertising_report *sep;
6929 	uint8_t info_len;
6930 
6931 	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
6932 		   data_len;
6933 	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
6934 		       sizeof(*sep) + info_len);
6935 
6936 	sep->num_reports = 1U;
6937 	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));
6938 
6939 	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type);
6940 
6941 	if (0) {
6942 #if defined(CONFIG_BT_CTLR_PRIVACY)
6943 	} else if (rl_idx < ll_rl_size_get()) {
6944 		/* Store identity address */
6945 		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
6946 				  adv_info->addr.a.val);
6947 		/* Mark it as identity address from RPA (0x02, 0x03) */
6948 		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
6949 #else /* !CONFIG_BT_CTLR_PRIVACY */
6950 		ARG_UNUSED(rl_idx);
6951 #endif /* !CONFIG_BT_CTLR_PRIVACY */
6952 	} else if (adv_addr) {
6953 		adv_info->addr.type = adv_addr_type;
6954 		(void)memcpy(adv_info->addr.a.val, adv_addr, sizeof(bt_addr_t));
6955 	} else {
6956 		adv_info->addr.type = 0U;
6957 		(void)memset(adv_info->addr.a.val, 0, sizeof(bt_addr_t));
6958 	}
6959 
6960 	adv_info->prim_phy = find_lsb_set(phy);
6961 	adv_info->sec_phy = sec_phy;
6962 	adv_info->sid = (adi) ? PDU_ADV_ADI_SID_GET(adi) : BT_HCI_LE_EXT_ADV_SID_INVALID;
6963 	adv_info->tx_power = tx_pwr;
6964 	adv_info->rssi = rssi;
6965 	adv_info->interval = interval_le16;
6966 
6967 	if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_DIRECT) {
6968 		adv_info->direct_addr.type = direct_addr_type;
6969 		(void)memcpy(adv_info->direct_addr.a.val, direct_addr,
6970 			     sizeof(bt_addr_t));
6971 	} else {
6972 		adv_info->direct_addr.type = 0U;
6973 		(void)memset(adv_info->direct_addr.a.val, 0, sizeof(bt_addr_t));
6974 	}
6975 
6976 	adv_info->length = data_len;
6977 	(void)memcpy(adv_info->data, data, data_len);
6978 }
6979 
6980 static void ext_adv_pdu_frag(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
6981 			     uint8_t adv_addr_type, const uint8_t *adv_addr,
6982 			     uint8_t direct_addr_type,
6983 			     const uint8_t *direct_addr, uint8_t rl_idx,
6984 			     int8_t tx_pwr, int8_t rssi, uint16_t interval_le16,
6985 			     const struct pdu_adv_adi *adi,
6986 			     uint8_t data_len_max,
6987 			     uint16_t *const data_len_total,
6988 			     uint8_t *const data_len,
6989 			     const uint8_t **const data, struct net_buf *buf,
6990 			     struct net_buf **const evt_buf)
6991 {
6992 	const uint8_t data_len_frag = MIN(*data_len, data_len_max);
6993 
6994 	do {
6995 		/* Prepare a fragment of PDU data in a HCI event */
6996 		ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type,
6997 				  adv_addr, direct_addr_type, direct_addr,
6998 				  rl_idx, tx_pwr, rssi, interval_le16, adi,
6999 				  data_len_frag, *data, *evt_buf);
7000 
7001 		*data += data_len_frag;
7002 		*data_len -= data_len_frag;
7003 		*data_len_total -= data_len_frag;
7004 
7005 		*evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7006 		LL_ASSERT(*evt_buf);
7007 
7008 		net_buf_frag_add(buf, *evt_buf);
7009 
7010 		/* Continue to fragment until the last partial PDU data fragment;
7011 		 * the HCI event for the remaining PDU data is prepared by the caller.
7012 		 */
7013 	} while (*data_len > data_len_max);
7014 }
7015 
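/* Fragment AD data that spans multiple PDUs in the node_rx chain, or that
 * exceeds the maximum HCI event data length, into a chain of LE Extended
 * Advertising Report events appended as net_buf fragments.
 */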
7016 static void ext_adv_data_frag(const struct node_rx_pdu *node_rx_data,
7017 			      uint8_t evt_type, uint8_t phy,
7018 			      uint8_t *const sec_phy, uint8_t adv_addr_type,
7019 			      const uint8_t *adv_addr, uint8_t direct_addr_type,
7020 			      const uint8_t *direct_addr, uint8_t rl_idx,
7021 			      int8_t *const tx_pwr, int8_t rssi,
7022 			      uint16_t interval_le16,
7023 			      const struct pdu_adv_adi *adi,
7024 			      uint8_t data_len_max, uint16_t data_len_total,
7025 			      uint8_t *const data_len,
7026 			      const uint8_t **const data, struct net_buf *buf,
7027 			      struct net_buf **const evt_buf)
7028 {
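	/* Fragments generated here are intermediate ones, marked as
	 * "Incomplete, more data to come"; the data status occupies bits 5
	 * and 6 of the event type.
	 */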
7029 	evt_type |= (BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL << 5);
7030 
7031 	do {
7032 		/* Fragment the PDU data */
7033 		ext_adv_pdu_frag(evt_type, phy, *sec_phy, adv_addr_type,
7034 				 adv_addr, direct_addr_type, direct_addr,
7035 				 rl_idx, *tx_pwr, rssi, interval_le16, adi,
7036 				 data_len_max, &data_len_total, data_len,
7037 				 data, buf, evt_buf);
7038 
7039 		/* Check if more PDUs in the list */
7040 		node_rx_data = node_rx_data->rx_ftr.extra;
7041 		if (node_rx_data) {
7042 			if (*data_len >= data_len_total) {
7043 				/* Last fragment restricted to maximum scan
7044 				 * data length, caller will prepare the last
7045 				 * HCI fragment event.
7046 				 */
7047 				break;
7048 			} else if (*data_len) {
7049 				/* Last fragment of current PDU data */
7050 				ext_adv_pdu_frag(evt_type, phy, *sec_phy,
7051 						 adv_addr_type, adv_addr,
7052 						 direct_addr_type, direct_addr,
7053 						 rl_idx, *tx_pwr, rssi,
7054 						 interval_le16, adi,
7055 						 data_len_max, &data_len_total,
7056 						 data_len, data, buf, evt_buf);
7057 			}
7058 
7059 			/* Get next PDU data in list */
7060 			*data_len = ext_adv_data_get(node_rx_data, sec_phy,
7061 						     tx_pwr, data);
7062 
7063 			/* Restrict PDU data to maximum scan data length */
7064 			if (*data_len > data_len_total) {
7065 				*data_len = data_len_total;
7066 			}
7067 		}
7068 
7069 		/* Continue to fragment if the current PDU data length is less
7070 		 * than the total data length, or greater than the maximum HCI
7071 		 * event data length.
7072 		 */
7073 	} while ((*data_len < data_len_total) || (*data_len > data_len_max));
7074 }
7075 
7076 static void le_ext_adv_report(struct pdu_data *pdu_data,
7077 			      struct node_rx_pdu *node_rx,
7078 			      struct net_buf *buf, uint8_t phy)
7079 {
7080 	int8_t scan_rsp_tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7081 	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7082 	struct node_rx_pdu *node_rx_scan_data = NULL;
7083 	struct node_rx_pdu *node_rx_data = NULL;
7084 	const struct pdu_adv_adi *adi = NULL;
7085 	uint16_t scan_data_len_total = 0U;
7086 	struct node_rx_pdu *node_rx_curr;
7087 	struct node_rx_pdu *node_rx_next;
7088 	const uint8_t *scan_data = NULL;
7089 	uint8_t scan_data_status = 0U;
7090 	uint8_t direct_addr_type = 0U;
7091 	uint16_t data_len_total = 0U;
7092 	uint8_t *direct_addr = NULL;
7093 	uint16_t interval_le16 = 0U;
7094 	const uint8_t *data = NULL;
7095 	uint8_t scan_data_len = 0U;
7096 	uint8_t adv_addr_type = 0U;
7097 	uint8_t sec_phy_scan = 0U;
7098 	uint8_t *adv_addr = NULL;
7099 	uint8_t data_status = 0U;
7100 	struct net_buf *evt_buf;
7101 	bool devmatch = false;
7102 	uint8_t data_len = 0U;
7103 	uint8_t evt_type = 0U;
7104 	uint8_t sec_phy = 0U;
7105 	uint8_t data_len_max;
7106 	uint8_t rl_idx = 0U;
7107 	struct pdu_adv *adv;
7108 	int8_t rssi;
7109 
7110 	/* NOTE: This function uses a lot of initializers before the check and
7111 	 * return below, as an exception to initializing variables close to
7112 	 * their point of use. This is acceptable as the return is unlikely in
7113 	 * typical Controller use.
7114 	 */
7115 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7116 	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
7117 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7118 		return;
7119 	}
7120 
7121 #if defined(CONFIG_BT_CTLR_PRIVACY)
7122 	rl_idx = ll_rl_size_get();
7123 #endif /* CONFIG_BT_CTLR_PRIVACY */
7124 
7125 	adv = (void *)pdu_data;
7126 	node_rx_curr = node_rx;
7127 	node_rx_next = node_rx_curr->rx_ftr.extra;
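	/* Walk the chain of node_rx buffers linked via rx_ftr.extra: the
	 * ADV_EXT_IND, followed by any AUX_ADV_IND, AUX_CHAIN_IND and scan
	 * response PDUs, accumulating the advertiser address, ADI, Tx Power
	 * and AD data to be reported.
	 */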
7128 	do {
7129 		int8_t tx_pwr_curr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7130 		struct pdu_adv_adi *adi_curr = NULL;
7131 		uint8_t direct_addr_type_curr = 0U;
7132 		bool direct_resolved_curr = false;
7133 		uint8_t *direct_addr_curr = NULL;
7134 		uint8_t adv_addr_type_curr = 0U;
7135 		struct pdu_adv_com_ext_adv *p;
7136 		uint8_t *adv_addr_curr = NULL;
7137 		uint8_t data_len_curr = 0U;
7138 		uint8_t *data_curr = NULL;
7139 		struct pdu_adv_ext_hdr *h;
7140 		uint8_t sec_phy_curr = 0U;
7141 		uint8_t evt_type_curr;
7142 		uint8_t hdr_buf_len;
7143 		uint8_t hdr_len;
7144 		uint8_t *ptr;
7145 
7146 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
7147 		bool direct_report_curr = node_rx_curr->rx_ftr.direct;
7148 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
7149 
7150 #if defined(CONFIG_BT_CTLR_PRIVACY)
7151 		uint8_t rl_idx_curr = node_rx_curr->rx_ftr.rl_idx;
7152 
7153 		direct_resolved_curr = node_rx_curr->rx_ftr.direct_resolved;
7154 #endif /* CONFIG_BT_CTLR_PRIVACY */
7155 
7156 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7157 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7158 		const bool devmatch_curr = node_rx_curr->rx_ftr.devmatch;
7159 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7160 
7161 		/* The Link Layer currently returns RSSI as an absolute value */
7162 		rssi = -(node_rx_curr->rx_ftr.rssi);
7163 
7164 		LOG_DBG("phy= 0x%x, type= 0x%x, len= %u, tat= %u, rat= %u,"
7165 		       " rssi=%d dB", phy, adv->type, adv->len, adv->tx_addr,
7166 		       adv->rx_addr, rssi);
7167 
7168 		p = (void *)&adv->adv_ext_ind;
7169 		h = (void *)p->ext_hdr_adv_data;
7170 		ptr = (void *)h;
7171 
7172 		LOG_DBG("    Ext. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
7173 
7174 		evt_type_curr = p->adv_mode;
7175 
7176 		if (!p->ext_hdr_len) {
7177 			hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
7178 
7179 			goto no_ext_hdr;
7180 		}
7181 
7182 		ptr = h->data;
7183 
7184 		if (h->adv_addr) {
7185 			/* AdvA is RFU in AUX_CHAIN_IND */
7186 			if (node_rx_curr == node_rx ||
7187 			    node_rx_curr == node_rx->rx_ftr.extra) {
7188 				bt_addr_le_t addr;
7189 
7190 				adv_addr_type_curr = adv->tx_addr;
7191 				adv_addr_curr = ptr;
7192 
7193 				addr.type = adv->tx_addr;
7194 				(void)memcpy(addr.a.val, ptr, sizeof(bt_addr_t));
7195 
7196 				LOG_DBG("    AdvA: %s", bt_addr_le_str(&addr));
7197 			}
7198 
7199 			ptr += BDADDR_SIZE;
7200 		}
7201 
7202 		if (h->tgt_addr) {
7203 			/* TargetA is RFU in AUX_CHAIN_IND */
7204 			if (node_rx_curr == node_rx ||
7205 			    node_rx_curr == node_rx->rx_ftr.extra) {
7206 				struct lll_scan *lll;
7207 				bt_addr_le_t addr;
7208 
7209 				lll = node_rx->rx_ftr.param;
7210 
7211 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
7212 				direct_addr_type_curr =
7213 					ext_adv_direct_addr_type(lll,
7214 								 direct_resolved_curr,
7215 								 direct_report_curr,
7216 								 adv->rx_addr, ptr);
7217 #else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7218 				direct_addr_type_curr =
7219 					ext_adv_direct_addr_type(lll,
7220 								 direct_resolved_curr,
7221 								 false, adv->rx_addr,
7222 								 ptr);
7223 #endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7224 
7225 				direct_addr_curr = ptr;
7226 
7227 				addr.type = adv->rx_addr;
7228 				(void)memcpy(addr.a.val, direct_addr_curr,
7229 					     sizeof(bt_addr_t));
7230 
7231 				LOG_DBG("    TgtA: %s", bt_addr_le_str(&addr));
7232 			}
7233 
7234 			ptr += BDADDR_SIZE;
7235 		}
7236 
7237 		if (h->cte_info) {
7238 			/* CTEInfo is RFU */
7239 			ptr += 1;
7240 		}
7241 
7242 		if (h->adi) {
7243 			adi_curr = (void *)ptr;
7244 
7245 			ptr += sizeof(*adi);
7246 
7247 			LOG_DBG("    AdvDataInfo DID = 0x%x, SID = 0x%x",
7248 				PDU_ADV_ADI_DID_GET(adi_curr), PDU_ADV_ADI_SID_GET(adi_curr));
7249 		}
7250 
7251 		if (h->aux_ptr) {
7252 			struct pdu_adv_aux_ptr *aux_ptr;
7253 
7254 			/* AuxPtr is RFU for connectable or scannable AUX_ADV_IND */
7255 			if (node_rx_curr != node_rx->rx_ftr.extra ||
7256 			    evt_type_curr == 0U) {
7257 				uint8_t aux_phy;
7258 
7259 				aux_ptr = (void *)ptr;
7260 
7261 				/* Don't report if the PHY is invalid or the AUX_ADV_IND was not
7262 				 * received. See BT Core 5.4, Vol 6, Part B, Section 4.4.3.5:
7263 				 * If the Controller does not listen for or does not receive the
7264 				 * AUX_ADV_IND PDU, no report shall be generated.
7265 				 */
7266 				if ((node_rx_curr == node_rx && !node_rx_next) ||
7267 				    PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
7268 					struct node_rx_ftr *ftr;
7269 
7270 					ftr = &node_rx->rx_ftr;
7271 					node_rx_extra_list_release(ftr->extra);
7272 					return;
7273 				}
7274 
7275 
7276 				sec_phy_curr = HCI_AUX_PHY_TO_HCI_PHY(
7277 					PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7278 
7279 				aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7280 
7281 				LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
7282 				       "= %u offs = 0x%x, phy = 0x%x",
7283 				       aux_ptr->chan_idx, aux_ptr->ca,
7284 				       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr),
7285 				       aux_phy);
7286 			}
7287 
7288 			ptr += sizeof(*aux_ptr);
7289 		}
7290 
7291 		if (h->sync_info) {
7292 			struct pdu_adv_sync_info *si;
7293 
7294 			si = (void *)ptr;
7295 			ptr += sizeof(*si);
7296 
7297 			interval_le16 = si->interval;
7298 
7299 			LOG_DBG("    SyncInfo offs = %u, offs_unit = 0x%x, "
7300 			       "interval = 0x%x, sca = 0x%x, "
7301 			       "chan map = 0x%x 0x%x 0x%x 0x%x 0x%x, "
7302 			       "AA = 0x%x%x%x%x, CRC = 0x%x 0x%x 0x%x, "
7303 			       "evt cntr = 0x%x",
7304 			       PDU_ADV_SYNC_INFO_OFFSET_GET(si),
7305 			       PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si),
7306 			       sys_le16_to_cpu(si->interval),
7307 			       ((si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7308 				 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
7309 				PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS),
7310 			       si->sca_chm[0], si->sca_chm[1], si->sca_chm[2],
7311 			       si->sca_chm[3],
7312 			       (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7313 				~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK),
7314 			       si->aa[3], si->aa[2], si->aa[1], si->aa[0],
7315 			       si->crc_init[0], si->crc_init[1],
7316 			       si->crc_init[2], sys_le16_to_cpu(si->evt_cntr));
7317 		}
7318 
7319 		if (h->tx_pwr) {
7320 			tx_pwr_curr = *(int8_t *)ptr;
7321 			ptr++;
7322 
7323 			LOG_DBG("    Tx pwr= %d dB", tx_pwr_curr);
7324 		}
7325 
7326 		hdr_len = ptr - (uint8_t *)p;
7327 		hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
7328 		if (hdr_len > hdr_buf_len) {
7329 			LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
7330 		} else {
7331 			uint8_t acad_len = hdr_buf_len - hdr_len;
7332 
7333 			if (acad_len) {
7334 				ptr += acad_len;
7335 				hdr_len += acad_len;
7336 			}
7337 		}
7338 
7339 no_ext_hdr:
7340 		if (hdr_len < adv->len) {
7341 			data_len_curr = adv->len - hdr_len;
7342 			data_curr = ptr;
7343 
7344 			LOG_DBG("    AD Data (%u): <todo>", data_len_curr);
7345 		}
7346 
7347 		if (data_len_total + data_len_curr > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
7348 			/* Truncating advertising data.
7349 			 * Note that this has to be done at a PDU boundary, so stop
7350 			 * processing nodes from this one onward.
7351 			 */
7352 			if (scan_data) {
7353 				scan_data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7354 			} else {
7355 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7356 			}
7357 			break;
7358 		}
7359 
7360 		if (node_rx_curr == node_rx) {
7361 			evt_type = evt_type_curr;
7362 			adv_addr_type = adv_addr_type_curr;
7363 			adv_addr = adv_addr_curr;
7364 			direct_addr_type = direct_addr_type_curr;
7365 			direct_addr = direct_addr_curr;
7366 			adi = adi_curr;
7367 			sec_phy = sec_phy_curr;
7368 			node_rx_data = node_rx_curr;
7369 			/* Adv data in ADV_EXT_IND is RFU */
7370 			data_len = 0U;
7371 			data_len_total = 0U;
7372 			data = NULL;
7373 			scan_data_len_total = 0U;
7374 			tx_pwr = tx_pwr_curr;
7375 
7376 #if defined(CONFIG_BT_CTLR_PRIVACY)
7377 			rl_idx = rl_idx_curr;
7378 #endif /* CONFIG_BT_CTLR_PRIVACY */
7379 
7380 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7381 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7382 			devmatch = devmatch_curr;
7383 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7384 
7385 		} else {
7386 			/* TODO: Validate current value with previous */
7387 
7388 			/* Detect the scan response in the list of node_rx */
7389 			if (node_rx_curr->rx_ftr.scan_rsp) {
7390 				node_rx_scan_data = node_rx_curr;
7391 				if (sec_phy_curr) {
7392 					sec_phy_scan = sec_phy_curr;
7393 				} else {
7394 					sec_phy_scan = sec_phy;
7395 				}
7396 				scan_data_len = data_len_curr;
7397 				scan_data = data_curr;
7398 				scan_rsp_tx_pwr = tx_pwr_curr;
7399 			}
7400 
7401 			if (!adv_addr) {
7402 				adv_addr_type = adv_addr_type_curr;
7403 				adv_addr = adv_addr_curr;
7404 			}
7405 
7406 			if (!direct_addr) {
7407 				direct_addr_type = direct_addr_type_curr;
7408 				direct_addr = direct_addr_curr;
7409 			}
7410 
7411 			if (scan_data) {
7412 				scan_data_len_total += data_len_curr;
7413 			} else if (!data) {
7414 				node_rx_data = node_rx_curr;
7415 				data_len = data_len_curr;
7416 				data_len_total = data_len;
7417 				data = data_curr;
7418 				tx_pwr = tx_pwr_curr;
7419 			} else {
7420 				data_len_total += data_len_curr;
7421 			}
7422 
7423 #if defined(CONFIG_BT_CTLR_PRIVACY)
7424 			if (rl_idx >= ll_rl_size_get()) {
7425 				rl_idx = rl_idx_curr;
7426 			}
7427 #endif /* CONFIG_BT_CTLR_PRIVACY */
7428 
7429 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7430 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7431 			if (!devmatch) {
7432 				devmatch = devmatch_curr;
7433 			}
7434 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7435 		}
7436 
7437 		if (!node_rx_next) {
7438 			bool has_aux_ptr = !!sec_phy_curr;
7439 
7440 			if (scan_data) {
7441 				if (has_aux_ptr) {
7442 					scan_data_status =
7443 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7444 				}
7445 			} else if (has_aux_ptr) {
7446 				data_status =
7447 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7448 			}
7449 
7450 			break;
7451 		}
7452 
7453 		node_rx_curr = node_rx_next;
7454 		node_rx_next = node_rx_curr->rx_ftr.extra;
7455 		adv = (void *)node_rx_curr->pdu;
7456 	} while (1);
7457 
7458 	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
7459 	    IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) &&
7460 	    !devmatch) {
7461 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7462 		return;
7463 	}
7464 
7465 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
7466 	if (adv_addr) {
7467 		if (dup_scan &&
7468 		    dup_found(PDU_ADV_TYPE_EXT_IND, adv_addr_type, adv_addr,
7469 			      (evt_type & BIT_MASK(2)), adi, data_status)) {
7470 			node_rx_extra_list_release(node_rx->rx_ftr.extra);
7471 			return;
7472 		}
7473 	}
7474 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
7475 
7476 	/* If data incomplete */
7477 	if (data_status) {
7478 		/* Data incomplete and no more to come */
7479 		if (!(adv_addr ||
7480 		      (adi && ((tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) ||
7481 			       data)))) {
7482 			/* No device address, and no valid AD data or Tx Power
7483 			 * parsed for this PDU chain that has an ADI; skip HCI
7484 			 * event generation.
7485 			 * In other words, generate an HCI event only if a device
7486 			 * address is present, or if Tx Power and/or data is
7487 			 * present from an anonymous device.
7488 			 */
7489 			node_rx_extra_list_release(node_rx->rx_ftr.extra);
7490 			return;
7491 		}
7492 	}
7493 
7494 	/* Set directed advertising bit */
7495 	if (direct_addr) {
7496 		evt_type |= BT_HCI_LE_ADV_EVT_TYPE_DIRECT;
7497 	}
7498 
7499 	/* HCI fragment */
7500 	evt_buf = buf;
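	/* Maximum number of AD data bytes that fit into a single LE Extended
	 * Advertising Report event within one Rx event buffer.
	 */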
7501 	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7502 		       sizeof(struct bt_hci_evt_le_meta_event) -
7503 		       sizeof(struct bt_hci_evt_le_ext_advertising_report) -
7504 		       sizeof(struct bt_hci_evt_le_ext_advertising_info);
7505 
7506 	/* If the PDU data length is less than the total data length, or
7507 	 * greater than the maximum HCI event data length, then fragment.
7508 	 */
7509 	if ((data_len < data_len_total) || (data_len > data_len_max)) {
7510 		ext_adv_data_frag(node_rx_data, evt_type, phy, &sec_phy,
7511 				  adv_addr_type, adv_addr, direct_addr_type,
7512 				  direct_addr, rl_idx, &tx_pwr, rssi,
7513 				  interval_le16, adi, data_len_max,
7514 				  data_len_total, &data_len, &data, buf,
7515 				  &evt_buf);
7516 	}
7517 
7518 	/* Set data status bits */
7519 	evt_type |= (data_status << 5);
7520 
7521 	/* Start constructing the adv event for remainder of the PDU data */
7522 	ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type, adv_addr,
7523 			  direct_addr_type, direct_addr, rl_idx, tx_pwr, rssi,
7524 			  interval_le16, adi, data_len, data, evt_buf);
7525 
7526 	/* If scan response event to be constructed */
7527 	if (!scan_data) {
7528 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7529 
7530 		return;
7531 	}
7532 
7533 	/* Set scan response bit */
7534 	evt_type |= BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP;
7535 
7536 	/* Clear the data status bits */
7537 	evt_type &= ~(BIT_MASK(2) << 5);
7538 
7539 	/* Allocate, append as buf fragment and construct the scan response
7540 	 * event.
7541 	 */
7542 	evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7543 	LL_ASSERT(evt_buf);
7544 
7545 	net_buf_frag_add(buf, evt_buf);
7546 
7547 	/* If the PDU data length is less than the total data length, or
7548 	 * greater than the maximum HCI event data length, then fragment.
7549 	 */
7550 	if ((scan_data_len < scan_data_len_total) ||
7551 	    (scan_data_len > data_len_max)) {
7552 		ext_adv_data_frag(node_rx_scan_data, evt_type, phy,
7553 				  &sec_phy_scan, adv_addr_type, adv_addr,
7554 				  direct_addr_type, direct_addr, rl_idx,
7555 				  &scan_rsp_tx_pwr, rssi, interval_le16, adi,
7556 				  data_len_max, scan_data_len_total,
7557 				  &scan_data_len, &scan_data, buf, &evt_buf);
7558 	}
7559 
7560 	/* set scan data status bits */
7561 	evt_type |= (scan_data_status << 5);
7562 
7563 	/* Start constructing the event for remainder of the PDU data */
7564 	ext_adv_info_fill(evt_type, phy, sec_phy_scan, adv_addr_type, adv_addr,
7565 			  direct_addr_type, direct_addr, rl_idx,
7566 			  scan_rsp_tx_pwr, rssi, interval_le16, adi,
7567 			  scan_data_len, scan_data, evt_buf);
7568 
7569 	node_rx_extra_list_release(node_rx->rx_ftr.extra);
7570 }
7571 
7572 static void le_adv_ext_report(struct pdu_data *pdu_data,
7573 			      struct node_rx_pdu *node_rx,
7574 			      struct net_buf *buf, uint8_t phy)
7575 {
7576 	struct pdu_adv *adv = (void *)pdu_data;
7577 
7578 	if ((adv->type == PDU_ADV_TYPE_EXT_IND) && adv->len) {
7579 		le_ext_adv_report(pdu_data, node_rx, buf, phy);
7580 	} else {
7581 		le_ext_adv_legacy_report(pdu_data, node_rx, buf);
7582 	}
7583 }
7584 
7585 static void le_adv_ext_1M_report(struct pdu_data *pdu_data,
7586 				 struct node_rx_pdu *node_rx,
7587 				 struct net_buf *buf)
7588 {
7589 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_1M);
7590 }
7591 
7592 static void le_adv_ext_2M_report(struct pdu_data *pdu_data,
7593 				 struct node_rx_pdu *node_rx,
7594 				 struct net_buf *buf)
7595 {
7596 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_2M);
7597 }
7598 
7599 static void le_adv_ext_coded_report(struct pdu_data *pdu_data,
7600 				    struct node_rx_pdu *node_rx,
7601 				    struct net_buf *buf)
7602 {
7603 	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_CODED);
7604 }
7605 
7606 static void le_scan_timeout(struct pdu_data *pdu_data,
7607 			    struct node_rx_pdu *node_rx, struct net_buf *buf)
7608 {
7609 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7610 	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_TIMEOUT)) {
7611 		return;
7612 	}
7613 
7614 	meta_evt(buf, BT_HCI_EVT_LE_SCAN_TIMEOUT, 0U);
7615 }
7616 
7617 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
7618 static void le_per_adv_sync_established(struct pdu_data *pdu_data,
7619 					struct node_rx_pdu *node_rx,
7620 					struct net_buf *buf)
7621 {
7622 	struct bt_hci_evt_le_per_adv_sync_established *sep;
7623 	struct ll_sync_set *sync;
7624 	struct node_rx_sync *se;
7625 	void *node;
7626 
7627 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7628 	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED)) {
7629 		return;
7630 	}
7631 
7632 	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
7633 		       sizeof(*sep));
7634 
7635 	/* Check for pdu field being aligned before accessing sync established
7636 	 * event.
7637 	 */
7638 	node = pdu_data;
7639 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync));
7640 
7641 	se = node;
7642 	sep->status = se->status;
7643 
7644 	if (se->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
7645 		return;
7646 	}
7647 
7648 	sync = node_rx->rx_ftr.param;
7649 
7650 #if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
7651 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
7652 	dup_periodic_adv_reset(sync->peer_id_addr_type, sync->peer_id_addr, sync->sid);
7653 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
7654 	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
7655 	*/
7656 
7657 	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7658 
7659 	/* Resolved address, if private, has been populated in ULL */
7660 	sep->adv_addr.type = sync->peer_id_addr_type;
7661 	if (sync->peer_addr_resolved) {
7662 		/* Mark it as identity address from RPA (0x02, 0x03) */
7663 		MARK_AS_IDENTITY_ADDR(sep->adv_addr.type);
7664 	}
7665 	(void)memcpy(sep->adv_addr.a.val, sync->peer_id_addr, BDADDR_SIZE);
7666 
7667 	sep->sid = sync->sid;
7668 	sep->phy = find_lsb_set(se->phy);
7669 	sep->interval = sys_cpu_to_le16(se->interval);
7670 	sep->clock_accuracy = se->sca;
7671 }
7672 
7673 static void le_per_adv_sync_report(struct pdu_data *pdu_data,
7674 				   struct node_rx_pdu *node_rx,
7675 				   struct net_buf *buf)
7676 {
7677 	struct node_rx_ftr *ftr = &node_rx->rx_ftr;
7678 	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7679 	struct pdu_adv *adv = (void *)pdu_data;
7680 	struct pdu_adv_aux_ptr *aux_ptr = NULL;
7681 	const struct pdu_adv_adi *adi = NULL;
7682 	uint8_t cte_type = BT_HCI_LE_NO_CTE;
7683 	const struct ll_sync_set *sync;
7684 	struct pdu_adv_com_ext_adv *p;
7685 	struct pdu_adv_ext_hdr *h;
7686 	uint16_t data_len_total;
7687 	struct net_buf *evt_buf;
7688 	uint8_t data_len = 0U;
7689 	uint8_t acad_len = 0U;
7690 	uint8_t *data = NULL;
7691 	uint8_t data_len_max;
7692 	uint8_t *acad = NULL;
7693 	uint8_t hdr_buf_len;
7694 	uint8_t hdr_len;
7695 	uint8_t *ptr;
7696 	int8_t rssi;
7697 	bool accept;
7698 
7699 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7700 	    (!(le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
7701 	     !(le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT))) {
7702 		return;
7703 	}
7704 
7705 	/* NOTE: The timeout_reload field in the sync context is checked under
7706 	 *       a race condition between the HCI Tx and Rx threads, wherein a
7707 	 *       sync terminate may have been performed that resets the
7708 	 *       timeout_reload field before releasing the sync context back
7709 	 *       into its memory pool. It is important that the timeout_reload
7710 	 *       field is at a safe offset inside the sync context such that it
7711 	 *       is not corrupted while in the memory pool.
7712 	 *
7713 	 *       This check ensures reports are not sent out after a sync
7714 	 *       terminate.
7715 	 */
7716 	sync = HDR_LLL2ULL(ftr->param);
7717 	if (unlikely(!sync->timeout_reload)) {
7718 		return;
7719 	}
7720 
7721 	data_len_total = ftr->aux_data_len;
7722 
7723 	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
7724 	    (ftr->aux_failed || data_len_total > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
7725 		struct bt_hci_evt_le_per_advertising_report *sep;
7726 
7727 		sep = meta_evt(buf,
7728 			       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
7729 			       sizeof(*sep));
7730 
7731 		sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7732 		sep->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7733 		sep->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
7734 		sep->cte_type = BT_HCI_LE_NO_CTE;
7735 		sep->data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7736 		sep->length = 0;
7737 
7738 		return;
7739 	}
7740 
7741 	/* The Link Layer currently returns RSSI as an absolute value */
7742 	rssi = -(ftr->rssi);
7743 
7744 	LOG_DBG("len = %u, rssi = %d", adv->len, rssi);
7745 
7746 	p = (void *)&adv->adv_ext_ind;
7747 	h = (void *)p->ext_hdr_adv_data;
7748 	ptr = (void *)h;
7749 
7750 	LOG_DBG("    Per. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
7751 
7752 	if (!p->ext_hdr_len) {
7753 		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
7754 
7755 		goto no_ext_hdr;
7756 	}
7757 
7758 	ptr = h->data;
7759 
7760 	if (h->adv_addr) {
7761 		ptr += BDADDR_SIZE;
7762 	}
7763 
7764 	if (h->tgt_addr) {
7765 		ptr += BDADDR_SIZE;
7766 	}
7767 
7768 	if (h->cte_info) {
7769 		struct pdu_cte_info *cte_info;
7770 
7771 		cte_info = (void *)ptr;
7772 		cte_type = cte_info->type;
7773 		ptr++;
7774 
7775 		LOG_DBG("    CTE type= %d", cte_type);
7776 	}
7777 
7778 	if (h->adi) {
7779 		adi = (void *)ptr;
7780 
7781 		ptr += sizeof(struct pdu_adv_adi);
7782 	}
7783 
7784 	/* AuxPtr */
7785 	if (h->aux_ptr) {
7786 		uint8_t aux_phy;
7787 
7788 		aux_ptr = (void *)ptr;
7789 		if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
7790 			return;
7791 		}
7792 
7793 		ptr += sizeof(*aux_ptr);
7794 
7795 		aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7796 
7797 		LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
7798 		       "= %u offs = 0x%x, phy = 0x%x",
7799 		       aux_ptr->chan_idx, aux_ptr->ca,
7800 		       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
7801 	}
7802 
7803 	/* SyncInfo is not expected here; skip the field if present */
7804 	if (h->sync_info) {
7805 		ptr += sizeof(struct pdu_adv_sync_info);
7806 	}
7807 
7808 	/* Tx Power */
7809 	if (h->tx_pwr) {
7810 		tx_pwr = *(int8_t *)ptr;
7811 		ptr++;
7812 
7813 		LOG_DBG("    Tx pwr= %d dB", tx_pwr);
7814 	}
7815 
7816 	hdr_len = ptr - (uint8_t *)p;
7817 	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
7818 	if (hdr_len > hdr_buf_len) {
7819 		LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
7820 	} else {
7821 		acad_len = hdr_buf_len - hdr_len;
7822 		if (acad_len) {
7823 			acad = ptr;
7824 
7825 			ptr += acad_len;
7826 			hdr_len += acad_len;
7827 		}
7828 	}
7829 
7830 no_ext_hdr:
7831 	if (hdr_len < adv->len) {
7832 		data_len = adv->len - hdr_len;
7833 		data = ptr;
7834 
7835 		LOG_DBG("    AD Data (%u): <todo>", data_len);
7836 	}
7837 
7838 	if (0) {
7839 
7840 #if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
7841 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
7842 	} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
7843 		   adi) {
7844 		uint8_t data_status;
7845 
7846 		data_status = (aux_ptr) ?
7847 			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL :
7848 			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
7849 
7850 		accept = sync->rx_enable && ftr->sync_rx_enabled &&
7851 			 (!sync->nodups ||
7852 			  !dup_found(PDU_ADV_TYPE_EXT_IND,
7853 				     sync->peer_id_addr_type,
7854 				     sync->peer_id_addr,
7855 				     DUP_EXT_ADV_MODE_PERIODIC,
7856 				     adi, data_status));
7857 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
7858 	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
7859 	*/
7860 
7861 	} else {
7862 		accept = sync->rx_enable && ftr->sync_rx_enabled;
7863 	}
7864 
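	/* Maximum number of AD data bytes that fit into a single LE Periodic
	 * Advertising Report event within one Rx event buffer.
	 */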
7865 	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7866 		       sizeof(struct bt_hci_evt_le_meta_event) -
7867 		       sizeof(struct bt_hci_evt_le_per_advertising_report);
7868 
7869 	evt_buf = buf;
7870 
7871 	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) && accept) {
7872 
7873 		/* Pass verdict in LL.TS.p19 section 4.2.3.6 Extended Scanning,
7874 		 * Passive, Periodic Advertising Report, RSSI and TX_Power
7875 		 * states:
7876 		 * TX_Power is set to the value of the TxPower field for the
7877 		 * AUX_SYNC_IND received, and RSSI is set to a valid value.
7878 		 * Subsequent reports with data and the status set to
7879 		 * "Incomplete, more data to come" or "complete" can have the
7880 		 * TX_Power field set to 0x7F.
7881 		 *
7882 		 * In this implementation, data_len_total is the running total
7883 		 * AD data length so far and data_len is the current PDU's AD
7884 		 * data length. For the AUX_SYNC_IND received, data_len_total ==
7885 		 * data_len.
7886 		 */
7887 		if (data_len_total > data_len) {
7888 			/* Subsequent reports */
7889 			tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7890 		}
7891 
7892 		do {
7893 			struct bt_hci_evt_le_per_advertising_report *sep;
7894 			uint8_t data_len_frag;
7895 			uint8_t data_status;
7896 
7897 			data_len_frag = MIN(data_len, data_len_max);
7898 
7899 			/* Start constructing periodic advertising report */
7900 			sep = meta_evt(evt_buf,
7901 				       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
7902 				       sizeof(*sep) + data_len_frag);
7903 
7904 			sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
7905 			sep->tx_power = tx_pwr;
7906 			sep->rssi = rssi;
7907 			sep->cte_type = cte_type;
7908 			sep->length = data_len_frag;
7909 			memcpy(&sep->data[0], data, data_len_frag);
7910 
7911 			data += data_len_frag;
7912 			data_len -= data_len_frag;
7913 
7914 			if (data_len > 0) {
7915 				/* Some data left in PDU, mark as partial data. */
7916 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
7917 
7918 				evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7919 				LL_ASSERT(evt_buf);
7920 
7921 				net_buf_frag_add(buf, evt_buf);
7922 
7923 				tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7924 			} else if (!aux_ptr) {
7925 				/* No data left, no AuxPtr, mark as complete data. */
7926 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
7927 			} else if (ftr->aux_sched &&
7928 				   (data_len_total < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
7929 				/* No data left, but have AuxPtr and scheduled aux scan,
7930 				 * mark as partial data.
7931 				 */
7932 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
7933 			} else {
7934 				/* No data left, have AuxPtr but not aux scan scheduled,
7935 				 * mark as incomplete data.
7936 				 */
7937 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7938 			}
7939 
7940 			sep->data_status = data_status;
7941 		} while (data_len > 0);
7942 
7943 		evt_buf = NULL;
7944 	}
7945 
7946 	if ((le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT) && acad &&
7947 	    (acad_len >= (PDU_BIG_INFO_CLEARTEXT_SIZE +
7948 			  PDU_ADV_DATA_HEADER_SIZE))) {
7949 		struct bt_hci_evt_le_biginfo_adv_report *sep;
7950 		struct pdu_big_info *bi;
7951 		uint8_t bi_size;
7952 		uint8_t phy;
7953 
7954 		/* FIXME: Parse and find the BIGInfo */
7955 		if (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] != BT_DATA_BIG_INFO) {
7956 			return;
7957 		}
7958 
7959 		bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
7960 		bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
7961 
7962 		/* Do not report if phy is invalid or unsupported */
7963 		phy = (bi->chm_phy[4] >> 5);
7964 		if ((phy > EXT_ADV_AUX_PHY_LE_CODED) ||
7965 			(!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
7966 			 (phy == EXT_ADV_AUX_PHY_LE_CODED))) {
7967 			return;
7968 		}
7969 
7970 		/* Allocate new event buffer if periodic advertising report was
7971 		 * constructed with the caller supplied buffer.
7972 		 */
7973 		if (!evt_buf) {
7974 			evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7975 			LL_ASSERT(evt_buf);
7976 
7977 			net_buf_frag_add(buf, evt_buf);
7978 		}
7979 
7980 		/* Start constructing BIGInfo advertising report */
7981 		sep = meta_evt(evt_buf, BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
7982 			       sizeof(*sep));
7983 
7984 		sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);
7985 
7986 		/* NOTE: both sep and bi struct store little-endian values.
7987 		 *       Multi-byte variables extracted using
7988 		 *       PDU_BIG_INFO_ISO_*_GET macros, which return
7989 		 *       value in host-endianness, require conversion.
7990 		 */
7991 		sep->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
7992 		sep->nse = PDU_BIG_INFO_NSE_GET(bi);
7993 		sep->iso_interval =
7994 			sys_cpu_to_le16(PDU_BIG_INFO_ISO_INTERVAL_GET(bi));
7995 		sep->bn = PDU_BIG_INFO_BN_GET(bi);
7996 		sep->pto = PDU_BIG_INFO_PTO_GET(bi);
7997 		sep->irc = PDU_BIG_INFO_IRC_GET(bi);
7998 
7999 		sep->max_pdu = sys_cpu_to_le16(bi->max_pdu);
8000 		sys_put_le24(PDU_BIG_INFO_SDU_INTERVAL_GET(bi),
8001 			sep->sdu_interval);
8002 		sep->max_sdu = sys_cpu_to_le16(PDU_BIG_INFO_MAX_SDU_GET(bi));
8003 		sep->phy = HCI_AUX_PHY_TO_HCI_PHY(bi->chm_phy[4] >> 5);
8004 		sep->framing = (bi->payload_count_framing[4] >> 7) & 0x01;
8005 		if (bi_size == (PDU_BIG_INFO_ENCRYPTED_SIZE + 1)) {
8006 			sep->encryption = 1U;
8007 		} else {
8008 			sep->encryption = 0U;
8009 		}
8010 	}
8011 }
8012 
8013 static void le_per_adv_sync_lost(struct pdu_data *pdu_data,
8014 				 struct node_rx_pdu *node_rx,
8015 				 struct net_buf *buf)
8016 {
8017 	struct bt_hci_evt_le_per_adv_sync_lost *sep;
8018 
8019 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8020 	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_LOST)) {
8021 		return;
8022 	}
8023 
8024 	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, sizeof(*sep));
8025 	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
8026 }
8027 
8028 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
8029 static void le_big_sync_established(struct pdu_data *pdu,
8030 				    struct node_rx_pdu *node_rx,
8031 				    struct net_buf *buf)
8032 {
8033 	struct bt_hci_evt_le_big_sync_established *sep;
8034 	struct ll_sync_iso_set *sync_iso;
8035 	uint32_t transport_latency_big;
8036 	struct node_rx_sync_iso *se;
8037 	struct lll_sync_iso *lll;
8038 	uint32_t iso_interval_us;
8039 	uint32_t big_sync_delay;
8040 	size_t evt_size;
8041 	void *node;
8042 
8043 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8044 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED)) {
8045 		return;
8046 	}
8047 
8048 	sync_iso = node_rx->rx_ftr.param;
8049 	lll = &sync_iso->lll;
8050 
8051 	evt_size = sizeof(*sep) + (lll->stream_count * sizeof(uint16_t));
8052 
8053 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED, evt_size);
8054 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8055 
8056 	/* Check for pdu field being aligned before accessing ISO sync
8057 	 * established event.
8058 	 */
8059 	node = pdu;
8060 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync_iso));
8061 
8062 	se = node;
8063 	sep->status = se->status;
8064 	if (sep->status) {
8065 		return;
8066 	}
8067 
8068 	/* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
8069 	 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
8070 	 *
8071 	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.1: (Framed)
8072 	 * Transport_Latency_BIG = BIG_Sync_Delay + PTO × (NSE / BN – IRC) * ISO_Interval +
8073 	 *                             ISO_Interval + SDU_Interval
8074 	 *
8075 	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.2: (Unframed)
8076 	 * Transport_Latency_BIG = BIG_Sync_Delay + (PTO × (NSE / BN – IRC) + 1) * ISO_Interval -
8077 	 *                             SDU_Interval
8078 	 */
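	/* Illustrative example with assumed values (not from this build):
	 * NSE = 4, BN = 2, IRC = 2, PTO = 1, ISO_Interval = 10 ms,
	 * SDU_Interval = 10 ms, BIG_Sync_Delay = 3 ms:
	 *   Unframed: 3 ms + (1 * (4 / 2 - 2) + 1) * 10 ms - 10 ms = 3 ms
	 *   Framed:   3 ms + 1 * (4 / 2 - 2) * 10 ms + 10 ms + 10 ms = 23 ms
	 */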
8079 	iso_interval_us = lll->iso_interval * ISO_INT_UNIT_US;
8080 	big_sync_delay = ull_iso_big_sync_delay(lll->num_bis, lll->bis_spacing, lll->nse,
8081 						lll->sub_interval, lll->phy, lll->max_pdu,
8082 						lll->enc);
8083 	if (lll->framing) {
8084 		/* Framed */
8085 		transport_latency_big = big_sync_delay +
8086 					lll->pto * (lll->nse / lll->bn - lll->irc) *
8087 					iso_interval_us + iso_interval_us + lll->sdu_interval;
8088 	} else {
8089 		/* Unframed */
8090 		transport_latency_big = big_sync_delay +
8091 					(lll->pto * (lll->nse / lll->bn - lll->irc) + 1) *
8092 					iso_interval_us - lll->sdu_interval;
8093 	}
8094 
8095 	sys_put_le24(transport_latency_big, sep->latency);
8096 	sep->nse = lll->nse;
8097 	sep->bn = lll->bn;
8098 	sep->pto = lll->pto;
8099 	sep->irc = lll->irc;
8100 	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
8101 	sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
8102 	sep->num_bis = lll->stream_count;
8103 
8104 	/* Connection handle list of all BISes synchronized in the BIG */
8105 	for (uint8_t i = 0U; i < lll->stream_count; i++) {
8106 		uint16_t handle;
8107 
8108 		handle = LL_BIS_SYNC_HANDLE_FROM_IDX(lll->stream_handle[i]);
8109 		sep->handle[i] = sys_cpu_to_le16(handle);
8110 	}
8111 }
8112 
8113 static void le_big_sync_lost(struct pdu_data *pdu,
8114 			     struct node_rx_pdu *node_rx,
8115 			     struct net_buf *buf)
8116 {
8117 	struct bt_hci_evt_le_big_sync_lost *sep;
8118 
8119 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8120 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_LOST)) {
8121 		return;
8122 	}
8123 
8124 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_LOST, sizeof(*sep));
8125 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8126 	sep->reason = *((uint8_t *)pdu);
8127 }
8128 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8129 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8130 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8131 #endif /* CONFIG_BT_OBSERVER */
8132 
8133 #if defined(CONFIG_BT_BROADCASTER)
8134 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8135 static void le_adv_ext_terminate(struct pdu_data *pdu_data,
8136 				    struct node_rx_pdu *node_rx,
8137 				    struct net_buf *buf)
8138 {
8139 	struct bt_hci_evt_le_adv_set_terminated *sep;
8140 
8141 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8142 	    !(le_event_mask & BT_EVT_MASK_LE_ADV_SET_TERMINATED)) {
8143 		return;
8144 	}
8145 
8146 	sep = meta_evt(buf, BT_HCI_EVT_LE_ADV_SET_TERMINATED, sizeof(*sep));
8147 	sep->status = node_rx->rx_ftr.param_adv_term.status;
8148 	sep->adv_handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
8149 	sep->conn_handle =
8150 		sys_cpu_to_le16(node_rx->rx_ftr.param_adv_term.conn_handle);
8151 	sep->num_completed_ext_adv_evts =
8152 		node_rx->rx_ftr.param_adv_term.num_events;
8153 }
8154 
8155 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8156 static void le_big_complete(struct pdu_data *pdu_data,
8157 			    struct node_rx_pdu *node_rx,
8158 			    struct net_buf *buf)
8159 {
8160 	struct bt_hci_evt_le_big_complete *sep;
8161 	struct ll_adv_iso_set *adv_iso;
8162 	struct lll_adv_iso *lll;
8163 	size_t evt_size;
8164 
8165 	adv_iso = node_rx->rx_ftr.param;
8166 	lll = &adv_iso->lll;
8167 
8168 	evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
8169 
8170 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_COMPLETE, evt_size);
8171 
8172 	sep->status = BT_HCI_ERR_SUCCESS;
8173 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8174 
8175 	if (sep->status) {
8176 		return;
8177 	}
8178 
8179 	/* FIXME: Fill sync delay and latency */
8180 	sys_put_le24(0, sep->sync_delay);
8181 	sys_put_le24(0, sep->latency);
8182 
8183 	sep->phy = find_lsb_set(lll->phy);
8184 	sep->nse = lll->nse;
8185 	sep->bn = lll->bn;
8186 	sep->pto = lll->pto;
8187 	sep->irc = lll->irc;
8188 	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
8189 	sep->num_bis = lll->num_bis;
8190 
8191 	/* Connection handle list of all BISes in the BIG */
8192 	for (uint8_t i = 0U; i < lll->num_bis; i++) {
8193 		uint16_t handle;
8194 
8195 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(lll->stream_handle[i]);
8196 		sep->handle[i] = sys_cpu_to_le16(handle);
8197 	}
8198 }
8199 
8200 static void le_big_terminate(struct pdu_data *pdu,
8201 			     struct node_rx_pdu *node_rx,
8202 			     struct net_buf *buf)
8203 {
8204 	struct bt_hci_evt_le_big_terminate *sep;
8205 
8206 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8207 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_TERMINATED)) {
8208 		return;
8209 	}
8210 
8211 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_TERMINATE, sizeof(*sep));
8212 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8213 	sep->reason = *((uint8_t *)pdu);
8214 }
8215 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8216 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8217 #endif /* CONFIG_BT_BROADCASTER */
8218 
8219 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8220 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8221 static void le_scan_req_received(struct pdu_data *pdu_data,
8222 				 struct node_rx_pdu *node_rx,
8223 				 struct net_buf *buf)
8224 {
8225 	struct pdu_adv *adv = (void *)pdu_data;
8226 	struct bt_hci_evt_le_scan_req_received *sep;
8227 
8228 #if defined(CONFIG_BT_CTLR_PRIVACY)
8229 	uint8_t rl_idx;
8230 #endif
8231 
8232 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8233 	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_REQ_RECEIVED)) {
8234 		bt_addr_le_t addr;
8235 		uint8_t handle;
8236 		int8_t rssi;
8237 
8238 		handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
8239 		addr.type = adv->tx_addr;
8240 		memcpy(&addr.a.val[0], &adv->scan_req.scan_addr[0],
8241 		       sizeof(bt_addr_t));
8242 
8243 		/* The Link Layer currently returns RSSI as an absolute value */
8244 		rssi = -(node_rx->rx_ftr.rssi);
8245 
8246 		LOG_DBG("handle: %d, addr: %s, rssi: %d dB.", handle, bt_addr_le_str(&addr), rssi);
8247 
8248 		return;
8249 	}
8250 
8251 	sep = meta_evt(buf, BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, sizeof(*sep));
8252 	sep->handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
8253 	sep->addr.type = adv->tx_addr;
8254 	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
8255 	       sizeof(bt_addr_t));
8256 
8257 #if defined(CONFIG_BT_CTLR_PRIVACY)
8258 	rl_idx = node_rx->rx_ftr.rl_idx;
8259 	if (rl_idx < ll_rl_size_get()) {
8260 		/* Store identity address */
8261 		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
8262 				  &sep->addr.a.val[0]);
8263 		/* Mark it as identity address from RPA (0x02, 0x03) */
8264 		MARK_AS_IDENTITY_ADDR(sep->addr.type);
8265 	} else {
8266 #else
8267 	if (1) {
8268 #endif
8269 		sep->addr.type = adv->tx_addr;
8270 		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
8271 		       sizeof(bt_addr_t));
8272 	}
8273 }
8274 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8275 
8276 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
8277 static void le_vs_scan_req_received(struct pdu_data *pdu,
8278 				    struct node_rx_pdu *node_rx,
8279 				    struct net_buf *buf)
8280 {
8281 	struct pdu_adv *adv = (void *)pdu;
8282 	struct bt_hci_evt_vs_scan_req_rx *sep;
8283 
8284 #if defined(CONFIG_BT_CTLR_PRIVACY)
8285 	uint8_t rl_idx;
8286 #endif
8287 
8288 	if (!(vs_events_mask & BT_EVT_MASK_VS_SCAN_REQ_RX)) {
8289 		return;
8290 	}
8291 
8292 	sep = vs_event(buf, BT_HCI_EVT_VS_SCAN_REQ_RX, sizeof(*sep));
8293 	sep->addr.type = adv->tx_addr;
8294 	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
8295 	       sizeof(bt_addr_t));
8296 
8297 #if defined(CONFIG_BT_CTLR_PRIVACY)
8298 	rl_idx = node_rx->rx_ftr.rl_idx;
8299 	if (rl_idx < ll_rl_size_get()) {
8300 		/* Store identity address */
8301 		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
8302 				  &sep->addr.a.val[0]);
8303 		/* Mark it as identity address from RPA (0x02, 0x03) */
8304 		MARK_AS_IDENTITY_ADDR(sep->addr.type);
8305 	} else {
8306 #else
8307 	if (1) {
8308 #endif
8309 		sep->addr.type = adv->tx_addr;
8310 		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
8311 		       sizeof(bt_addr_t));
8312 	}
8313 
8314 	/* The Link Layer currently returns RSSI as an absolute value */
8315 	sep->rssi = -(node_rx->rx_ftr.rssi);
8316 }
8317 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
8318 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8319 
8320 #if defined(CONFIG_BT_CONN)
8321 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
8322 			     struct net_buf *buf)
8323 {
8324 	struct bt_hci_evt_le_conn_complete *lecc;
8325 	struct node_rx_cc *cc;
8326 	uint8_t status;
8327 	void *node;
8328 
8329 	/* Check for pdu field being aligned before accessing connection
8330 	 * complete event.
8331 	 */
8332 	node = pdu_data;
8333 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));
8334 
8335 	cc = node;
8336 	status = cc->status;
8337 
8338 #if defined(CONFIG_BT_CTLR_PRIVACY)
8339 	if (!status) {
8340 		/* Update current RPA */
8341 		ll_rl_crpa_set(cc->peer_addr_type,
8342 			       &cc->peer_addr[0], 0xff,
8343 			       &cc->peer_rpa[0]);
8344 	}
8345 #endif
8346 
8347 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8348 	    (!(le_event_mask & BT_EVT_MASK_LE_CONN_COMPLETE) &&
8349 #if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
8350 	     !(le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE))) {
8351 #else
8352 	     1)) {
8353 #endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
8354 		return;
8355 	}
8356 
8357 	if (!status) {
8358 		conn_count++;
8359 	}
8360 
8361 #if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
8362 	if (le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE) {
8363 		struct bt_hci_evt_le_enh_conn_complete *leecc;
8364 
8365 		leecc = meta_evt(buf, BT_HCI_EVT_LE_ENH_CONN_COMPLETE,
8366 				 sizeof(*leecc));
8367 
8368 		if (status) {
8369 			(void)memset(leecc, 0x00, sizeof(*leecc));
8370 			leecc->status = status;
8371 			return;
8372 		}
8373 
8374 		leecc->status = 0x00;
8375 		leecc->handle = sys_cpu_to_le16(handle);
8376 		leecc->role = cc->role;
8377 
8378 		leecc->peer_addr.type = cc->peer_addr_type;
8379 		memcpy(&leecc->peer_addr.a.val[0], &cc->peer_addr[0],
8380 		       BDADDR_SIZE);
8381 
8382 #if defined(CONFIG_BT_CTLR_PRIVACY)
8383 		memcpy(&leecc->local_rpa.val[0], &cc->local_rpa[0],
8384 		       BDADDR_SIZE);
8385 		memcpy(&leecc->peer_rpa.val[0], &cc->peer_rpa[0],
8386 		       BDADDR_SIZE);
8387 #else /* !CONFIG_BT_CTLR_PRIVACY */
8388 		memset(&leecc->local_rpa.val[0], 0, BDADDR_SIZE);
8389 		memset(&leecc->peer_rpa.val[0], 0, BDADDR_SIZE);
8390 #endif /* !CONFIG_BT_CTLR_PRIVACY */
8391 
8392 		leecc->interval = sys_cpu_to_le16(cc->interval);
8393 		leecc->latency = sys_cpu_to_le16(cc->latency);
8394 		leecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
8395 		leecc->clock_accuracy = cc->sca;
8396 		return;
8397 	}
8398 #endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
8399 
8400 	lecc = meta_evt(buf, BT_HCI_EVT_LE_CONN_COMPLETE, sizeof(*lecc));
8401 
8402 	if (status) {
8403 		(void)memset(lecc, 0x00, sizeof(*lecc));
8404 		lecc->status = status;
8405 		return;
8406 	}
8407 
8408 	lecc->status = 0x00;
8409 	lecc->handle = sys_cpu_to_le16(handle);
8410 	lecc->role = cc->role;
8411 	lecc->peer_addr.type = cc->peer_addr_type & 0x1;
8412 	memcpy(&lecc->peer_addr.a.val[0], &cc->peer_addr[0], BDADDR_SIZE);
8413 	lecc->interval = sys_cpu_to_le16(cc->interval);
8414 	lecc->latency = sys_cpu_to_le16(cc->latency);
8415 	lecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
8416 	lecc->clock_accuracy = cc->sca;
8417 }
8418 
8419 void hci_disconn_complete_encode(struct pdu_data *pdu_data, uint16_t handle,
8420 				 struct net_buf *buf)
8421 {
8422 	struct bt_hci_evt_disconn_complete *ep;
8423 
8424 	if (!(event_mask & BT_EVT_MASK_DISCONN_COMPLETE)) {
8425 		return;
8426 	}
8427 
8428 	hci_evt_create(buf, BT_HCI_EVT_DISCONN_COMPLETE, sizeof(*ep));
8429 	ep = net_buf_add(buf, sizeof(*ep));
8430 
8431 	ep->status = 0x00;
8432 	ep->handle = sys_cpu_to_le16(handle);
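	/* The disconnect reason is carried in the first octet of the PDU */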
8433 	ep->reason = *((uint8_t *)pdu_data);
8434 }
8435 
8436 void hci_disconn_complete_process(uint16_t handle)
8437 {
8438 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
8439 	/* Clear any pending packets upon disconnection */
8440 	/* Note: This requires linear handle values starting from 0 */
8441 	if (handle >= ARRAY_SIZE(hci_hbuf_pend)) {
8442 		return;
8443 	}
8444 
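	/* Return the packets still pending on this handle to the acked
	 * count so that host flow control accounting stays balanced.
	 */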
8445 	hci_hbuf_acked += hci_hbuf_pend[handle];
8446 	hci_hbuf_pend[handle] = 0U;
8447 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
8448 
8449 	conn_count--;
8450 }
8451 
8452 static void le_conn_update_complete(struct pdu_data *pdu_data, uint16_t handle,
8453 				    struct net_buf *buf)
8454 {
8455 	struct bt_hci_evt_le_conn_update_complete *sep;
8456 	struct node_rx_cu *cu;
8457 	void *node;
8458 
8459 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8460 	    !(le_event_mask & BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE)) {
8461 		return;
8462 	}
8463 
8464 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE, sizeof(*sep));
8465 
8466 	/* Check for pdu field being aligned before accessing connection
8467 	 * update complete event.
8468 	 */
8469 	node = pdu_data;
8470 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cu));
8471 
8472 	cu = node;
8473 	sep->status = cu->status;
8474 	sep->handle = sys_cpu_to_le16(handle);
8475 	sep->interval = sys_cpu_to_le16(cu->interval);
8476 	sep->latency = sys_cpu_to_le16(cu->latency);
8477 	sep->supv_timeout = sys_cpu_to_le16(cu->timeout);
8478 }
8479 
8480 #if defined(CONFIG_BT_CTLR_LE_ENC)
8481 static void enc_refresh_complete(struct pdu_data *pdu_data, uint16_t handle,
8482 				 struct net_buf *buf)
8483 {
8484 	struct bt_hci_evt_encrypt_key_refresh_complete *ep;
8485 
8486 	if (!(event_mask & BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE)) {
8487 		return;
8488 	}
8489 
8490 	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
8491 		       sizeof(*ep));
8492 	ep = net_buf_add(buf, sizeof(*ep));
8493 
8494 	ep->status = 0x00;
8495 	ep->handle = sys_cpu_to_le16(handle);
8496 }
8497 #endif /* CONFIG_BT_CTLR_LE_ENC */
8498 
8499 #if defined(CONFIG_BT_CTLR_LE_PING)
8500 static void auth_payload_timeout_exp(struct pdu_data *pdu_data, uint16_t handle,
8501 				     struct net_buf *buf)
8502 {
8503 	struct bt_hci_evt_auth_payload_timeout_exp *ep;
8504 
8505 	if (!(event_mask_page_2 & BT_EVT_MASK_AUTH_PAYLOAD_TIMEOUT_EXP)) {
8506 		return;
8507 	}
8508 
8509 	hci_evt_create(buf, BT_HCI_EVT_AUTH_PAYLOAD_TIMEOUT_EXP, sizeof(*ep));
8510 	ep = net_buf_add(buf, sizeof(*ep));
8511 
8512 	ep->handle = sys_cpu_to_le16(handle);
8513 }
8514 #endif /* CONFIG_BT_CTLR_LE_PING */
8515 
8516 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8517 static void le_chan_sel_algo(struct pdu_data *pdu_data, uint16_t handle,
8518 			     struct net_buf *buf)
8519 {
8520 	struct bt_hci_evt_le_chan_sel_algo *sep;
8521 	struct node_rx_cs *cs;
8522 
8523 	cs = (void *)pdu_data;
8524 
8525 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8526 	    !(le_event_mask & BT_EVT_MASK_LE_CHAN_SEL_ALGO)) {
8527 		LOG_DBG("handle: 0x%04x, CSA: %x.", handle, cs->csa);
8528 		return;
8529 	}
8530 
8531 	sep = meta_evt(buf, BT_HCI_EVT_LE_CHAN_SEL_ALGO, sizeof(*sep));
8532 
8533 	sep->handle = sys_cpu_to_le16(handle);
8534 	sep->chan_sel_algo = cs->csa;
8535 }
8536 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8537 
8538 #if defined(CONFIG_BT_CTLR_PHY)
8539 static void le_phy_upd_complete(struct pdu_data *pdu_data, uint16_t handle,
8540 				struct net_buf *buf)
8541 {
8542 	struct bt_hci_evt_le_phy_update_complete *sep;
8543 	struct node_rx_pu *pu;
8544 
8545 	pu = (void *)pdu_data;
8546 
8547 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8548 	    !(le_event_mask & BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE)) {
8549 		LOG_WRN("handle: 0x%04x, status: %x, tx: %x, rx: %x.", handle, pu->status,
8550 			find_lsb_set(pu->tx), find_lsb_set(pu->rx));
8551 		return;
8552 	}
8553 
8554 	sep = meta_evt(buf, BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE, sizeof(*sep));
8555 
8556 	sep->status = pu->status;
8557 	sep->handle = sys_cpu_to_le16(handle);
8558 	sep->tx_phy = find_lsb_set(pu->tx);
8559 	sep->rx_phy = find_lsb_set(pu->rx);
8560 }
8561 #endif /* CONFIG_BT_CTLR_PHY */
8562 
8563 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8564 static void le_req_peer_sca_complete(struct pdu_data *pdu, uint16_t handle,
8565 				struct net_buf *buf)
8566 {
8567 	struct bt_hci_evt_le_req_peer_sca_complete *sep;
8568 	struct node_rx_sca *scau;
8569 
8570 	scau = (void *)pdu;
8571 
8572 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8573 	    !(le_event_mask & BT_EVT_MASK_LE_REQ_PEER_SCA_COMPLETE)) {
8574 		LOG_WRN("handle: 0x%04x, status: %x, sca: %x.", handle,
8575 			scau->status,
8576 			scau->sca);
8577 		return;
8578 	}
8579 
8580 	sep = meta_evt(buf, BT_HCI_EVT_LE_REQ_PEER_SCA_COMPLETE, sizeof(*sep));
8581 
8582 	sep->status = scau->status;
8583 	sep->handle = sys_cpu_to_le16(handle);
8584 	sep->sca = scau->sca;
8585 }
8586 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8587 #endif /* CONFIG_BT_CONN */
8588 
8589 #if defined(CONFIG_BT_HCI_MESH_EXT)
8590 static void mesh_adv_cplt(struct pdu_data *pdu_data,
8591 			  struct node_rx_pdu *node_rx,
8592 			  struct net_buf *buf)
8593 {
8594 	struct bt_hci_evt_mesh_adv_complete *mep;
8595 
8596 	mep = mesh_evt(buf, BT_HCI_EVT_MESH_ADV_COMPLETE, sizeof(*mep));
8597 	mep->adv_slot = ((uint8_t *)pdu_data)[0];
8598 }
8599 #endif /* CONFIG_BT_HCI_MESH_EXT */
8600 
/**
 * @brief Encode a control-PDU into an HCI buffer
 * @details Execution context: Host thread
 *
 * @param[in]  node_rx  RX node containing header and PDU
 * @param[in]  pdu_data PDU. Same as node_rx->pdu, but more convenient
 * @param[out] buf      Upwards-going HCI buffer to fill
 */
8609 static void encode_control(struct node_rx_pdu *node_rx,
8610 			   struct pdu_data *pdu_data, struct net_buf *buf)
8611 {
8612 	uint16_t handle;
8613 
8614 	handle = node_rx->hdr.handle;
8615 
8616 	switch (node_rx->hdr.type) {
8617 #if defined(CONFIG_BT_OBSERVER)
8618 	case NODE_RX_TYPE_REPORT:
8619 		le_advertising_report(pdu_data, node_rx, buf);
8620 		break;
8621 
8622 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8623 	case NODE_RX_TYPE_EXT_1M_REPORT:
8624 		le_adv_ext_1M_report(pdu_data, node_rx, buf);
8625 		break;
8626 
8627 	case NODE_RX_TYPE_EXT_2M_REPORT:
8628 		le_adv_ext_2M_report(pdu_data, node_rx, buf);
8629 		break;
8630 
8631 	case NODE_RX_TYPE_EXT_CODED_REPORT:
8632 		le_adv_ext_coded_report(pdu_data, node_rx, buf);
8633 		break;
8634 
8635 	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
8636 		le_scan_timeout(pdu_data, node_rx, buf);
8637 		break;
8638 
8639 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
8640 	case NODE_RX_TYPE_SYNC:
8641 		le_per_adv_sync_established(pdu_data, node_rx, buf);
8642 		break;
8643 
8644 	case NODE_RX_TYPE_SYNC_REPORT:
8645 		le_per_adv_sync_report(pdu_data, node_rx, buf);
8646 		break;
8647 
8648 	case NODE_RX_TYPE_SYNC_LOST:
8649 		le_per_adv_sync_lost(pdu_data, node_rx, buf);
8650 		break;
8651 
8652 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
8653 	case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
8654 		le_per_adv_sync_transfer_received(pdu_data, node_rx, buf);
8655 		return;
8656 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
8657 
8658 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
8659 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
8660 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
8661 		vs_le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8662 #else
8663 		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8664 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
8665 		break;
8666 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
8667 
8668 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
8669 	case NODE_RX_TYPE_SYNC_ISO:
8670 		le_big_sync_established(pdu_data, node_rx, buf);
8671 		break;
8672 
8673 	case NODE_RX_TYPE_SYNC_ISO_LOST:
8674 		le_big_sync_lost(pdu_data, node_rx, buf);
8675 		break;
8676 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8677 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8678 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8679 #endif /* CONFIG_BT_OBSERVER */
8680 
8681 #if defined(CONFIG_BT_BROADCASTER)
8682 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8683 	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
8684 		le_adv_ext_terminate(pdu_data, node_rx, buf);
8685 		break;
8686 
8687 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8688 	case NODE_RX_TYPE_BIG_COMPLETE:
8689 		le_big_complete(pdu_data, node_rx, buf);
8690 		break;
8691 	case NODE_RX_TYPE_BIG_TERMINATE:
8692 		le_big_terminate(pdu_data, node_rx, buf);
8693 		break;
8694 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8695 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8696 #endif /* CONFIG_BT_BROADCASTER */
8697 
8698 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8699 	case NODE_RX_TYPE_SCAN_REQ:
8700 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8701 		le_scan_req_received(pdu_data, node_rx, buf);
8702 #elif defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
8703 		le_vs_scan_req_received(pdu_data, node_rx, buf);
8704 #else
8705 		LL_ASSERT(0);
8706 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8707 		break;
8708 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8709 
8710 #if defined(CONFIG_BT_CONN)
8711 	case NODE_RX_TYPE_CONNECTION:
8712 		le_conn_complete(pdu_data, handle, buf);
8713 		break;
8714 
8715 	case NODE_RX_TYPE_TERMINATE:
8716 		hci_disconn_complete_encode(pdu_data, handle, buf);
8717 		break;
8718 
8719 	case NODE_RX_TYPE_CONN_UPDATE:
8720 		le_conn_update_complete(pdu_data, handle, buf);
8721 		break;
8722 
8723 #if defined(CONFIG_BT_CTLR_LE_ENC)
8724 	case NODE_RX_TYPE_ENC_REFRESH:
8725 		enc_refresh_complete(pdu_data, handle, buf);
8726 		break;
8727 #endif /* CONFIG_BT_CTLR_LE_ENC */
8728 
8729 #if defined(CONFIG_BT_CTLR_LE_PING)
8730 	case NODE_RX_TYPE_APTO:
8731 		auth_payload_timeout_exp(pdu_data, handle, buf);
8732 		break;
8733 #endif /* CONFIG_BT_CTLR_LE_PING */
8734 
8735 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
8736 	case NODE_RX_TYPE_CHAN_SEL_ALGO:
8737 		le_chan_sel_algo(pdu_data, handle, buf);
8738 		break;
8739 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8740 
8741 #if defined(CONFIG_BT_CTLR_PHY)
8742 	case NODE_RX_TYPE_PHY_UPDATE:
8743 		le_phy_upd_complete(pdu_data, handle, buf);
8744 		return;
8745 #endif /* CONFIG_BT_CTLR_PHY */
8746 
8747 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
8748 	case NODE_RX_TYPE_RSSI:
		LOG_INF("handle: 0x%04x, rssi: -%d dBm.", handle, pdu_data->rssi);
8750 		return;
8751 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
8752 
8753 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
8754 	case NODE_RX_TYPE_CIS_REQUEST:
8755 		le_cis_request(pdu_data, node_rx, buf);
8756 		return;
8757 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
8758 
8759 #if defined(CONFIG_BT_CTLR_CONN_ISO)
8760 	case NODE_RX_TYPE_CIS_ESTABLISHED:
8761 		le_cis_established(pdu_data, node_rx, buf);
8762 		return;
8763 #endif /* CONFIG_BT_CTLR_CONN_ISO */
8764 
8765 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
8766 	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
8767 		le_req_peer_sca_complete(pdu_data, handle, buf);
8768 		return;
8769 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8770 
8771 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
8772 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
8773 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
8774 		vs_le_df_connection_iq_report(node_rx, buf);
8775 #else
8776 		le_df_connection_iq_report(node_rx, buf);
8777 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
8778 		return;
8779 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
8780 #endif /* CONFIG_BT_CONN */
8781 
8782 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
8783 	case NODE_RX_TYPE_ADV_INDICATION:
8784 		LOG_INF("Advertised.");
8785 		return;
8786 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
8787 
8788 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
8789 	case NODE_RX_TYPE_SCAN_INDICATION:
8790 		LOG_INF("Scanned.");
8791 		return;
8792 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
8793 
8794 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
8795 	case NODE_RX_TYPE_PROFILE:
8796 		LOG_INF("l: %u, %u, %u; t: %u, %u, %u; cpu: %u (%u), %u (%u), %u (%u), %u (%u).",
8797 			pdu_data->profile.lcur, pdu_data->profile.lmin, pdu_data->profile.lmax,
8798 			pdu_data->profile.cur, pdu_data->profile.min, pdu_data->profile.max,
8799 			pdu_data->profile.radio, pdu_data->profile.radio_ticks,
8800 			pdu_data->profile.lll, pdu_data->profile.lll_ticks,
8801 			pdu_data->profile.ull_high, pdu_data->profile.ull_high_ticks,
8802 			pdu_data->profile.ull_low, pdu_data->profile.ull_low_ticks);
8803 		return;
8804 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
8805 
8806 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
8807 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
8808 		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
8809 		return;
8810 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
8811 
8812 #if defined(CONFIG_BT_HCI_MESH_EXT)
8813 	case NODE_RX_TYPE_MESH_ADV_CPLT:
8814 		mesh_adv_cplt(pdu_data, node_rx, buf);
8815 		return;
8816 
8817 	case NODE_RX_TYPE_MESH_REPORT:
8818 		le_advertising_report(pdu_data, node_rx, buf);
8819 		return;
8820 #endif /* CONFIG_BT_HCI_MESH_EXT */
8821 
8822 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
8823 	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
8824 		hci_user_ext_encode_control(node_rx, pdu_data, buf);
8825 		return;
8826 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
8827 
8828 	default:
8829 		LL_ASSERT(0);
8830 		return;
8831 	}
8832 }
8833 
8834 #if defined(CONFIG_BT_CTLR_LE_ENC)
8835 static void le_ltk_request(struct pdu_data *pdu_data, uint16_t handle,
8836 			   struct net_buf *buf)
8837 {
8838 	struct bt_hci_evt_le_ltk_request *sep;
8839 
8840 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8841 	    !(le_event_mask & BT_EVT_MASK_LE_LTK_REQUEST)) {
8842 		return;
8843 	}
8844 
8845 	sep = meta_evt(buf, BT_HCI_EVT_LE_LTK_REQUEST, sizeof(*sep));
8846 
8847 	sep->handle = sys_cpu_to_le16(handle);
8848 	memcpy(&sep->rand, pdu_data->llctrl.enc_req.rand, sizeof(uint64_t));
8849 	memcpy(&sep->ediv, pdu_data->llctrl.enc_req.ediv, sizeof(uint16_t));
8850 }
8851 
8852 static void encrypt_change(uint8_t err, uint16_t handle,
8853 			   struct net_buf *buf, bool encryption_on)
8854 {
8855 	struct bt_hci_evt_encrypt_change *ep;
8856 
8857 	if (!(event_mask & BT_EVT_MASK_ENCRYPT_CHANGE)) {
8858 		return;
8859 	}
8860 
8861 	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_CHANGE, sizeof(*ep));
8862 	ep = net_buf_add(buf, sizeof(*ep));
8863 
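	/* Propagate a Link Layer error as-is; a success indication while
	 * encryption is still off has no defined status here, hence report
	 * Unspecified Error.
	 */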
8864 	ep->status = err ? err : (encryption_on ? err : BT_HCI_ERR_UNSPECIFIED);
8865 	ep->handle = sys_cpu_to_le16(handle);
8866 	ep->encrypt = encryption_on ? 1 : 0;
8867 }
8868 #endif /* CONFIG_BT_CTLR_LE_ENC */
8869 
8870 static void le_remote_feat_complete(uint8_t status, struct pdu_data *pdu_data,
8871 				    uint16_t handle, struct net_buf *buf)
8872 {
8873 	struct bt_hci_evt_le_remote_feat_complete *sep;
8874 
8875 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8876 	    !(le_event_mask & BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE)) {
8877 		return;
8878 	}
8879 
8880 	sep = meta_evt(buf, BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE, sizeof(*sep));
8881 
8882 	sep->status = status;
8883 	sep->handle = sys_cpu_to_le16(handle);
8884 	if (!status) {
8885 		memcpy(&sep->features[0],
8886 		       &pdu_data->llctrl.feature_rsp.features[0],
8887 		       sizeof(sep->features));
8888 	} else {
8889 		(void)memset(&sep->features[0], 0x00, sizeof(sep->features));
8890 	}
8891 }
8892 
8893 static void le_unknown_rsp(struct pdu_data *pdu_data, uint16_t handle,
8894 			   struct net_buf *buf)
8895 {
8897 	switch (pdu_data->llctrl.unknown_rsp.type) {
8898 	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
8899 		le_remote_feat_complete(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE,
8900 					    NULL, handle, buf);
8901 		break;
8902 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8903 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8904 		le_df_cte_req_failed(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, handle, buf);
8905 		break;
8906 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8907 	default:
		LOG_WRN("type: 0x%02x", pdu_data->llctrl.unknown_rsp.type);
8909 		break;
8910 	}
8911 }
8912 
8913 static void le_reject_ext_ind(struct pdu_data *pdu, uint16_t handle, struct net_buf *buf)
8914 {
8915 	switch (pdu->llctrl.reject_ext_ind.reject_opcode) {
8916 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8917 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8918 		le_df_cte_req_failed(pdu->llctrl.reject_ext_ind.error_code, handle, buf);
8919 		break;
8920 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8921 	default:
8922 		LOG_WRN("reject opcode: 0x%02x", pdu->llctrl.reject_ext_ind.reject_opcode);
8923 		break;
8924 	}
8925 }

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
8927 static void le_conn_param_req(struct pdu_data *pdu_data, uint16_t handle,
8928 			      struct net_buf *buf)
8929 {
8930 	struct bt_hci_evt_le_conn_param_req *sep;
8931 
8932 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8933 	    !(le_event_mask & BT_EVT_MASK_LE_CONN_PARAM_REQ)) {
8934 		/* event masked, reject the conn param req */
8935 		ll_conn_update(handle, 2, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, 0,
8936 			       0, 0, 0, NULL);
8937 
8938 		return;
8939 	}
8940 
8941 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_PARAM_REQ, sizeof(*sep));
8942 
8943 	sep->handle = sys_cpu_to_le16(handle);
8944 	sep->interval_min = pdu_data->llctrl.conn_param_req.interval_min;
8945 	sep->interval_max = pdu_data->llctrl.conn_param_req.interval_max;
8946 	sep->latency = pdu_data->llctrl.conn_param_req.latency;
8947 	sep->timeout = pdu_data->llctrl.conn_param_req.timeout;
8948 }
8949 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
8950 
8951 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
8952 static void le_data_len_change(struct pdu_data *pdu_data, uint16_t handle,
8953 			       struct net_buf *buf)
8954 {
8955 	struct bt_hci_evt_le_data_len_change *sep;
8956 
8957 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8958 	    !(le_event_mask & BT_EVT_MASK_LE_DATA_LEN_CHANGE)) {
8959 		return;
8960 	}
8961 
8962 	sep = meta_evt(buf, BT_HCI_EVT_LE_DATA_LEN_CHANGE, sizeof(*sep));
8963 
8964 	sep->handle = sys_cpu_to_le16(handle);
8965 	sep->max_tx_octets = pdu_data->llctrl.length_rsp.max_tx_octets;
8966 	sep->max_tx_time = pdu_data->llctrl.length_rsp.max_tx_time;
8967 	sep->max_rx_octets = pdu_data->llctrl.length_rsp.max_rx_octets;
8968 	sep->max_rx_time = pdu_data->llctrl.length_rsp.max_rx_time;
8969 }
8970 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
8971 
8972 #if defined(CONFIG_BT_REMOTE_VERSION)
8973 static void remote_version_info_encode(struct pdu_data *pdu_data,
8974 				       uint16_t handle, struct net_buf *buf)
8975 {
8976 	struct pdu_data_llctrl_version_ind *ver_ind;
8977 	struct bt_hci_evt_remote_version_info *ep;
8978 
8979 	if (!(event_mask & BT_EVT_MASK_REMOTE_VERSION_INFO)) {
8980 		return;
8981 	}
8982 
8983 	hci_evt_create(buf, BT_HCI_EVT_REMOTE_VERSION_INFO, sizeof(*ep));
8984 	ep = net_buf_add(buf, sizeof(*ep));
8985 
8986 	ver_ind = &pdu_data->llctrl.version_ind;
8987 	ep->status = 0x00;
8988 	ep->handle = sys_cpu_to_le16(handle);
8989 	ep->version = ver_ind->version_number;
8990 	ep->manufacturer = ver_ind->company_id;
8991 	ep->subversion = ver_ind->sub_version_number;
8992 }
8993 #endif /* CONFIG_BT_REMOTE_VERSION */
8994 
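/* Encode an LL control PDU received on a connection into the corresponding
 * HCI event, if any.
 */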
8995 static void encode_data_ctrl(struct node_rx_pdu *node_rx,
8996 			     struct pdu_data *pdu_data, struct net_buf *buf)
8997 {
8998 	uint16_t handle = node_rx->hdr.handle;
8999 
9000 	switch (pdu_data->llctrl.opcode) {
9001 
9002 #if defined(CONFIG_BT_CTLR_LE_ENC)
9003 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
9004 		le_ltk_request(pdu_data, handle, buf);
9005 		break;
9006 
9007 	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
9008 		encrypt_change(0x00, handle, buf, true);
9009 		break;
9010 #endif /* CONFIG_BT_CTLR_LE_ENC */
9011 
9012 #if defined(CONFIG_BT_REMOTE_VERSION)
9013 	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
9014 		remote_version_info_encode(pdu_data, handle, buf);
9015 		break;
9016 #endif /* defined(CONFIG_BT_REMOTE_VERSION) */
9017 
9018 	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
9019 		le_remote_feat_complete(0x00, pdu_data, handle, buf);
9020 		break;
9021 
9022 #if defined(CONFIG_BT_CTLR_LE_ENC)
9023 	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
9024 		encrypt_change(pdu_data->llctrl.reject_ind.error_code, handle,
9025 			       buf, false);
9026 		break;
9027 #endif /* CONFIG_BT_CTLR_LE_ENC */
9028 
9029 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
9030 	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
9031 		le_conn_param_req(pdu_data, handle, buf);
9032 		break;
9033 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
9034 
9035 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
9036 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
9037 	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
9038 		le_data_len_change(pdu_data, handle, buf);
9039 		break;
9040 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
9041 
9042 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
9043 	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
9044 		le_df_cte_req_failed(BT_HCI_CTE_REQ_STATUS_RSP_WITHOUT_CTE, handle, buf);
9045 		break;
9046 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
9047 
9048 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
9049 		le_unknown_rsp(pdu_data, handle, buf);
9050 		break;
9051 
9052 	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
9053 		le_reject_ext_ind(pdu_data, handle, buf);
9054 		break;
9055 
9056 	default:
9057 		LL_ASSERT(0);
9058 		return;
9059 	}
9060 }
9061 
9062 #if defined(CONFIG_BT_CONN)
9063 void hci_acl_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
9064 {
9065 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
9066 	struct bt_hci_acl_hdr *acl;
9067 	uint16_t handle_flags;
9068 	uint16_t handle;
9069 	uint8_t *data;
9070 
9071 	handle = node_rx->hdr.handle;
9072 
9073 	switch (pdu_data->ll_id) {
9074 	case PDU_DATA_LLID_DATA_CONTINUE:
9075 	case PDU_DATA_LLID_DATA_START:
9076 		acl = (void *)net_buf_add(buf, sizeof(*acl));
9077 		if (pdu_data->ll_id == PDU_DATA_LLID_DATA_START) {
9078 			handle_flags = bt_acl_handle_pack(handle, BT_ACL_START);
9079 		} else {
9080 			handle_flags = bt_acl_handle_pack(handle, BT_ACL_CONT);
9081 		}
9082 		acl->handle = sys_cpu_to_le16(handle_flags);
9083 		acl->len = sys_cpu_to_le16(pdu_data->len);
9084 		data = (void *)net_buf_add(buf, pdu_data->len);
9085 		memcpy(data, pdu_data->lldata, pdu_data->len);
9086 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
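		/* With controller to host flow control active (non-zero
		 * host buffer count), account for this ACL packet as
		 * outstanding towards the host until it is acknowledged.
		 */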
9087 		if (hci_hbuf_total > 0) {
9088 			LL_ASSERT((hci_hbuf_sent - hci_hbuf_acked) <
9089 				  hci_hbuf_total);
9090 			hci_hbuf_sent++;
9091 			/* Note: This requires linear handle values starting
9092 			 * from 0
9093 			 */
9094 			LL_ASSERT(handle < ARRAY_SIZE(hci_hbuf_pend));
9095 			hci_hbuf_pend[handle]++;
9096 		}
9097 #endif
9098 		break;
9099 
9100 	default:
9101 		LL_ASSERT(0);
9102 		break;
9103 	}
9104 }
9105 #endif /* CONFIG_BT_CONN */
9106 
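/* Route an RX node to the matching encoder: non data-channel PDUs carry
 * controller events, while data-channel PDUs at this point hold LL control
 * PDUs that map onto HCI events.
 */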
9107 void hci_evt_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
9108 {
9109 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
9110 
9111 	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
9112 		encode_control(node_rx, pdu_data, buf);
9113 	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
9114 		encode_data_ctrl(node_rx, pdu_data, buf);
9115 	}
9116 }
9117 
9118 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
9119 	defined(CONFIG_BT_CTLR_CONN_ISO)
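/* Encode a Number Of Completed Packets event carrying a single handle and
 * count pair.
 */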
9120 void hci_num_cmplt_encode(struct net_buf *buf, uint16_t handle, uint8_t num)
9121 {
9122 	struct bt_hci_evt_num_completed_packets *ep;
9123 	struct bt_hci_handle_count *hc;
9124 	uint8_t num_handles;
9125 	uint8_t len;
9126 
9127 	num_handles = 1U;
9128 
9129 	len = (sizeof(*ep) + (sizeof(*hc) * num_handles));
9130 	hci_evt_create(buf, BT_HCI_EVT_NUM_COMPLETED_PACKETS, len);
9131 
9132 	ep = net_buf_add(buf, len);
9133 	ep->num_handles = num_handles;
9134 	hc = &ep->h[0];
9135 	hc->handle = sys_cpu_to_le16(handle);
9136 	hc->count = sys_cpu_to_le16(num);
9137 }
9138 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
9139 
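/* Classify an RX node for the HCI transport: discardable events (e.g.
 * advertising reports) may be dropped under buffer pressure, while the
 * remaining classes identify events and data that must be delivered to the
 * host.
 */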
9140 uint8_t hci_get_class(struct node_rx_pdu *node_rx)
9141 {
9142 #if defined(CONFIG_BT_CONN)
9143 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
9144 #endif
9145 
9146 	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
9147 
9148 		switch (node_rx->hdr.type) {
9149 #if defined(CONFIG_BT_OBSERVER) || \
9150 	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
9151 	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
9152 	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
9153 	defined(CONFIG_BT_CTLR_PROFILE_ISR)
9154 #if defined(CONFIG_BT_OBSERVER)
9155 		case NODE_RX_TYPE_REPORT:
9156 #endif /* CONFIG_BT_OBSERVER */
9157 
9158 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
9159 		case NODE_RX_TYPE_SCAN_REQ:
9160 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
9161 
9162 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
9163 		case NODE_RX_TYPE_ADV_INDICATION:
9164 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
9165 
9166 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
9167 		case NODE_RX_TYPE_SCAN_INDICATION:
9168 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
9169 
9170 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
9171 		case NODE_RX_TYPE_PROFILE:
9172 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
9173 			return HCI_CLASS_EVT_DISCARDABLE;
9174 #endif
9175 
9176 #if defined(CONFIG_BT_HCI_MESH_EXT)
9177 		case NODE_RX_TYPE_MESH_ADV_CPLT:
9178 		case NODE_RX_TYPE_MESH_REPORT:
9179 #endif /* CONFIG_BT_HCI_MESH_EXT */
9180 
9181 #if defined(CONFIG_BT_CTLR_ADV_EXT)
9182 #if defined(CONFIG_BT_BROADCASTER)
9183 		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
9184 
9185 #if defined(CONFIG_BT_CTLR_ADV_ISO)
9186 		case NODE_RX_TYPE_BIG_COMPLETE:
9187 		case NODE_RX_TYPE_BIG_TERMINATE:
9188 #endif /* CONFIG_BT_CTLR_ADV_ISO */
9189 #endif /* CONFIG_BT_BROADCASTER */
9190 
9191 #if defined(CONFIG_BT_OBSERVER)
9192 		case NODE_RX_TYPE_EXT_1M_REPORT:
9193 		case NODE_RX_TYPE_EXT_2M_REPORT:
9194 		case NODE_RX_TYPE_EXT_CODED_REPORT:
9195 		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
9196 
9197 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
9198 		case NODE_RX_TYPE_SYNC:
9199 		case NODE_RX_TYPE_SYNC_REPORT:
9200 		case NODE_RX_TYPE_SYNC_LOST:
9201 
9202 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
9203 		case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
9204 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
9205 
9206 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
9207 		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
9208 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
9209 
9210 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
9211 		case NODE_RX_TYPE_SYNC_ISO:
9212 		case NODE_RX_TYPE_SYNC_ISO_LOST:
9213 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
9214 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
9215 #endif /* CONFIG_BT_OBSERVER */
9216 
9217 			return HCI_CLASS_EVT_REQUIRED;
9218 #endif /* CONFIG_BT_CTLR_ADV_EXT */
9219 
9220 #if defined(CONFIG_BT_CONN)
9221 		case NODE_RX_TYPE_CONNECTION:
9222 
9223 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
9224 		case NODE_RX_TYPE_CIS_REQUEST:
9225 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
9226 
9227 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
9228 		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
9229 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
9230 
9231 #if defined(CONFIG_BT_CTLR_CONN_ISO)
9232 		case NODE_RX_TYPE_CIS_ESTABLISHED:
9233 #endif /* CONFIG_BT_CTLR_CONN_ISO */
9234 
9235 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
9236 		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
9237 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
9238 
9239 			return HCI_CLASS_EVT_REQUIRED;
9240 
9241 		case NODE_RX_TYPE_TERMINATE:
9242 		case NODE_RX_TYPE_CONN_UPDATE:
9243 
9244 #if defined(CONFIG_BT_CTLR_LE_ENC)
9245 		case NODE_RX_TYPE_ENC_REFRESH:
9246 #endif /* CONFIG_BT_CTLR_LE_ENC */
9247 
9248 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
9249 		case NODE_RX_TYPE_RSSI:
9250 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
9251 
9252 #if defined(CONFIG_BT_CTLR_LE_PING)
9253 		case NODE_RX_TYPE_APTO:
9254 #endif /* CONFIG_BT_CTLR_LE_PING */
9255 
9256 #if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
9257 		case NODE_RX_TYPE_CHAN_SEL_ALGO:
9258 #endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
9259 
9260 #if defined(CONFIG_BT_CTLR_PHY)
9261 		case NODE_RX_TYPE_PHY_UPDATE:
9262 #endif /* CONFIG_BT_CTLR_PHY */
9263 
9264 			return HCI_CLASS_EVT_CONNECTION;
9265 #endif /* CONFIG_BT_CONN */
9266 
9267 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
9268 		case NODE_RX_TYPE_ISO_PDU:
9269 			return HCI_CLASS_ISO_DATA;
9270 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
9271 
9272 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
9273 		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
9274 			return HCI_CLASS_EVT_REQUIRED;
9275 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
9276 
9277 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
9278 		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
9279 			return hci_user_ext_get_class(node_rx);
9280 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
9281 
9282 		default:
9283 			return HCI_CLASS_NONE;
9284 		}
9285 
9286 #if defined(CONFIG_BT_CONN)
9287 	} else if (pdu_data->ll_id == PDU_DATA_LLID_CTRL) {
9288 		return HCI_CLASS_EVT_LLCP;
9289 	} else {
9290 		return HCI_CLASS_ACL_DATA;
9291 	}
9292 #else
9293 	} else {
9294 		return HCI_CLASS_NONE;
9295 	}
9296 #endif
9297 }
9298 
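/* One-time HCI layer initialization: latch the host buffers signal used by
 * controller to host flow control and reset the HCI state.
 */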
9299 void hci_init(struct k_poll_signal *signal_host_buf)
9300 {
9301 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
9302 	hbuf_signal = signal_host_buf;
9303 #endif
9304 	reset(NULL, NULL);
9305 }
9306