1 /*
2  * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <stddef.h>
9 #include <string.h>
10 
11 #include <zephyr/version.h>
12 #include <errno.h>
13 
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/atomic.h>
17 
18 #include <zephyr/drivers/bluetooth.h>
19 
20 #include <zephyr/bluetooth/hci_types.h>
21 #include <zephyr/bluetooth/hci_vs.h>
22 #include <zephyr/bluetooth/buf.h>
23 
24 #include "common/hci_common_internal.h"
25 
26 #include "util/util.h"
27 #include "util/memq.h"
28 #include "util/mem.h"
29 #include "util/dbuf.h"
30 
31 #include "hal/ecb.h"
32 #include "hal/ccm.h"
33 #include "hal/ticker.h"
34 
35 #include "ticker/ticker.h"
36 
37 #include "ll_sw/pdu_df.h"
38 #include "lll/pdu_vendor.h"
39 #include "ll_sw/pdu.h"
40 
41 #include "ll_sw/lll.h"
42 #include "lll/lll_adv_types.h"
43 #include "ll_sw/lll_adv.h"
44 #include "lll/lll_adv_pdu.h"
45 #include "ll_sw/lll_scan.h"
46 #include "lll/lll_df_types.h"
47 #include "ll_sw/lll_sync.h"
48 #include "ll_sw/lll_sync_iso.h"
49 #include "ll_sw/lll_conn.h"
50 #include "ll_sw/lll_conn_iso.h"
51 #include "ll_sw/lll_iso_tx.h"
52 
53 #include "ll_sw/isoal.h"
54 
55 #include "ll_sw/ull_tx_queue.h"
56 
57 #include "ll_sw/ull_adv_types.h"
58 #include "ll_sw/ull_scan_types.h"
59 #include "ll_sw/ull_sync_types.h"
60 #include "ll_sw/ull_conn_types.h"
61 #include "ll_sw/ull_iso_types.h"
62 #include "ll_sw/ull_conn_iso_types.h"
63 #include "ll_sw/ull_conn_iso_internal.h"
64 #include "ll_sw/ull_df_types.h"
65 #include "ll_sw/ull_internal.h"
66 
67 #include "ll_sw/ull_adv_internal.h"
68 #include "ll_sw/ull_sync_internal.h"
69 #include "ll_sw/ull_conn_internal.h"
70 #include "ll_sw/ull_sync_iso_internal.h"
71 #include "ll_sw/ull_iso_internal.h"
72 #include "ll_sw/ull_df_internal.h"
73 
74 #include "ll.h"
75 #include "ll_feat.h"
76 #include "ll_settings.h"
77 
78 #include "hci_internal.h"
79 #include "hci_vendor.h"
80 
81 #if defined(CONFIG_BT_HCI_MESH_EXT)
82 #include "ll_sw/ll_mesh.h"
83 #endif /* CONFIG_BT_HCI_MESH_EXT */
84 
85 #if defined(CONFIG_BT_CTLR_DTM_HCI)
86 #include "ll_sw/ll_test.h"
87 #endif /* CONFIG_BT_CTLR_DTM_HCI */
88 
89 #if defined(CONFIG_BT_CTLR_USER_EXT)
90 #include "hci_user_ext.h"
91 #endif /* CONFIG_BT_CTLR_USER_EXT */
92 
93 #include "common/bt_str.h"
94 #include "hal/debug.h"
95 
96 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
97 #include <zephyr/logging/log.h>
98 LOG_MODULE_REGISTER(bt_ctlr_hci);
99 
100 #define STR_NULL_TERMINATOR 0x00
101 
102 /* opcode of the HCI command currently being processed. The opcode is stored
103  * by hci_cmd_handle() and then used during the creation of cmd complete and
104  * cmd status events to avoid passing it up the call chain.
105  */
106 static uint16_t _opcode;
107 
108 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
109 /* NOTE: Duplicate filter uses two LS bits value of standard advertising modes:
110  *       0 - Non-Connectable Non-Scannable advertising report
111  *       1 - Connectable Non-Scannable advertising report
 *       2 - Non-Connectable Scannable advertising report
113  *       3 - Connectable Scannable advertising report
114  *
115  *       FIXME: Duplicate filtering of Connectable Directed low and high duty
116  *              cycle. If advertiser changes between Connectable Non-Scannable,
117  *              Connectable Directed low, and high duty cycle without changing
118  *              SID and DID, then such reports will be filtered out by the
119  *              implementation. Needs enhancement to current implementation.
120  *
121  *       Define a custom duplicate filter mode for periodic advertising:
122  *       4 - Periodic Advertising report
123  */
124 
125 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
126 #define DUP_EXT_ADV_MODE_MAX      5
127 #define DUP_EXT_ADV_MODE_PERIODIC BIT(2)
128 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
129 #define DUP_EXT_ADV_MODE_MAX      4
130 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
131 
132 #define DUP_EXT_ADV_MODE_COUNT    4
133 
134 /* Duplicate filter entries, one per Bluetooth address */
135 static struct dup_entry {
136 	bt_addr_le_t addr;
137 
138 	/* Mask to accumulate advertising PDU type as bitmask */
139 	uint8_t      mask;
140 
141 #if defined(CONFIG_BT_CTLR_ADV_EXT)
142 	struct dup_ext_adv_mode {
143 		uint16_t set_count:5;
144 		uint16_t set_curr:5;
145 		struct dup_ext_adv_set {
146 			uint8_t data_cmplt:1;
147 			struct pdu_adv_adi adi;
148 		} set[CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX];
149 	} adv_mode[DUP_EXT_ADV_MODE_MAX];
150 #endif
151 } dup_filter[CONFIG_BT_CTLR_DUP_FILTER_LEN];
152 
153 /* Duplicate filtering is disabled if count value is set to negative integer */
154 #define DUP_FILTER_DISABLED (-1)
155 
156 /* Duplicate filtering array entry count, filtering disabled if negative */
157 static int32_t dup_count;
158 /* Duplicate filtering current free entry, overwrites entries after rollover */
159 static uint32_t dup_curr;
160 
161 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
162 /* Helper function to reset non-periodic advertising entries in filter table */
163 static void dup_ext_adv_reset(void);
/* Flag for advertising reports to be filtered for duplicates. */
165 static bool dup_scan;
166 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
167 /* Set constant true so that (dup_count >= 0) decides if advertising duplicate
168  * filter is enabled when Periodic Advertising ADI support is disabled.
169  */
170 static const bool dup_scan = true;
171 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
172 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
173 
174 #if defined(CONFIG_BT_HCI_MESH_EXT)
175 struct scan_filter {
176 	uint8_t count;
177 	uint8_t lengths[CONFIG_BT_CTLR_MESH_SF_PATTERNS];
178 	uint8_t patterns[CONFIG_BT_CTLR_MESH_SF_PATTERNS]
179 		     [BT_HCI_MESH_PATTERN_LEN_MAX];
180 };
181 
182 static struct scan_filter scan_filters[CONFIG_BT_CTLR_MESH_SCAN_FILTERS];
183 static uint8_t sf_curr;
184 #endif
185 
186 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
187 int32_t  hci_hbuf_total;
188 uint32_t hci_hbuf_sent;
189 uint32_t hci_hbuf_acked;
190 uint16_t hci_hbuf_pend[CONFIG_BT_MAX_CONN];
191 atomic_t hci_state_mask;
192 static struct k_poll_signal *hbuf_signal;
193 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
194 
195 #if defined(CONFIG_BT_CONN)
196 static uint32_t conn_count;
197 #endif
198 
199 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
200 static uint32_t cis_pending_count;
201 #endif
202 
/* In HCI events, PHY indices start at 1, compared to the 0-indexed aux_ptr
 * field in the Common Extended Payload Format in the PDUs.
 */
206 #define HCI_AUX_PHY_TO_HCI_PHY(aux_phy) ((aux_phy) + 1)
207 
208 #define DEFAULT_EVENT_MASK           0x1fffffffffff
209 #define DEFAULT_EVENT_MASK_PAGE_2    0x0
210 #define DEFAULT_LE_EVENT_MASK 0x1f
211 
212 static uint64_t event_mask = DEFAULT_EVENT_MASK;
213 static uint64_t event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
214 static uint64_t le_event_mask = DEFAULT_LE_EVENT_MASK;
215 #if defined(CONFIG_BT_HCI_VS)
216 __maybe_unused static uint64_t vs_events_mask = DEFAULT_VS_EVT_MASK;
217 #endif /* CONFIG_BT_HCI_VS */
218 
219 static struct net_buf *cmd_complete_status(uint8_t status);
220 
221 #if defined(CONFIG_BT_CTLR_ADV_EXT)
222 #define BUF_GET_TIMEOUT K_SECONDS(10)
223 
224 #if defined(CONFIG_BT_HCI_RAW)
225 static uint8_t ll_adv_cmds;
226 
ll_adv_cmds_set(uint8_t adv_cmds)227 __weak int ll_adv_cmds_set(uint8_t adv_cmds)
228 {
229 	if (!ll_adv_cmds) {
230 		ll_adv_cmds = adv_cmds;
231 	}
232 
233 	if (ll_adv_cmds != adv_cmds) {
234 		return -EINVAL;
235 	}
236 
237 	return 0;
238 }
239 
ll_adv_cmds_is_ext(void)240 __weak int ll_adv_cmds_is_ext(void)
241 {
242 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
243 }
244 
245 #else /* !CONFIG_BT_HCI_RAW */
ll_adv_cmds_is_ext(void)246 __weak int ll_adv_cmds_is_ext(void)
247 {
248 	return 1;
249 }
250 #endif /* !CONFIG_BT_HCI_RAW */
251 
adv_cmds_legacy_check(struct net_buf ** cc_evt)252 static int adv_cmds_legacy_check(struct net_buf **cc_evt)
253 {
254 	int err;
255 
256 #if defined(CONFIG_BT_HCI_RAW)
257 	err = ll_adv_cmds_set(LL_ADV_CMDS_LEGACY);
258 	if (err && cc_evt) {
259 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
260 	}
261 #else
262 	if (cc_evt) {
263 		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
264 	}
265 
266 	err = -EINVAL;
267 #endif /* CONFIG_BT_HCI_RAW */
268 
269 	return err;
270 }
271 
/* Verify that extended advertising commands may be used. On rejection,
 * optionally allocate a Command Complete event carrying the Command
 * Disallowed status into *cc_evt.
 */
static int adv_cmds_ext_check(struct net_buf **cc_evt)
{
#if defined(CONFIG_BT_HCI_RAW)
	int err = ll_adv_cmds_set(LL_ADV_CMDS_EXT);

	if (err && cc_evt) {
		*cc_evt = cmd_complete_status(BT_HCI_ERR_CMD_DISALLOWED);
	}

	return err;
#else
	/* Host build: extended advertising commands are always allowed */
	ARG_UNUSED(cc_evt);

	return 0;
#endif /* CONFIG_BT_HCI_RAW */
}
287 #else
/* Legacy advertising commands are always permitted when extended
 * advertising support is not compiled in.
 */
static inline int adv_cmds_legacy_check(struct net_buf **cc_evt)
{
	ARG_UNUSED(cc_evt);

	return 0;
}
292 #endif /* CONFIG_BT_CTLR_ADV_EXT */
293 
294 #if defined(CONFIG_BT_CONN)
295 static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
296 			     struct net_buf *buf);
297 #endif /* CONFIG_BT_CONN */
298 
/* Append an HCI event header (event code plus parameter length) to buf. */
static void hci_evt_create(struct net_buf *buf, uint8_t evt, uint8_t len)
{
	struct bt_hci_evt_hdr *evt_hdr = net_buf_add(buf, sizeof(*evt_hdr));

	evt_hdr->evt = evt;
	evt_hdr->len = len;
}
307 
/* Allocate a Command Complete event for the opcode currently being handled
 * (stored in _opcode), reserve plen bytes of return parameters, and return
 * a pointer to that parameter area.
 */
void *hci_cmd_complete(struct net_buf **buf, uint8_t plen)
{
	struct net_buf *cc;

	cc = bt_hci_cmd_complete_create(_opcode, plen);
	*buf = cc;

	return net_buf_add(cc, plen);
}
314 
/* Build a Command Status event for the opcode currently being handled. */
static struct net_buf *cmd_status(uint8_t status)
{
	struct net_buf *evt = bt_hci_cmd_status_create(_opcode, status);

	return evt;
}
319 
/* Build a Command Complete event whose only return parameter is the given
 * status code, for the opcode currently being handled.
 */
static struct net_buf *cmd_complete_status(uint8_t status)
{
	struct bt_hci_evt_cc_status *ccst;
	struct net_buf *evt;

	evt = bt_hci_cmd_complete_create(_opcode, sizeof(*ccst));
	ccst = net_buf_add(evt, sizeof(*ccst));
	ccst->status = status;

	return evt;
}
331 
/* Append an LE Meta event (header plus subevent code) to buf, reserve melen
 * bytes for the subevent parameters, and return a pointer to that area.
 */
static void *meta_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
{
	struct bt_hci_evt_le_meta_event *meta;

	hci_evt_create(buf, BT_HCI_EVT_LE_META_EVENT, sizeof(*meta) + melen);
	meta = net_buf_add(buf, sizeof(*meta));
	meta->subevent = subevt;

	return net_buf_add(buf, melen);
}
342 
343 #if defined(CONFIG_BT_HCI_VS)
vs_event(struct net_buf * buf,uint8_t subevt,uint8_t evt_len)344 __maybe_unused static void *vs_event(struct net_buf *buf, uint8_t subevt, uint8_t evt_len)
345 {
346 	struct bt_hci_evt_vs *evt;
347 
348 	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*evt) + evt_len);
349 	evt = net_buf_add(buf, sizeof(*evt));
350 	evt->subevent = subevt;
351 
352 	return net_buf_add(buf, evt_len);
353 }
354 #endif /* CONFIG_BT_HCI_VS */
355 
356 #if defined(CONFIG_BT_HCI_MESH_EXT)
/* Append a Mesh vendor event (vendor header, mesh prefix and subevent code)
 * to buf, reserve melen bytes for its parameters, and return a pointer to
 * that area.
 */
static void *mesh_evt(struct net_buf *buf, uint8_t subevt, uint8_t melen)
{
	struct bt_hci_evt_mesh *mesh;

	hci_evt_create(buf, BT_HCI_EVT_VENDOR, sizeof(*mesh) + melen);
	mesh = net_buf_add(buf, sizeof(*mesh));
	mesh->prefix = BT_HCI_MESH_EVT_PREFIX;
	mesh->subevent = subevt;

	return net_buf_add(buf, melen);
}
368 #endif /* CONFIG_BT_HCI_MESH_EXT */
369 
370 #if defined(CONFIG_BT_CONN)
disconnect(struct net_buf * buf,struct net_buf ** evt)371 static void disconnect(struct net_buf *buf, struct net_buf **evt)
372 {
373 	struct bt_hci_cp_disconnect *cmd = (void *)buf->data;
374 	uint16_t handle;
375 	uint8_t status;
376 
377 	handle = sys_le16_to_cpu(cmd->handle);
378 	status = ll_terminate_ind_send(handle, cmd->reason);
379 
380 	*evt = cmd_status(status);
381 }
382 
read_remote_ver_info(struct net_buf * buf,struct net_buf ** evt)383 static void read_remote_ver_info(struct net_buf *buf, struct net_buf **evt)
384 {
385 	struct bt_hci_cp_read_remote_version_info *cmd = (void *)buf->data;
386 	uint16_t handle;
387 	uint8_t status;
388 
389 	handle = sys_le16_to_cpu(cmd->handle);
390 	status = ll_version_ind_send(handle);
391 
392 	*evt = cmd_status(status);
393 }
394 #endif /* CONFIG_BT_CONN */
395 
/* Dispatch a Link Control OGF command by OCF.
 * Returns 0 when handled, -EINVAL for unknown/unsupported opcodes.
 */
static int link_control_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
				   struct net_buf **evt)
{
#if defined(CONFIG_BT_CONN)
	if (ocf == BT_OCF(BT_HCI_OP_DISCONNECT)) {
		disconnect(cmd, evt);
		return 0;
	}

	if (ocf == BT_OCF(BT_HCI_OP_READ_REMOTE_VERSION_INFO)) {
		read_remote_ver_info(cmd, evt);
		return 0;
	}
#endif /* CONFIG_BT_CONN */

	return -EINVAL;
}
414 
set_event_mask(struct net_buf * buf,struct net_buf ** evt)415 static void set_event_mask(struct net_buf *buf, struct net_buf **evt)
416 {
417 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
418 
419 	event_mask = sys_get_le64(cmd->events);
420 
421 	*evt = cmd_complete_status(0x00);
422 }
423 
set_event_mask_page_2(struct net_buf * buf,struct net_buf ** evt)424 static void set_event_mask_page_2(struct net_buf *buf, struct net_buf **evt)
425 {
426 	struct bt_hci_cp_set_event_mask_page_2 *cmd = (void *)buf->data;
427 
428 	event_mask_page_2 = sys_get_le64(cmd->events_page_2);
429 
430 	*evt = cmd_complete_status(0x00);
431 }
432 
/* HCI Reset: restore controller HCI state to power-on defaults.
 *
 * Called with buf == NULL for an internal reset (no LL reset and no event
 * generated) or with the command buffer for a host-issued HCI Reset, in
 * which case ll_reset() is invoked and a Command Complete event is
 * allocated into *evt.
 */
static void reset(struct net_buf *buf, struct net_buf **evt)
{
#if defined(CONFIG_BT_HCI_MESH_EXT)
	int i;

	/* Drop all mesh scan filter patterns and deselect current filter */
	for (i = 0; i < ARRAY_SIZE(scan_filters); i++) {
		scan_filters[i].count = 0U;
	}
	sf_curr = 0xFF;
#endif

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Disable advertising report duplicate filtering */
	dup_count = DUP_FILTER_DISABLED;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	dup_scan = false;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* reset event masks */
	event_mask = DEFAULT_EVENT_MASK;
	event_mask_page_2 = DEFAULT_EVENT_MASK_PAGE_2;
	le_event_mask = DEFAULT_LE_EVENT_MASK;

	if (buf) {
		/* Host-issued reset: reset the Link Layer and respond */
		ll_reset();
		*evt = cmd_complete_status(0x00);
	}

#if defined(CONFIG_BT_CONN)
	conn_count = 0U;
#endif

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	cis_pending_count = 0U;
#endif

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* Clear Controller-to-Host ACL flow control accounting */
	hci_hbuf_total = 0;
	hci_hbuf_sent = 0U;
	hci_hbuf_acked = 0U;
	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
	if (buf) {
		/* Wake the Rx path so it observes the reset state */
		atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
		k_poll_signal_raise(hbuf_signal, 0x0);
	}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	hci_recv_fifo_reset();
}
482 
483 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_ctl_to_host_flow(struct net_buf * buf,struct net_buf ** evt)484 static void set_ctl_to_host_flow(struct net_buf *buf, struct net_buf **evt)
485 {
486 	struct bt_hci_cp_set_ctl_to_host_flow *cmd = (void *)buf->data;
487 	uint8_t flow_enable = cmd->flow_enable;
488 	struct bt_hci_evt_cc_status *ccst;
489 
490 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
491 
492 	/* require host buffer size before enabling flow control, and
493 	 * disallow if any connections are up
494 	 */
495 	if (!hci_hbuf_total || conn_count) {
496 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
497 		return;
498 	} else {
499 		ccst->status = 0x00;
500 	}
501 
502 	switch (flow_enable) {
503 	case BT_HCI_CTL_TO_HOST_FLOW_DISABLE:
504 		if (hci_hbuf_total < 0) {
505 			/* already disabled */
506 			return;
507 		}
508 		break;
509 	case BT_HCI_CTL_TO_HOST_FLOW_ENABLE:
510 		if (hci_hbuf_total > 0) {
511 			/* already enabled */
512 			return;
513 		}
514 		break;
515 	default:
516 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
517 		return;
518 	}
519 
520 	hci_hbuf_sent = 0U;
521 	hci_hbuf_acked = 0U;
522 	(void)memset(hci_hbuf_pend, 0, sizeof(hci_hbuf_pend));
523 	hci_hbuf_total = -hci_hbuf_total;
524 }
525 
526 /* Host Number of Completed Packets command does not follow normal flow
527  * control of HCI commands and the Controller side HCI drivers that
528  * allocates HCI command buffers with K_NO_WAIT can end up running out
529  * of command buffers.
530  *
531  * Host will generate up to acl_pkts number of Host Number of Completed
532  * Packets command plus a number of normal HCI commands.
533  *
534  * Normal HCI commands follow the HCI command flow control using
535  * Num_HCI_Command_Packets return in HCI command complete and status.
536  *
537  * Note: Zephyr Controller does not support Num_HCI_Command_Packets > 1.
538  */
539 BUILD_ASSERT(BT_BUF_HCI_ACL_RX_COUNT < BT_BUF_CMD_TX_COUNT,
540 	     "Too low HCI command buffers compare to ACL Rx buffers.");
541 
host_buffer_size(struct net_buf * buf,struct net_buf ** evt)542 static void host_buffer_size(struct net_buf *buf, struct net_buf **evt)
543 {
544 	struct bt_hci_cp_host_buffer_size *cmd = (void *)buf->data;
545 	uint16_t acl_pkts = sys_le16_to_cpu(cmd->acl_pkts);
546 	uint16_t acl_mtu = sys_le16_to_cpu(cmd->acl_mtu);
547 	struct bt_hci_evt_cc_status *ccst;
548 
549 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
550 
551 	if (hci_hbuf_total) {
552 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
553 		return;
554 	}
555 
556 	/* Fragmentation from Controller to Host not supported, require
557 	 * ACL MTU to be at least the LL MTU
558 	 */
559 	if (acl_mtu < LL_LENGTH_OCTETS_RX_MAX) {
560 		LOG_ERR("FC: Require Host ACL MTU (%u) >= LL Max Data Length (%u)", acl_mtu,
561 			LL_LENGTH_OCTETS_RX_MAX);
562 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
563 		return;
564 	}
565 
566 	/* Host Number of Completed Packets command does not follow normal flow
567 	 * control of HCI commands and the Controller side HCI drivers that
568 	 * allocates HCI command buffers with K_NO_WAIT can end up running out
569 	 * of command buffers.
570 	 *
571 	 * Host will generate up to acl_pkts number of Host Number of Completed
572 	 * Packets command plus a number of normal HCI commands.
573 	 *
574 	 * Normal HCI commands follow the HCI command flow control using
575 	 * Num_HCI_Command_Packets return in HCI command complete and status.
576 	 *
577 	 * Note: Zephyr Controller does not support Num_HCI_Command_Packets > 1.
578 	 */
579 	if (acl_pkts >= BT_BUF_CMD_TX_COUNT) {
580 		LOG_WRN("FC: Host ACL packets (%u), BT_BUF_CMD_TX_COUNT (%u)", acl_pkts,
581 			BT_BUF_CMD_TX_COUNT);
582 		acl_pkts = BT_BUF_CMD_TX_COUNT - CONFIG_BT_CTLR_HCI_NUM_CMD_PKT_MAX;
583 	}
584 
585 	LOG_DBG("FC: host buf size %u count %u", acl_mtu, acl_pkts);
586 
587 	hci_hbuf_total = -acl_pkts;
588 }
589 
host_num_completed_packets(struct net_buf * buf,struct net_buf ** evt)590 static void host_num_completed_packets(struct net_buf *buf,
591 				       struct net_buf **evt)
592 {
593 	struct bt_hci_cp_host_num_completed_packets *cmd = (void *)buf->data;
594 	struct bt_hci_evt_cc_status *ccst;
595 	uint32_t count = 0U;
596 
597 	/* special case, no event returned except for error conditions */
598 	if (hci_hbuf_total <= 0) {
599 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
600 		ccst->status = BT_HCI_ERR_CMD_DISALLOWED;
601 		return;
602 	} else if (!conn_count) {
603 		ccst = hci_cmd_complete(evt, sizeof(*ccst));
604 		ccst->status = BT_HCI_ERR_INVALID_PARAM;
605 		return;
606 	}
607 
608 	/* leave *evt == NULL so no event is generated */
609 	for (uint8_t i = 0; i < cmd->num_handles; i++) {
610 		uint16_t h = sys_le16_to_cpu(cmd->h[i].handle);
611 		uint16_t c = sys_le16_to_cpu(cmd->h[i].count);
612 
613 		if ((h >= ARRAY_SIZE(hci_hbuf_pend)) ||
614 		    (c > hci_hbuf_pend[h])) {
615 			ccst = hci_cmd_complete(evt, sizeof(*ccst));
616 			ccst->status = BT_HCI_ERR_INVALID_PARAM;
617 			return;
618 		}
619 
620 		hci_hbuf_pend[h] -= c;
621 		count += c;
622 	}
623 
624 	LOG_DBG("FC: acked: %d", count);
625 	hci_hbuf_acked += count;
626 	k_poll_signal_raise(hbuf_signal, 0x0);
627 }
628 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
629 
630 #if defined(CONFIG_BT_CTLR_LE_PING)
read_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)631 static void read_auth_payload_timeout(struct net_buf *buf, struct net_buf **evt)
632 {
633 	struct bt_hci_cp_read_auth_payload_timeout *cmd = (void *)buf->data;
634 	struct bt_hci_rp_read_auth_payload_timeout *rp;
635 	uint16_t auth_payload_timeout;
636 	uint16_t handle;
637 	uint8_t status;
638 
639 	handle = sys_le16_to_cpu(cmd->handle);
640 
641 	status = ll_apto_get(handle, &auth_payload_timeout);
642 
643 	rp = hci_cmd_complete(evt, sizeof(*rp));
644 	rp->status = status;
645 	rp->handle = sys_cpu_to_le16(handle);
646 	rp->auth_payload_timeout = sys_cpu_to_le16(auth_payload_timeout);
647 }
648 
write_auth_payload_timeout(struct net_buf * buf,struct net_buf ** evt)649 static void write_auth_payload_timeout(struct net_buf *buf,
650 				       struct net_buf **evt)
651 {
652 	struct bt_hci_cp_write_auth_payload_timeout *cmd = (void *)buf->data;
653 	struct bt_hci_rp_write_auth_payload_timeout *rp;
654 	uint16_t auth_payload_timeout;
655 	uint16_t handle;
656 	uint8_t status;
657 
658 	handle = sys_le16_to_cpu(cmd->handle);
659 	auth_payload_timeout = sys_le16_to_cpu(cmd->auth_payload_timeout);
660 
661 	status = ll_apto_set(handle, auth_payload_timeout);
662 
663 	rp = hci_cmd_complete(evt, sizeof(*rp));
664 	rp->status = status;
665 	rp->handle = sys_cpu_to_le16(handle);
666 }
667 #endif /* CONFIG_BT_CTLR_LE_PING */
668 
669 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
configure_data_path(struct net_buf * buf,struct net_buf ** evt)670 static void configure_data_path(struct net_buf *buf,
671 				struct net_buf **evt)
672 {
673 	struct bt_hci_cp_configure_data_path *cmd = (void *)buf->data;
674 	struct bt_hci_rp_configure_data_path *rp;
675 
676 	uint8_t *vs_config;
677 	uint8_t status;
678 
679 	vs_config = &cmd->vs_config[0];
680 
681 	if (IS_ENABLED(CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH)) {
682 		status = ll_configure_data_path(cmd->data_path_dir,
683 						cmd->data_path_id,
684 						cmd->vs_config_len,
685 						vs_config);
686 	} else {
687 		status = BT_HCI_ERR_INVALID_PARAM;
688 	}
689 
690 	rp = hci_cmd_complete(evt, sizeof(*rp));
691 	rp->status = status;
692 }
693 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
694 
695 #if defined(CONFIG_BT_CTLR_CONN_ISO)
read_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)696 static void read_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
697 {
698 	struct bt_hci_rp_read_conn_accept_timeout *rp;
699 	uint16_t timeout;
700 
701 	ARG_UNUSED(buf);
702 
703 	rp = hci_cmd_complete(evt, sizeof(*rp));
704 
705 	rp->status = ll_conn_iso_accept_timeout_get(&timeout);
706 	rp->conn_accept_timeout = sys_cpu_to_le16(timeout);
707 }
708 
write_conn_accept_timeout(struct net_buf * buf,struct net_buf ** evt)709 static void write_conn_accept_timeout(struct net_buf *buf, struct net_buf **evt)
710 {
711 	struct bt_hci_cp_write_conn_accept_timeout *cmd = (void *)buf->data;
712 	struct bt_hci_rp_write_conn_accept_timeout *rp;
713 	uint16_t timeout;
714 
715 	timeout = sys_le16_to_cpu(cmd->conn_accept_timeout);
716 
717 	rp = hci_cmd_complete(evt, sizeof(*rp));
718 
719 	rp->status = ll_conn_iso_accept_timeout_set(timeout);
720 }
721 #endif /* CONFIG_BT_CTLR_CONN_ISO */
722 
723 #if defined(CONFIG_BT_CONN)
read_tx_power_level(struct net_buf * buf,struct net_buf ** evt)724 static void read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
725 {
726 	struct bt_hci_cp_read_tx_power_level *cmd = (void *)buf->data;
727 	struct bt_hci_rp_read_tx_power_level *rp;
728 	uint16_t handle;
729 	uint8_t status;
730 	uint8_t type;
731 
732 	handle = sys_le16_to_cpu(cmd->handle);
733 	type = cmd->type;
734 
735 	rp = hci_cmd_complete(evt, sizeof(*rp));
736 
737 	status = ll_tx_pwr_lvl_get(BT_HCI_VS_LL_HANDLE_TYPE_CONN,
738 				   handle, type, &rp->tx_power_level);
739 
740 	rp->status = status;
741 	rp->handle = sys_cpu_to_le16(handle);
742 }
743 #endif /* CONFIG_BT_CONN */
744 
ctrl_bb_cmd_handle(uint16_t ocf,struct net_buf * cmd,struct net_buf ** evt)745 static int ctrl_bb_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
746 			      struct net_buf **evt)
747 {
748 	switch (ocf) {
749 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK):
750 		set_event_mask(cmd, evt);
751 		break;
752 
753 	case BT_OCF(BT_HCI_OP_RESET):
754 		reset(cmd, evt);
755 		break;
756 
757 	case BT_OCF(BT_HCI_OP_SET_EVENT_MASK_PAGE_2):
758 		set_event_mask_page_2(cmd, evt);
759 		break;
760 
761 #if defined(CONFIG_BT_CTLR_CONN_ISO)
762 	case BT_OCF(BT_HCI_OP_READ_CONN_ACCEPT_TIMEOUT):
763 		read_conn_accept_timeout(cmd, evt);
764 		break;
765 
766 	case BT_OCF(BT_HCI_OP_WRITE_CONN_ACCEPT_TIMEOUT):
767 		write_conn_accept_timeout(cmd, evt);
768 		break;
769 #endif /* CONFIG_BT_CTLR_CONN_ISO */
770 
771 #if defined(CONFIG_BT_CONN)
772 	case BT_OCF(BT_HCI_OP_READ_TX_POWER_LEVEL):
773 		read_tx_power_level(cmd, evt);
774 		break;
775 #endif /* CONFIG_BT_CONN */
776 
777 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
778 	case BT_OCF(BT_HCI_OP_SET_CTL_TO_HOST_FLOW):
779 		set_ctl_to_host_flow(cmd, evt);
780 		break;
781 
782 	case BT_OCF(BT_HCI_OP_HOST_BUFFER_SIZE):
783 		host_buffer_size(cmd, evt);
784 		break;
785 
786 	case BT_OCF(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS):
787 		host_num_completed_packets(cmd, evt);
788 		break;
789 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
790 
791 #if defined(CONFIG_BT_CTLR_LE_PING)
792 	case BT_OCF(BT_HCI_OP_READ_AUTH_PAYLOAD_TIMEOUT):
793 		read_auth_payload_timeout(cmd, evt);
794 		break;
795 
796 	case BT_OCF(BT_HCI_OP_WRITE_AUTH_PAYLOAD_TIMEOUT):
797 		write_auth_payload_timeout(cmd, evt);
798 		break;
799 #endif /* CONFIG_BT_CTLR_LE_PING */
800 
801 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
802 	case BT_OCF(BT_HCI_OP_CONFIGURE_DATA_PATH):
803 		configure_data_path(cmd, evt);
804 		break;
805 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
806 
807 	default:
808 		return -EINVAL;
809 	}
810 
811 	return 0;
812 }
813 
/* HCI Read Local Version Information: report HCI/LMP versions, company id
 * and subversion from build-time link layer settings.
 */
static void read_local_version_info(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_read_local_version_info *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));

	rp->status = 0x00;
	rp->hci_version = LL_VERSION_NUMBER;
	rp->hci_revision = sys_cpu_to_le16(0);
	/* LMP fields mirror the LL (HCI) version for this controller */
	rp->lmp_version = LL_VERSION_NUMBER;
	rp->manufacturer = sys_cpu_to_le16(ll_settings_company_id());
	rp->lmp_subversion = sys_cpu_to_le16(ll_settings_subversion_number());
}
827 
read_supported_commands(struct net_buf * buf,struct net_buf ** evt)828 static void read_supported_commands(struct net_buf *buf, struct net_buf **evt)
829 {
830 	struct bt_hci_rp_read_supported_commands *rp;
831 
832 	rp = hci_cmd_complete(evt, sizeof(*rp));
833 
834 	rp->status = 0x00;
835 	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));
836 
837 #if defined(CONFIG_BT_REMOTE_VERSION)
838 	/* Read Remote Version Info. */
839 	rp->commands[2] |= BIT(7);
840 #endif
841 	/* Set Event Mask, and Reset. */
842 	rp->commands[5] |= BIT(6) | BIT(7);
843 
844 #if defined(CONFIG_BT_CTLR_CONN_ISO)
845 	/* Read/Write Connection Accept Timeout */
846 	rp->commands[7] |= BIT(2) | BIT(3);
847 #endif /* CONFIG_BT_CTLR_CONN_ISO */
848 
849 	/* Read TX Power Level. */
850 	rp->commands[10] |= BIT(2);
851 
852 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
853 	/* Set FC, Host Buffer Size and Host Num Completed */
854 	rp->commands[10] |= BIT(5) | BIT(6) | BIT(7);
855 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
856 
857 	/* Read Local Version Info, Read Local Supported Features. */
858 	rp->commands[14] |= BIT(3) | BIT(5);
859 	/* Read BD ADDR. */
860 	rp->commands[15] |= BIT(1);
861 
862 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
863 	/* Read RSSI. */
864 	rp->commands[15] |= BIT(5);
865 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
866 
867 	/* Set Event Mask Page 2 */
868 	rp->commands[22] |= BIT(2);
869 	/* LE Set Event Mask, LE Read Buffer Size, LE Read Local Supp Feats,
870 	 * Set Random Addr
871 	 */
872 	rp->commands[25] |= BIT(0) | BIT(1) | BIT(2) | BIT(4);
873 
874 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
875 	/* LE Read FAL Size, LE Clear FAL */
876 	rp->commands[26] |= BIT(6) | BIT(7);
877 	/* LE Add Dev to FAL, LE Remove Dev from FAL */
878 	rp->commands[27] |= BIT(0) | BIT(1);
879 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
880 
881 	/* LE Encrypt, LE Rand */
882 	rp->commands[27] |= BIT(6) | BIT(7);
883 	/* LE Read Supported States */
884 	rp->commands[28] |= BIT(3);
885 
886 #if defined(CONFIG_BT_BROADCASTER)
887 	/* LE Set Adv Params, LE Read Adv Channel TX Power, LE Set Adv Data */
888 	rp->commands[25] |= BIT(5) | BIT(6) | BIT(7);
889 	/* LE Set Scan Response Data, LE Set Adv Enable */
890 	rp->commands[26] |= BIT(0) | BIT(1);
891 
892 #if defined(CONFIG_BT_CTLR_ADV_EXT)
893 	/* LE Set Adv Set Random Addr, LE Set Ext Adv Params, LE Set Ext Adv
894 	 * Data, LE Set Ext Adv Scan Rsp Data, LE Set Ext Adv Enable, LE Read
895 	 * Max Adv Data Len, LE Read Num Supp Adv Sets
896 	 */
897 	rp->commands[36] |= BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
898 			    BIT(6) | BIT(7);
899 	/* LE Remove Adv Set, LE Clear Adv Sets */
900 	rp->commands[37] |= BIT(0) | BIT(1);
901 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
902 	/* LE Set PA Params, LE Set PA Data, LE Set PA Enable */
903 	rp->commands[37] |= BIT(2) | BIT(3) | BIT(4);
904 #if defined(CONFIG_BT_CTLR_ADV_ISO)
905 	/* LE Create BIG, LE Create BIG Test, LE Terminate BIG */
906 	rp->commands[42] |= BIT(5) | BIT(6) | BIT(7);
907 #endif /* CONFIG_BT_CTLR_ADV_ISO */
908 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
909 #endif /* CONFIG_BT_CTLR_ADV_EXT */
910 #endif /* CONFIG_BT_BROADCASTER */
911 
912 #if defined(CONFIG_BT_OBSERVER)
913 	/* LE Set Scan Params, LE Set Scan Enable */
914 	rp->commands[26] |= BIT(2) | BIT(3);
915 
916 #if defined(CONFIG_BT_CTLR_ADV_EXT)
917 	/* LE Set Extended Scan Params, LE Set Extended Scan Enable */
918 	rp->commands[37] |= BIT(5) | BIT(6);
919 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
920 	/* LE PA Create Sync, LE PA Create Sync Cancel, LE PA Terminate Sync */
921 	rp->commands[38] |= BIT(0) | BIT(1) | BIT(2);
922 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
923 	/* LE PA Add Device to Periodic Advertiser List,
924 	 * LE PA Remove Device from Periodic Advertiser List,
925 	 * LE Clear Periodic Advertiser List,
	 * LE Read Periodic Advertiser List Size
927 	 */
928 	rp->commands[38] |= BIT(3) | BIT(4) | BIT(5) | BIT(6);
929 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
930 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
931 	/* LE Set PA Receive Enable */
932 	rp->commands[40] |= BIT(5);
933 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
934 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
935 	/* LE BIG Create Sync, LE BIG Terminate Sync */
936 	rp->commands[43] |= BIT(0) | BIT(1);
937 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
938 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
939 #endif /* CONFIG_BT_CTLR_ADV_EXT */
940 
941 #endif /* CONFIG_BT_OBSERVER */
942 
943 #if defined(CONFIG_BT_CONN)
944 #if defined(CONFIG_BT_CENTRAL)
945 	/* LE Create Connection, LE Create Connection Cancel */
946 	rp->commands[26] |= BIT(4) | BIT(5);
947 	/* Set Host Channel Classification */
948 	rp->commands[27] |= BIT(3);
949 
950 #if defined(CONFIG_BT_CTLR_ADV_EXT)
951 	/* LE Extended Create Connection */
952 	rp->commands[37] |= BIT(7);
953 #endif /* CONFIG_BT_CTLR_ADV_EXT */
954 
955 #if defined(CONFIG_BT_CTLR_LE_ENC)
956 	/* LE Start Encryption */
957 	rp->commands[28] |= BIT(0);
958 #endif /* CONFIG_BT_CTLR_LE_ENC */
959 
960 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
961 	/* LE Set CIG Parameters */
962 	rp->commands[41] |= BIT(7);
963 	/* LE Set CIG Parameters Test, LE Create CIS, LE Remove CIS */
964 	rp->commands[42] |= BIT(0) | BIT(1) | BIT(2);
965 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
966 #endif /* CONFIG_BT_CENTRAL */
967 
968 #if defined(CONFIG_BT_PERIPHERAL)
969 #if defined(CONFIG_BT_CTLR_LE_ENC)
970 	/* LE LTK Request Reply, LE LTK Request Negative Reply */
971 	rp->commands[28] |= BIT(1) | BIT(2);
972 #endif /* CONFIG_BT_CTLR_LE_ENC */
973 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
974 	/* LE Accept CIS Request, LE Reject CIS Request */
975 	rp->commands[42] |= BIT(3) | BIT(4);
976 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
977 #endif /* CONFIG_BT_PERIPHERAL */
978 
979 	/* Disconnect. */
980 	rp->commands[0] |= BIT(5);
981 	/* LE Connection Update, LE Read Channel Map, LE Read Remote Features */
982 	rp->commands[27] |= BIT(2) | BIT(4) | BIT(5);
983 
984 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
985 	/* LE Remote Conn Param Req and Neg Reply */
986 	rp->commands[33] |= BIT(4) | BIT(5);
987 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
988 
989 #if defined(CONFIG_BT_CTLR_LE_PING)
990 	/* Read and Write authenticated payload timeout */
991 	rp->commands[32] |= BIT(4) | BIT(5);
992 #endif /* CONFIG_BT_CTLR_LE_PING */
993 
994 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
995 	/* LE Set Data Length, and LE Read Suggested Data Length. */
996 	rp->commands[33] |= BIT(6) | BIT(7);
997 	/* LE Write Suggested Data Length. */
998 	rp->commands[34] |= BIT(0);
999 	/* LE Read Maximum Data Length. */
1000 	rp->commands[35] |= BIT(3);
1001 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1002 
1003 #if defined(CONFIG_BT_CTLR_PHY)
1004 	/* LE Read PHY Command. */
1005 	rp->commands[35] |= BIT(4);
1006 	/* LE Set Default PHY Command. */
1007 	rp->commands[35] |= BIT(5);
1008 	/* LE Set PHY Command. */
1009 	rp->commands[35] |= BIT(6);
1010 #endif /* CONFIG_BT_CTLR_PHY */
1011 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1012 	/* LE Request Peer SCA */
1013 	rp->commands[43] |= BIT(2);
1014 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1015 #endif /* CONFIG_BT_CONN */
1016 
1017 #if defined(CONFIG_BT_CTLR_DTM_HCI)
1018 	/* LE RX Test, LE TX Test, LE Test End */
1019 	rp->commands[28] |= BIT(4) | BIT(5) | BIT(6);
1020 	/* LE Enhanced RX Test. */
1021 	rp->commands[35] |= BIT(7);
1022 	/* LE Enhanced TX Test. */
1023 	rp->commands[36] |= BIT(0);
1024 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
1025 	rp->commands[39] |= BIT(3);
1026 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
1027 
1028 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
1029 	rp->commands[39] |= BIT(4);
1030 #endif
1031 
1032 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
1033 	rp->commands[45] |= BIT(0);
1034 #endif
1035 #endif /* CONFIG_BT_CTLR_DTM_HCI */
1036 
1037 #if defined(CONFIG_BT_CTLR_PRIVACY)
1038 	/* LE resolving list commands, LE Read Peer RPA */
1039 	rp->commands[34] |= BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7);
1040 	/* LE Read Local RPA, LE Set AR Enable, Set RPA Timeout */
1041 	rp->commands[35] |= BIT(0) | BIT(1) | BIT(2);
1042 	/* LE Set Privacy Mode */
1043 	rp->commands[39] |= BIT(2);
1044 #endif /* CONFIG_BT_CTLR_PRIVACY */
1045 
1046 #if defined(CONFIG_BT_CTLR_DF)
1047 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
1048 	/* LE Set Connectionless CTE Transmit Parameters,
1049 	 * LE Set Connectionless CTE Transmit Enable
1050 	 */
1051 	rp->commands[39] |= BIT(5) | BIT(6);
1052 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
1053 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1054 	/* LE Set Connectionless IQ Sampling Enable */
1055 	rp->commands[39] |= BIT(7);
1056 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1057 	/* LE Read Antenna Information */
1058 	rp->commands[40] |= BIT(4);
1059 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1060 	/* LE Set Connection CTE Transmit Parameters */
1061 	rp->commands[40] |= BIT(1);
1062 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1063 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1064 	/* LE Set Connection CTE Receive Parameters */
1065 	rp->commands[40] |= BIT(0);
1066 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1067 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1068 	/* LE Connection CTE Request Enable */
1069 	rp->commands[40] |= BIT(2);
1070 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1071 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1072 	/* LE Connection CTE Response Enable */
1073 	rp->commands[40] |= BIT(3);
1074 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1075 
1076 #endif /* CONFIG_BT_CTLR_DF */
1077 
1078 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
1079 	/* LE Periodic Advertising Sync Transfer */
1080 	rp->commands[40] |= BIT(6);
1081 	/* LE Periodic Advertising Set Info Transfer */
1082 	rp->commands[40] |= BIT(7);
1083 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
1084 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1085 	/* LE Set Periodic Advertising Sync Transfer Parameters */
1086 	rp->commands[41] |= BIT(0);
1087 	/* LE Set Default Periodic Advertising Sync Transfer Parameters */
1088 	rp->commands[41] |= BIT(1);
1089 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1090 
1091 	/* LE Read TX Power. */
1092 	rp->commands[38] |= BIT(7);
1093 
1094 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1095 	/* LE Read Buffer Size v2, LE Read ISO TX Sync */
1096 	rp->commands[41] |= BIT(5) | BIT(6);
1097 	/* LE ISO Transmit Test */
1098 	rp->commands[43] |= BIT(5);
1099 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1100 
1101 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
1102 	/* LE ISO Receive Test, LE ISO Read Test Counters */
1103 	rp->commands[43] |= BIT(6) | BIT(7);
1104 
1105 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
1106 	/* LE Read ISO Link Quality */
1107 	rp->commands[44] |= BIT(2);
1108 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
1109 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
1110 
1111 #if defined(CONFIG_BT_CTLR_ISO)
1112 	/* LE Setup ISO Data Path, LE Remove ISO Data Path */
1113 	rp->commands[43] |= BIT(3) | BIT(4);
1114 	/* LE ISO Test End */
1115 	rp->commands[44] |= BIT(0);
1116 #endif /* CONFIG_BT_CTLR_ISO */
1117 
1118 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
1119 	/* LE Set Host Feature */
1120 	rp->commands[44] |= BIT(1);
1121 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
1122 
1123 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
1124 	/* Read Supported Codecs [v2], Codec Capabilities, Controller Delay, Configure Data Path */
1125 	rp->commands[45] |= BIT(2) | BIT(3) | BIT(4) | BIT(5);
1126 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1127 }
1128 
read_local_features(struct net_buf * buf,struct net_buf ** evt)1129 static void read_local_features(struct net_buf *buf, struct net_buf **evt)
1130 {
1131 	struct bt_hci_rp_read_local_features *rp;
1132 
1133 	rp = hci_cmd_complete(evt, sizeof(*rp));
1134 
1135 	rp->status = 0x00;
1136 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1137 	/* BR/EDR not supported and LE supported */
1138 	rp->features[4] = (1 << 5) | (1 << 6);
1139 }
1140 
read_bd_addr(struct net_buf * buf,struct net_buf ** evt)1141 static void read_bd_addr(struct net_buf *buf, struct net_buf **evt)
1142 {
1143 	struct bt_hci_rp_read_bd_addr *rp;
1144 
1145 	rp = hci_cmd_complete(evt, sizeof(*rp));
1146 
1147 	rp->status = 0x00;
1148 
1149 	(void)ll_addr_read(0, &rp->bdaddr.val[0]);
1150 }
1151 
1152 #if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
/* Weak default for the vendor hook supplying the standard codec table.
 * Vendor builds override this symbol; the stub reports no codecs and leaves
 * *codecs untouched (callers must not dereference it when 0 is returned).
 *
 * @param codecs Output: pointer to the vendor's standard codec table.
 * @return Number of supported standard codecs (0 by default).
 */
uint8_t __weak hci_vendor_read_std_codecs(
	const struct bt_hci_std_codec_info_v2 **codecs)
{
	ARG_UNUSED(codecs);

	/* return number of supported codecs */
	return 0;
}
1161 
/* Weak default for the vendor hook supplying the vendor-specific codec table.
 * Vendor builds override this symbol; the stub reports no codecs and leaves
 * *codecs untouched (callers must not dereference it when 0 is returned).
 *
 * @param codecs Output: pointer to the vendor's VS codec table.
 * @return Number of supported vendor-specific codecs (0 by default).
 */
uint8_t __weak hci_vendor_read_vs_codecs(
	const struct bt_hci_vs_codec_info_v2 **codecs)
{
	ARG_UNUSED(codecs);

	/* return number of supported codecs */
	return 0;
}
1170 
1171 /* NOTE: Not implementing the [v1] version.
1172  * Refer to BT Spec v5.3 Vol 4, Part E 7.4.8 Read Local Supported Codecs command
1173  * The [v1] version of this command shall only return codecs supported on the
1174  * BR/EDR physical transport, while the [v2] version shall return codecs
1175  * supported on all physical transports.
1176  */
read_codecs_v2(struct net_buf * buf,struct net_buf ** evt)1177 static void read_codecs_v2(struct net_buf *buf, struct net_buf **evt)
1178 {
1179 	struct bt_hci_rp_read_codecs_v2 *rp;
1180 	const struct bt_hci_std_codec_info_v2 *std_codec_info;
1181 	const struct bt_hci_vs_codec_info_v2 *vs_codec_info;
1182 	struct bt_hci_std_codecs_v2 *std_codecs;
1183 	struct bt_hci_vs_codecs_v2 *vs_codecs;
1184 	size_t std_codecs_bytes;
1185 	size_t vs_codecs_bytes;
1186 	uint8_t num_std_codecs;
1187 	uint8_t num_vs_codecs;
1188 	uint8_t i;
1189 
1190 	/* read standard codec information */
1191 	num_std_codecs = hci_vendor_read_std_codecs(&std_codec_info);
1192 	std_codecs_bytes = sizeof(struct bt_hci_std_codecs_v2) +
1193 		num_std_codecs * sizeof(struct bt_hci_std_codec_info_v2);
1194 	/* read vendor-specific codec information */
1195 	num_vs_codecs = hci_vendor_read_vs_codecs(&vs_codec_info);
1196 	vs_codecs_bytes = sizeof(struct bt_hci_vs_codecs_v2) +
1197 		num_vs_codecs *	sizeof(struct bt_hci_vs_codec_info_v2);
1198 
1199 	/* allocate response packet */
1200 	rp = hci_cmd_complete(evt, sizeof(*rp) +
1201 			      std_codecs_bytes +
1202 			      vs_codecs_bytes);
1203 	rp->status = 0x00;
1204 
1205 	/* copy standard codec information */
1206 	std_codecs = (struct bt_hci_std_codecs_v2 *)&rp->codecs[0];
1207 	std_codecs->num_codecs = num_std_codecs;
1208 	for (i = 0; i < num_std_codecs; i++) {
1209 		struct bt_hci_std_codec_info_v2 *codec;
1210 
1211 		codec = &std_codecs->codec_info[i];
1212 		codec->codec_id = std_codec_info[i].codec_id;
1213 		codec->transports = std_codec_info[i].transports;
1214 	}
1215 
1216 	/* copy vendor specific codec information  */
1217 	vs_codecs = (struct bt_hci_vs_codecs_v2 *)&rp->codecs[std_codecs_bytes];
1218 	vs_codecs->num_codecs = num_vs_codecs;
1219 	for (i = 0; i < num_vs_codecs; i++) {
1220 		struct bt_hci_vs_codec_info_v2 *codec;
1221 
1222 		codec = &vs_codecs->codec_info[i];
1223 		codec->company_id =
1224 			sys_cpu_to_le16(vs_codec_info[i].company_id);
1225 		codec->codec_id = sys_cpu_to_le16(vs_codec_info[i].codec_id);
1226 		codec->transports = vs_codec_info[i].transports;
1227 	}
1228 }
1229 
/* Weak default for the vendor hook reporting per-codec capabilities.
 * Vendor builds override this symbol. The stub zeroes both output counts and
 * leaves *capabilities untouched, so callers must not dereference it when
 * *capabilities_bytes is zero.
 *
 * @param num_capabilities  Output: number of capability records (set to 0).
 * @param capabilities_bytes Output: total capability byte count (set to 0).
 * @param capabilities      Output: capability data pointer (unset here).
 * @return HCI status code (0x00 = success).
 */
uint8_t __weak hci_vendor_read_codec_capabilities(uint8_t coding_format,
						  uint16_t company_id,
						  uint16_t vs_codec_id,
						  uint8_t transport,
						  uint8_t direction,
						  uint8_t *num_capabilities,
						  size_t *capabilities_bytes,
						  const uint8_t **capabilities)
{
	ARG_UNUSED(coding_format);
	ARG_UNUSED(company_id);
	ARG_UNUSED(vs_codec_id);
	ARG_UNUSED(transport);
	ARG_UNUSED(direction);
	ARG_UNUSED(capabilities);

	*num_capabilities = 0;
	*capabilities_bytes = 0;

	/* return status */
	return 0x00;
}
1252 
read_codec_capabilities(struct net_buf * buf,struct net_buf ** evt)1253 static void read_codec_capabilities(struct net_buf *buf, struct net_buf **evt)
1254 {
1255 	struct bt_hci_cp_read_codec_capabilities *cmd = (void *)buf->data;
1256 	struct bt_hci_rp_read_codec_capabilities *rp;
1257 	const uint8_t *capabilities;
1258 	size_t capabilities_bytes;
1259 	uint8_t num_capabilities;
1260 	uint16_t vs_codec_id;
1261 	uint16_t company_id;
1262 	uint8_t status;
1263 
1264 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1265 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1266 
1267 	/* read codec capabilities */
1268 	status = hci_vendor_read_codec_capabilities(cmd->codec_id.coding_format,
1269 						    company_id,
1270 						    vs_codec_id,
1271 						    cmd->transport,
1272 						    cmd->direction,
1273 						    &num_capabilities,
1274 						    &capabilities_bytes,
1275 						    &capabilities);
1276 
1277 	/* allocate response packet */
1278 	rp = hci_cmd_complete(evt, sizeof(*rp) + capabilities_bytes);
1279 	rp->status = status;
1280 
1281 	/* copy codec capabilities information */
1282 	rp->num_capabilities = num_capabilities;
1283 	memcpy(&rp->capabilities, capabilities, capabilities_bytes);
1284 }
1285 
/* Weak default for the vendor hook reporting controller delay range for a
 * codec configuration. Vendor builds override this symbol; the stub reports
 * the widest legal range.
 *
 * @param min_delay Output: minimum controller delay (set to 0).
 * @param max_delay Output: maximum controller delay (set to the spec limit).
 * @return HCI status code (0x00 = success).
 */
uint8_t __weak hci_vendor_read_ctlr_delay(uint8_t coding_format,
					  uint16_t company_id,
					  uint16_t vs_codec_id,
					  uint8_t transport,
					  uint8_t direction,
					  uint8_t codec_config_len,
					  const uint8_t *codec_config,
					  uint32_t *min_delay,
					  uint32_t *max_delay)
{
	ARG_UNUSED(coding_format);
	ARG_UNUSED(company_id);
	ARG_UNUSED(vs_codec_id);
	ARG_UNUSED(transport);
	ARG_UNUSED(direction);
	ARG_UNUSED(codec_config_len);
	ARG_UNUSED(codec_config);

	*min_delay = 0;
	*max_delay = 0x3D0900; /* 4 seconds, maximum value allowed by spec */

	/* return status */
	return 0x00;
}
1310 
read_ctlr_delay(struct net_buf * buf,struct net_buf ** evt)1311 static void read_ctlr_delay(struct net_buf *buf, struct net_buf **evt)
1312 {
1313 	struct bt_hci_cp_read_ctlr_delay *cmd = (void *)buf->data;
1314 	struct bt_hci_rp_read_ctlr_delay *rp;
1315 	uint16_t vs_codec_id;
1316 	uint16_t company_id;
1317 	uint32_t min_delay;
1318 	uint32_t max_delay;
1319 	uint8_t status;
1320 
1321 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
1322 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
1323 
1324 	status = hci_vendor_read_ctlr_delay(cmd->codec_id.coding_format,
1325 					    company_id,
1326 					    vs_codec_id,
1327 					    cmd->transport,
1328 					    cmd->direction,
1329 					    cmd->codec_config_len,
1330 					    cmd->codec_config,
1331 					    &min_delay,
1332 					    &max_delay);
1333 
1334 	rp = hci_cmd_complete(evt, sizeof(*rp));
1335 	rp->status = status;
1336 	sys_put_le24(min_delay, rp->min_ctlr_delay);
1337 	sys_put_le24(max_delay, rp->max_ctlr_delay);
1338 }
1339 #endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
1340 
/* Dispatch an Informational Parameters (OGF 0x04) HCI command to its handler.
 *
 * @param ocf Opcode Command Field of the received command.
 * @param cmd Command parameter buffer.
 * @param evt Output: command complete event, filled by the handler.
 * @return 0 when handled, -EINVAL for an unsupported opcode.
 */
static int info_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
			   struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_READ_LOCAL_VERSION_INFO):
		read_local_version_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_SUPPORTED_COMMANDS):
		read_supported_commands(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_LOCAL_FEATURES):
		read_local_features(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_BD_ADDR):
		read_bd_addr(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
	/* Codec/delay queries are only compiled in when configured. */
	case BT_OCF(BT_HCI_OP_READ_CODECS_V2):
		read_codecs_v2(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_CODEC_CAPABILITIES):
		read_codec_capabilities(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_READ_CTLR_DELAY):
		read_ctlr_delay(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */

	default:
		/* Opcode not supported in this build. */
		return -EINVAL;
	}

	return 0;
}
1381 
1382 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
read_rssi(struct net_buf * buf,struct net_buf ** evt)1383 static void read_rssi(struct net_buf *buf, struct net_buf **evt)
1384 {
1385 	struct bt_hci_cp_read_rssi *cmd = (void *)buf->data;
1386 	struct bt_hci_rp_read_rssi *rp;
1387 	uint16_t handle;
1388 
1389 	handle = sys_le16_to_cpu(cmd->handle);
1390 
1391 	rp = hci_cmd_complete(evt, sizeof(*rp));
1392 
1393 	rp->status = ll_rssi_get(handle, &rp->rssi);
1394 
1395 	rp->handle = sys_cpu_to_le16(handle);
1396 	/* The Link Layer currently returns RSSI as an absolute value */
1397 	rp->rssi = (!rp->status) ? -rp->rssi : 127;
1398 }
1399 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1400 
/* Dispatch a Status Parameters (OGF 0x05) HCI command.
 *
 * @return 0 when handled, -EINVAL for an unsupported opcode.
 */
static int status_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
			     struct net_buf **evt)
{
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
	if (ocf == BT_OCF(BT_HCI_OP_READ_RSSI)) {
		read_rssi(cmd, evt);
		return 0;
	}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

	return -EINVAL;
}
1417 
le_set_event_mask(struct net_buf * buf,struct net_buf ** evt)1418 static void le_set_event_mask(struct net_buf *buf, struct net_buf **evt)
1419 {
1420 	struct bt_hci_cp_set_event_mask *cmd = (void *)buf->data;
1421 
1422 	le_event_mask = sys_get_le64(cmd->events);
1423 
1424 	*evt = cmd_complete_status(0x00);
1425 }
1426 
le_read_buffer_size(struct net_buf * buf,struct net_buf ** evt)1427 static void le_read_buffer_size(struct net_buf *buf, struct net_buf **evt)
1428 {
1429 	struct bt_hci_rp_le_read_buffer_size *rp;
1430 
1431 	rp = hci_cmd_complete(evt, sizeof(*rp));
1432 
1433 	rp->status = 0x00;
1434 
1435 	rp->le_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1436 	rp->le_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1437 }
1438 
1439 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_read_buffer_size_v2(struct net_buf * buf,struct net_buf ** evt)1440 static void le_read_buffer_size_v2(struct net_buf *buf, struct net_buf **evt)
1441 {
1442 	struct bt_hci_rp_le_read_buffer_size_v2 *rp;
1443 
1444 	rp = hci_cmd_complete(evt, sizeof(*rp));
1445 
1446 	rp->status = 0x00;
1447 
1448 	rp->acl_max_len = sys_cpu_to_le16(LL_LENGTH_OCTETS_TX_MAX);
1449 	rp->acl_max_num = CONFIG_BT_BUF_ACL_TX_COUNT;
1450 	rp->iso_max_len = sys_cpu_to_le16(BT_CTLR_ISO_TX_BUFFER_SIZE);
1451 	rp->iso_max_num = CONFIG_BT_CTLR_ISO_TX_BUFFERS;
1452 }
1453 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
1454 
le_read_local_features(struct net_buf * buf,struct net_buf ** evt)1455 static void le_read_local_features(struct net_buf *buf, struct net_buf **evt)
1456 {
1457 	struct bt_hci_rp_le_read_local_features *rp;
1458 
1459 	rp = hci_cmd_complete(evt, sizeof(*rp));
1460 
1461 	rp->status = 0x00;
1462 
1463 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
1464 	sys_put_le64(ll_feat_get(), rp->features);
1465 }
1466 
le_set_random_address(struct net_buf * buf,struct net_buf ** evt)1467 static void le_set_random_address(struct net_buf *buf, struct net_buf **evt)
1468 {
1469 	struct bt_hci_cp_le_set_random_address *cmd = (void *)buf->data;
1470 	uint8_t status;
1471 
1472 	status = ll_addr_set(1, &cmd->bdaddr.val[0]);
1473 
1474 	*evt = cmd_complete_status(status);
1475 }
1476 
1477 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
le_read_fal_size(struct net_buf * buf,struct net_buf ** evt)1478 static void le_read_fal_size(struct net_buf *buf, struct net_buf **evt)
1479 {
1480 	struct bt_hci_rp_le_read_fal_size *rp;
1481 
1482 	rp = hci_cmd_complete(evt, sizeof(*rp));
1483 	rp->status = 0x00;
1484 
1485 	rp->fal_size = ll_fal_size_get();
1486 }
1487 
/* LE Clear Filter Accept List. */
static void le_clear_fal(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_fal_clear());
}
1496 
le_add_dev_to_fal(struct net_buf * buf,struct net_buf ** evt)1497 static void le_add_dev_to_fal(struct net_buf *buf, struct net_buf **evt)
1498 {
1499 	struct bt_hci_cp_le_add_dev_to_fal *cmd = (void *)buf->data;
1500 	uint8_t status;
1501 
1502 	status = ll_fal_add(&cmd->addr);
1503 
1504 	*evt = cmd_complete_status(status);
1505 }
1506 
le_rem_dev_from_fal(struct net_buf * buf,struct net_buf ** evt)1507 static void le_rem_dev_from_fal(struct net_buf *buf, struct net_buf **evt)
1508 {
1509 	struct bt_hci_cp_le_rem_dev_from_fal *cmd = (void *)buf->data;
1510 	uint8_t status;
1511 
1512 	status = ll_fal_remove(&cmd->addr);
1513 
1514 	*evt = cmd_complete_status(status);
1515 }
1516 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1517 
1518 #if defined(CONFIG_BT_CTLR_CRYPTO)
le_encrypt(struct net_buf * buf,struct net_buf ** evt)1519 static void le_encrypt(struct net_buf *buf, struct net_buf **evt)
1520 {
1521 	struct bt_hci_cp_le_encrypt *cmd = (void *)buf->data;
1522 	struct bt_hci_rp_le_encrypt *rp;
1523 	uint8_t enc_data[16];
1524 
1525 	ecb_encrypt(cmd->key, cmd->plaintext, enc_data, NULL);
1526 
1527 	rp = hci_cmd_complete(evt, sizeof(*rp));
1528 
1529 	rp->status = 0x00;
1530 	memcpy(rp->enc_data, enc_data, 16);
1531 }
1532 #endif /* CONFIG_BT_CTLR_CRYPTO */
1533 
le_rand(struct net_buf * buf,struct net_buf ** evt)1534 static void le_rand(struct net_buf *buf, struct net_buf **evt)
1535 {
1536 	struct bt_hci_rp_le_rand *rp;
1537 	uint8_t count = sizeof(rp->rand);
1538 
1539 	rp = hci_cmd_complete(evt, sizeof(*rp));
1540 	rp->status = 0x00;
1541 
1542 	lll_csrand_get(rp->rand, count);
1543 }
1544 
/* LE Read Supported States: build the 64-bit supported-states bitmask from
 * the configured controller roles. Each ST_* macro groups the state/state
 * combination bits that involve a given role; role-disabled branches clear
 * those bits because a combination state set by one role may include the
 * other role (e.g. advertising+scanning bits appear in both ST_ADV and
 * ST_SCA).
 *
 * NOTE(review): the ST_* macros are not #undef'd and thus remain visible for
 * the rest of this translation unit.
 */
static void le_read_supp_states(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_le_read_supp_states *rp;
	uint64_t states = 0U;

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = 0x00;

	/* Bits involving an advertising role. */
#define ST_ADV (BIT64(0)  | BIT64(1)  | BIT64(8)  | BIT64(9)  | BIT64(12) | \
		BIT64(13) | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(20) | BIT64(21))

	/* Bits involving a scanning role. */
#define ST_SCA (BIT64(4)  | BIT64(5)  | BIT64(8)  | BIT64(9)  | BIT64(10) | \
		BIT64(11) | BIT64(12) | BIT64(13) | BIT64(14) | BIT64(15) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(26) | \
		BIT64(27) | BIT64(30) | BIT64(31))

	/* Bits involving a peripheral role. */
#define ST_PER (BIT64(2)  | BIT64(3)  | BIT64(7)  | BIT64(10) | BIT64(11) | \
		BIT64(14) | BIT64(15) | BIT64(20) | BIT64(21) | BIT64(26) | \
		BIT64(27) | BIT64(29) | BIT64(30) | BIT64(31) | BIT64(32) | \
		BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | BIT64(37) | \
		BIT64(38) | BIT64(39) | BIT64(40) | BIT64(41))

	/* Bits involving a central role. */
#define ST_CEN (BIT64(6)  | BIT64(16) | BIT64(17) | BIT64(18) | BIT64(19) | \
		BIT64(22) | BIT64(23) | BIT64(24) | BIT64(25) | BIT64(28) | \
		BIT64(32) | BIT64(33) | BIT64(34) | BIT64(35) | BIT64(36) | \
		BIT64(37) | BIT64(41))

#if defined(CONFIG_BT_BROADCASTER)
	states |= ST_ADV;
#else
	states &= ~ST_ADV;
#endif
#if defined(CONFIG_BT_OBSERVER)
	states |= ST_SCA;
#else
	states &= ~ST_SCA;
#endif
#if defined(CONFIG_BT_PERIPHERAL)
	states |= ST_PER;
#else
	states &= ~ST_PER;
#endif
#if defined(CONFIG_BT_CENTRAL)
	states |= ST_CEN;
#else
	states &= ~ST_CEN;
#endif
	/* All states and combinations supported except:
	 * Initiating State + Passive Scanning
	 * Initiating State + Active Scanning
	 */
	states &= ~(BIT64(22) | BIT64(23));
	LOG_DBG("states: 0x%08x%08x", (uint32_t)(states >> 32), (uint32_t)(states & 0xffffffff));
	sys_put_le64(states, rp->le_states);
}
1601 
1602 #if defined(CONFIG_BT_BROADCASTER)
/* LE Set Advertising Parameters (legacy command).
 * Optionally validates the interval range, then forwards to the Link Layer;
 * the ll_adv_params_set() signature differs when extended advertising is
 * compiled in (handle plus extended fields, all zeroed for legacy use).
 */
static void le_set_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_adv_param *cmd = (void *)buf->data;
	uint16_t min_interval;
	uint8_t status;

	/* Mixing legacy and extended advertising commands is rejected;
	 * adv_cmds_legacy_check() fills *evt with the error on failure.
	 */
	if (adv_cmds_legacy_check(evt)) {
		return;
	}

	min_interval = sys_le16_to_cpu(cmd->min_interval);

	/* Optional range check: min <= max and within 0x0020..0x4000.
	 * Skipped for BT_HCI_ADV_DIRECT_IND — NOTE(review): presumably
	 * because the interval fields are not used for high-duty directed
	 * advertising; confirm against the spec.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (cmd->type != BT_HCI_ADV_DIRECT_IND)) {
		uint16_t max_interval = sys_le16_to_cpu(cmd->max_interval);

		if ((min_interval > max_interval) ||
		    (min_interval < 0x0020) ||
		    (max_interval > 0x4000)) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_adv_params_set(0, 0, min_interval, cmd->type,
				   cmd->own_addr_type, cmd->direct_addr.type,
				   &cmd->direct_addr.a.val[0], cmd->channel_map,
				   cmd->filter_policy, 0, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_adv_params_set(min_interval, cmd->type,
				   cmd->own_addr_type, cmd->direct_addr.type,
				   &cmd->direct_addr.a.val[0], cmd->channel_map,
				   cmd->filter_policy);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	*evt = cmd_complete_status(status);
}
1641 
le_read_adv_chan_tx_power(struct net_buf * buf,struct net_buf ** evt)1642 static void le_read_adv_chan_tx_power(struct net_buf *buf, struct net_buf **evt)
1643 {
1644 	struct bt_hci_rp_le_read_chan_tx_power *rp;
1645 
1646 	if (adv_cmds_legacy_check(evt)) {
1647 		return;
1648 	}
1649 
1650 	rp = hci_cmd_complete(evt, sizeof(*rp));
1651 
1652 	rp->status = 0x00;
1653 
1654 	rp->tx_power_level = 0;
1655 }
1656 
le_set_adv_data(struct net_buf * buf,struct net_buf ** evt)1657 static void le_set_adv_data(struct net_buf *buf, struct net_buf **evt)
1658 {
1659 	struct bt_hci_cp_le_set_adv_data *cmd = (void *)buf->data;
1660 	uint8_t status;
1661 
1662 	if (adv_cmds_legacy_check(evt)) {
1663 		return;
1664 	}
1665 
1666 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1667 	status = ll_adv_data_set(0, cmd->len, &cmd->data[0]);
1668 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1669 	status = ll_adv_data_set(cmd->len, &cmd->data[0]);
1670 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1671 
1672 	*evt = cmd_complete_status(status);
1673 }
1674 
le_set_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)1675 static void le_set_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
1676 {
1677 	struct bt_hci_cp_le_set_scan_rsp_data *cmd = (void *)buf->data;
1678 	uint8_t status;
1679 
1680 	if (adv_cmds_legacy_check(evt)) {
1681 		return;
1682 	}
1683 
1684 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1685 	status = ll_adv_scan_rsp_set(0, cmd->len, &cmd->data[0]);
1686 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1687 	status = ll_adv_scan_rsp_set(cmd->len, &cmd->data[0]);
1688 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1689 
1690 	*evt = cmd_complete_status(status);
1691 }
1692 
le_set_adv_enable(struct net_buf * buf,struct net_buf ** evt)1693 static void le_set_adv_enable(struct net_buf *buf, struct net_buf **evt)
1694 {
1695 	struct bt_hci_cp_le_set_adv_enable *cmd = (void *)buf->data;
1696 	uint8_t status;
1697 
1698 	if (adv_cmds_legacy_check(evt)) {
1699 		return;
1700 	}
1701 
1702 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
1703 #if defined(CONFIG_BT_HCI_MESH_EXT)
1704 	status = ll_adv_enable(0, cmd->enable, 0, 0, 0, 0, 0);
1705 #else /* !CONFIG_BT_HCI_MESH_EXT */
1706 	status = ll_adv_enable(0, cmd->enable, 0, 0);
1707 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1708 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
1709 	status = ll_adv_enable(cmd->enable);
1710 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
1711 
1712 	*evt = cmd_complete_status(status);
1713 }
1714 
1715 #if defined(CONFIG_BT_CTLR_ADV_ISO)
le_create_big(struct net_buf * buf,struct net_buf ** evt)1716 static void le_create_big(struct net_buf *buf, struct net_buf **evt)
1717 {
1718 	struct bt_hci_cp_le_create_big *cmd = (void *)buf->data;
1719 	uint32_t sdu_interval;
1720 	uint16_t max_latency;
1721 	uint8_t big_handle;
1722 	uint8_t adv_handle;
1723 	uint16_t max_sdu;
1724 	uint8_t status;
1725 
1726 	status = ll_adv_iso_by_hci_handle_new(cmd->big_handle, &big_handle);
1727 	if (status) {
1728 		*evt = cmd_status(status);
1729 		return;
1730 	}
1731 
1732 	status = ll_adv_set_by_hci_handle_get(cmd->adv_handle, &adv_handle);
1733 	if (status) {
1734 		*evt = cmd_status(status);
1735 		return;
1736 	}
1737 
1738 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1739 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1740 	max_latency = sys_le16_to_cpu(cmd->max_latency);
1741 
1742 	status = ll_big_create(big_handle, adv_handle, cmd->num_bis,
1743 			       sdu_interval, max_sdu, max_latency, cmd->rtn,
1744 			       cmd->phy, cmd->packing, cmd->framing,
1745 			       cmd->encryption, cmd->bcode);
1746 
1747 	*evt = cmd_status(status);
1748 }
1749 
le_create_big_test(struct net_buf * buf,struct net_buf ** evt)1750 static void le_create_big_test(struct net_buf *buf, struct net_buf **evt)
1751 {
1752 	struct bt_hci_cp_le_create_big_test *cmd = (void *)buf->data;
1753 	uint32_t sdu_interval;
1754 	uint16_t iso_interval;
1755 	uint16_t max_sdu;
1756 	uint16_t max_pdu;
1757 	uint8_t status;
1758 
1759 	sdu_interval = sys_get_le24(cmd->sdu_interval);
1760 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
1761 	max_sdu = sys_le16_to_cpu(cmd->max_sdu);
1762 	max_pdu = sys_le16_to_cpu(cmd->max_pdu);
1763 
1764 	status = ll_big_test_create(cmd->big_handle, cmd->adv_handle,
1765 				    cmd->num_bis, sdu_interval, iso_interval,
1766 				    cmd->nse, max_sdu, max_pdu, cmd->phy,
1767 				    cmd->packing, cmd->framing, cmd->bn,
1768 				    cmd->irc, cmd->pto, cmd->encryption,
1769 				    cmd->bcode);
1770 
1771 	*evt = cmd_status(status);
1772 }
1773 
le_terminate_big(struct net_buf * buf,struct net_buf ** evt)1774 static void le_terminate_big(struct net_buf *buf, struct net_buf **evt)
1775 {
1776 	struct bt_hci_cp_le_terminate_big *cmd = (void *)buf->data;
1777 	uint8_t status;
1778 
1779 	status = ll_big_terminate(cmd->big_handle, cmd->reason);
1780 
1781 	*evt = cmd_status(status);
1782 }
1783 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1784 #endif /* CONFIG_BT_BROADCASTER */
1785 
1786 #if defined(CONFIG_BT_OBSERVER)
le_set_scan_param(struct net_buf * buf,struct net_buf ** evt)1787 static void le_set_scan_param(struct net_buf *buf, struct net_buf **evt)
1788 {
1789 	struct bt_hci_cp_le_set_scan_param *cmd = (void *)buf->data;
1790 	uint16_t interval;
1791 	uint16_t window;
1792 	uint8_t status;
1793 
1794 	if (adv_cmds_legacy_check(evt)) {
1795 		return;
1796 	}
1797 
1798 	interval = sys_le16_to_cpu(cmd->interval);
1799 	window = sys_le16_to_cpu(cmd->window);
1800 
1801 	status = ll_scan_params_set(cmd->scan_type, interval, window,
1802 				    cmd->addr_type, cmd->filter_policy);
1803 
1804 	*evt = cmd_complete_status(status);
1805 }
1806 
/* LE Set Scan Enable (legacy).
 * Duplicate filtering lives here in the HCI layer: its state is
 * (re)initialized on every enable with filtering requested, and torn down on
 * disable. With periodic-advertising ADI support the dup_scan flag and the
 * extended-advertising duplicate sets are managed as well.
 */
static void le_set_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	if (adv_cmds_legacy_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->enable && cmd->filter_dup) {
		/* The empty `if (0)` head lets the #if-guarded `else if`
		 * branches chain cleanly regardless of configuration.
		 */
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		/* Filtering was fully disabled: start fresh. */
		} else if (dup_count == DUP_FILTER_DISABLED) {
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		/* Filtering was active for sync only: also reset the
		 * extended-advertising duplicate sets.
		 */
		} else if (!dup_scan) {
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
		/* Scan disabled or filtering not requested: turn it off. */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	status = ll_scan_enable(cmd->enable, 0, 0);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	status = ll_scan_enable(cmd->enable);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 *       enabling of already enabled scanning shall succeed after
	 *       updates to filter duplicates is handled in the above
	 *       statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
1865 
1866 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
le_big_create_sync(struct net_buf * buf,struct net_buf ** evt)1867 static void le_big_create_sync(struct net_buf *buf, struct net_buf **evt)
1868 {
1869 	struct bt_hci_cp_le_big_create_sync *cmd = (void *)buf->data;
1870 	uint8_t status;
1871 	uint16_t sync_handle;
1872 	uint16_t sync_timeout;
1873 
1874 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
1875 	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);
1876 
1877 	status = ll_big_sync_create(cmd->big_handle, sync_handle,
1878 				    cmd->encryption, cmd->bcode, cmd->mse,
1879 				    sync_timeout, cmd->num_bis, cmd->bis);
1880 
1881 	*evt = cmd_status(status);
1882 }
1883 
1884 
le_big_terminate_sync(struct net_buf * buf,struct net_buf ** evt,void ** node_rx)1885 static void le_big_terminate_sync(struct net_buf *buf, struct net_buf **evt,
1886 				  void **node_rx)
1887 {
1888 	struct bt_hci_cp_le_big_terminate_sync *cmd = (void *)buf->data;
1889 	struct bt_hci_rp_le_big_terminate_sync *rp;
1890 	uint8_t big_handle;
1891 	uint8_t status;
1892 
1893 	big_handle = cmd->big_handle;
1894 	status = ll_big_sync_terminate(big_handle, node_rx);
1895 
1896 	rp = hci_cmd_complete(evt, sizeof(*rp));
1897 	rp->status = status;
1898 	rp->big_handle = big_handle;
1899 }
1900 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1901 #endif /* CONFIG_BT_OBSERVER */
1902 
1903 #if defined(CONFIG_BT_CENTRAL)
check_cconn_params(bool ext,uint16_t scan_interval,uint16_t scan_window,uint16_t conn_interval_max,uint16_t conn_latency,uint16_t supervision_timeout)1904 static uint8_t check_cconn_params(bool ext, uint16_t scan_interval,
1905 				  uint16_t scan_window,
1906 				  uint16_t conn_interval_max,
1907 				  uint16_t conn_latency,
1908 				  uint16_t supervision_timeout)
1909 {
1910 	if (scan_interval < 0x0004 || scan_window < 0x0004 ||
1911 	    (!ext && (scan_interval > 0x4000 || scan_window > 0x4000))) {
1912 		return BT_HCI_ERR_INVALID_PARAM;
1913 	}
1914 
1915 	if (conn_interval_max < 0x0006 || conn_interval_max > 0x0C80) {
1916 		return BT_HCI_ERR_INVALID_PARAM;
1917 	}
1918 
1919 	if (conn_latency > 0x01F3) {
1920 		return BT_HCI_ERR_INVALID_PARAM;
1921 	}
1922 
1923 	if (supervision_timeout < 0x000A || supervision_timeout > 0x0C80) {
1924 		return BT_HCI_ERR_INVALID_PARAM;
1925 	}
1926 
1927 	/* sto * 10ms > (1 + lat) * ci * 1.25ms * 2
1928 	 * sto * 10 > (1 + lat) * ci * 2.5
1929 	 * sto * 2 > (1 + lat) * ci * 0.5
1930 	 * sto * 4 > (1 + lat) * ci
1931 	 */
1932 	if ((supervision_timeout << 2) <= ((1 + conn_latency) *
1933 					   conn_interval_max)) {
1934 		return BT_HCI_ERR_INVALID_PARAM;
1935 	}
1936 
1937 	return 0;
1938 }
1939 
le_create_connection(struct net_buf * buf,struct net_buf ** evt)1940 static void le_create_connection(struct net_buf *buf, struct net_buf **evt)
1941 {
1942 	struct bt_hci_cp_le_create_conn *cmd = (void *)buf->data;
1943 	uint16_t supervision_timeout;
1944 	uint16_t conn_interval_max;
1945 	uint16_t scan_interval;
1946 	uint16_t conn_latency;
1947 	uint16_t scan_window;
1948 	uint8_t status;
1949 
1950 	if (adv_cmds_legacy_check(NULL)) {
1951 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
1952 		return;
1953 	}
1954 
1955 	scan_interval = sys_le16_to_cpu(cmd->scan_interval);
1956 	scan_window = sys_le16_to_cpu(cmd->scan_window);
1957 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
1958 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
1959 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
1960 
1961 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
1962 		status = check_cconn_params(false, scan_interval,
1963 					    scan_window,
1964 					    conn_interval_max,
1965 					    conn_latency,
1966 					    supervision_timeout);
1967 		if (status) {
1968 			*evt = cmd_status(status);
1969 			return;
1970 		}
1971 	}
1972 
1973 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1974 	status = ll_create_connection(scan_interval, scan_window,
1975 				      cmd->filter_policy,
1976 				      cmd->peer_addr.type,
1977 				      &cmd->peer_addr.a.val[0],
1978 				      cmd->own_addr_type, conn_interval_max,
1979 				      conn_latency, supervision_timeout,
1980 				      PHY_LEGACY);
1981 	if (status) {
1982 		*evt = cmd_status(status);
1983 		return;
1984 	}
1985 
1986 	status = ll_connect_enable(0U);
1987 
1988 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1989 	status = ll_create_connection(scan_interval, scan_window,
1990 				      cmd->filter_policy,
1991 				      cmd->peer_addr.type,
1992 				      &cmd->peer_addr.a.val[0],
1993 				      cmd->own_addr_type, conn_interval_max,
1994 				      conn_latency, supervision_timeout);
1995 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1996 
1997 	*evt = cmd_status(status);
1998 }
1999 
/* LE Create Connection Cancel command handler.
 *
 * Aborts a pending connection attempt; node_rx may return a pending
 * connection-complete node for release by the caller.
 */
static void le_create_conn_cancel(struct net_buf *buf, struct net_buf **evt,
				  void **node_rx)
{
	uint8_t status = ll_connect_disable(node_rx);

	*evt = cmd_complete_status(status);
}
2009 
le_set_host_chan_classif(struct net_buf * buf,struct net_buf ** evt)2010 static void le_set_host_chan_classif(struct net_buf *buf, struct net_buf **evt)
2011 {
2012 	struct bt_hci_cp_le_set_host_chan_classif *cmd = (void *)buf->data;
2013 	uint8_t status;
2014 
2015 	status = ll_chm_update(&cmd->ch_map[0]);
2016 
2017 	*evt = cmd_complete_status(status);
2018 }
2019 
2020 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_start_encryption(struct net_buf * buf,struct net_buf ** evt)2021 static void le_start_encryption(struct net_buf *buf, struct net_buf **evt)
2022 {
2023 	struct bt_hci_cp_le_start_encryption *cmd = (void *)buf->data;
2024 	uint16_t handle;
2025 	uint8_t status;
2026 
2027 	handle = sys_le16_to_cpu(cmd->handle);
2028 	status = ll_enc_req_send(handle,
2029 				 (uint8_t *)&cmd->rand,
2030 				 (uint8_t *)&cmd->ediv,
2031 				 &cmd->ltk[0]);
2032 
2033 	*evt = cmd_status(status);
2034 }
2035 #endif /* CONFIG_BT_CTLR_LE_ENC */
2036 
2037 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
le_set_cig_parameters(struct net_buf * buf,struct net_buf ** evt)2038 static void le_set_cig_parameters(struct net_buf *buf, struct net_buf **evt)
2039 {
2040 	struct bt_hci_cp_le_set_cig_params *cmd = (void *)buf->data;
2041 	struct bt_hci_rp_le_set_cig_params *rp;
2042 	uint32_t c_interval;
2043 	uint32_t p_interval;
2044 	uint16_t c_latency;
2045 	uint16_t p_latency;
2046 	uint8_t cis_count;
2047 	uint8_t cig_id;
2048 	uint8_t status;
2049 	uint8_t i;
2050 
2051 	c_interval = sys_get_le24(cmd->c_interval);
2052 	p_interval = sys_get_le24(cmd->p_interval);
2053 	c_latency = sys_le16_to_cpu(cmd->c_latency);
2054 	p_latency = sys_le16_to_cpu(cmd->p_latency);
2055 
2056 	cig_id = cmd->cig_id;
2057 	cis_count = cmd->num_cis;
2058 
2059 	/* Create CIG or start modifying existing CIG */
2060 	status = ll_cig_parameters_open(cig_id, c_interval, p_interval,
2061 					cmd->sca, cmd->packing, cmd->framing,
2062 					c_latency, p_latency, cis_count);
2063 
2064 	/* Configure individual CISes */
2065 	for (i = 0; !status && i < cis_count; i++) {
2066 		struct bt_hci_cis_params *params = &cmd->cis[i];
2067 		uint16_t c_sdu;
2068 		uint16_t p_sdu;
2069 
2070 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2071 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2072 
2073 		status = ll_cis_parameters_set(params->cis_id, c_sdu, p_sdu,
2074 					       params->c_phy, params->p_phy,
2075 					       params->c_rtn, params->p_rtn);
2076 	}
2077 
2078 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2079 	rp->cig_id = cig_id;
2080 
2081 	/* Only apply parameters if all went well */
2082 	if (!status) {
2083 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2084 
2085 		status = ll_cig_parameters_commit(cig_id, handles);
2086 
2087 		if (status == BT_HCI_ERR_SUCCESS) {
2088 			for (uint8_t j = 0; j < cis_count; j++) {
2089 				rp->handle[j] = sys_cpu_to_le16(handles[j]);
2090 			}
2091 		}
2092 	}
2093 
2094 	rp->num_handles = status ? 0U : cis_count;
2095 	rp->status = status;
2096 }
2097 
le_set_cig_params_test(struct net_buf * buf,struct net_buf ** evt)2098 static void le_set_cig_params_test(struct net_buf *buf, struct net_buf **evt)
2099 {
2100 	struct bt_hci_cp_le_set_cig_params_test *cmd = (void *)buf->data;
2101 	struct bt_hci_rp_le_set_cig_params_test *rp;
2102 
2103 	uint32_t c_interval;
2104 	uint32_t p_interval;
2105 	uint16_t iso_interval;
2106 	uint8_t cis_count;
2107 	uint8_t cig_id;
2108 	uint8_t status;
2109 	uint8_t i;
2110 
2111 	c_interval = sys_get_le24(cmd->c_interval);
2112 	p_interval = sys_get_le24(cmd->p_interval);
2113 	iso_interval = sys_le16_to_cpu(cmd->iso_interval);
2114 
2115 	cig_id = cmd->cig_id;
2116 	cis_count = cmd->num_cis;
2117 
2118 	/* Create CIG or start modifying existing CIG */
2119 	status = ll_cig_parameters_test_open(cig_id, c_interval,
2120 					     p_interval, cmd->c_ft,
2121 					     cmd->p_ft, iso_interval,
2122 					     cmd->sca, cmd->packing,
2123 					     cmd->framing,
2124 					     cis_count);
2125 
2126 	/* Configure individual CISes */
2127 	for (i = 0; !status && i < cis_count; i++) {
2128 		struct bt_hci_cis_params_test *params = &cmd->cis[i];
2129 		uint16_t c_sdu;
2130 		uint16_t p_sdu;
2131 		uint16_t c_pdu;
2132 		uint16_t p_pdu;
2133 		uint8_t  nse;
2134 
2135 		nse   = params->nse;
2136 		c_sdu = sys_le16_to_cpu(params->c_sdu);
2137 		p_sdu = sys_le16_to_cpu(params->p_sdu);
2138 		c_pdu = sys_le16_to_cpu(params->c_pdu);
2139 		p_pdu = sys_le16_to_cpu(params->p_pdu);
2140 
2141 		status = ll_cis_parameters_test_set(params->cis_id, nse,
2142 						    c_sdu, p_sdu,
2143 						    c_pdu, p_pdu,
2144 						    params->c_phy,
2145 						    params->p_phy,
2146 						    params->c_bn,
2147 						    params->p_bn);
2148 	}
2149 
2150 	rp = hci_cmd_complete(evt, sizeof(*rp) + cis_count * sizeof(uint16_t));
2151 	rp->cig_id = cig_id;
2152 
2153 	/* Only apply parameters if all went well */
2154 	if (!status) {
2155 		uint16_t handles[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
2156 
2157 		status = ll_cig_parameters_commit(cig_id, handles);
2158 
2159 		if (status == BT_HCI_ERR_SUCCESS) {
2160 			for (uint8_t j = 0; j < cis_count; j++) {
2161 				rp->handle[j] = sys_cpu_to_le16(handles[j]);
2162 			}
2163 		}
2164 	}
2165 
2166 	rp->num_handles = status ? 0U : cis_count;
2167 	rp->status = status;
2168 }
2169 
le_create_cis(struct net_buf * buf,struct net_buf ** evt)2170 static void le_create_cis(struct net_buf *buf, struct net_buf **evt)
2171 {
2172 	uint16_t handle_used[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP] = {0};
2173 	struct bt_hci_cp_le_create_cis *cmd = (void *)buf->data;
2174 	uint8_t status;
2175 	uint8_t i;
2176 
2177 	/*
2178 	 * Only create a CIS if the Isochronous Channels (Host Support) feature bit
2179 	 * is set. Refer to BT Spec v5.4 Vol 6 Part B Section 4.6.33.1.
2180 	 */
2181 	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS))) {
2182 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2183 		return;
2184 	}
2185 
2186 	/*
2187 	 * Creating new CISes is disallowed until all previous CIS
2188 	 * established events have been generated
2189 	 */
2190 	if (cis_pending_count) {
2191 		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
2192 		return;
2193 	}
2194 
2195 	/* Check all handles before actually starting to create CISes */
2196 	status = 0x00;
2197 	for (i = 0; !status && i < cmd->num_cis; i++) {
2198 		uint16_t cis_handle;
2199 		uint16_t acl_handle;
2200 		uint8_t cis_idx;
2201 
2202 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2203 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2204 
2205 		cis_idx = LL_CIS_IDX_FROM_HANDLE(cis_handle);
2206 		if (handle_used[cis_idx]) {
2207 			/* Handle must be unique in request */
2208 			status = BT_HCI_ERR_INVALID_PARAM;
2209 			break;
2210 		}
2211 
2212 		handle_used[cis_idx]++;
2213 		status = ll_cis_create_check(cis_handle, acl_handle);
2214 	}
2215 
2216 	if (status) {
2217 		*evt = cmd_status(status);
2218 		return;
2219 	}
2220 
2221 	/*
2222 	 * Actually create CISes, any errors are to be reported
2223 	 * through CIS established events
2224 	 */
2225 	cis_pending_count = cmd->num_cis;
2226 	for (i = 0; i < cmd->num_cis; i++) {
2227 		uint16_t cis_handle;
2228 		uint16_t acl_handle;
2229 
2230 		cis_handle = sys_le16_to_cpu(cmd->cis[i].cis_handle);
2231 		acl_handle = sys_le16_to_cpu(cmd->cis[i].acl_handle);
2232 		ll_cis_create(cis_handle, acl_handle);
2233 	}
2234 
2235 	*evt = cmd_status(status);
2236 }
2237 
le_remove_cig(struct net_buf * buf,struct net_buf ** evt)2238 static void le_remove_cig(struct net_buf *buf, struct net_buf **evt)
2239 {
2240 	struct bt_hci_cp_le_remove_cig *cmd = (void *)buf->data;
2241 	struct bt_hci_rp_le_remove_cig *rp;
2242 	uint8_t status;
2243 
2244 	status = ll_cig_remove(cmd->cig_id);
2245 
2246 	rp = hci_cmd_complete(evt, sizeof(*rp));
2247 	rp->status = status;
2248 	rp->cig_id = cmd->cig_id;
2249 }
2250 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
2251 
2252 #endif /* CONFIG_BT_CENTRAL */
2253 
2254 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_transmit_test(struct net_buf * buf,struct net_buf ** evt)2255 static void le_iso_transmit_test(struct net_buf *buf, struct net_buf **evt)
2256 {
2257 	struct bt_hci_cp_le_iso_transmit_test *cmd = (void *)buf->data;
2258 	struct bt_hci_rp_le_iso_transmit_test *rp;
2259 	uint16_t handle;
2260 	uint8_t status;
2261 
2262 	handle = sys_le16_to_cpu(cmd->handle);
2263 
2264 	status = ll_iso_transmit_test(handle, cmd->payload_type);
2265 
2266 	rp = hci_cmd_complete(evt, sizeof(*rp));
2267 	rp->status = status;
2268 	rp->handle = sys_cpu_to_le16(handle);
2269 }
2270 
le_read_iso_tx_sync(struct net_buf * buf,struct net_buf ** evt)2271 static void le_read_iso_tx_sync(struct net_buf *buf, struct net_buf **evt)
2272 {
2273 	struct bt_hci_cp_le_read_iso_tx_sync *cmd = (void *)buf->data;
2274 	struct bt_hci_rp_le_read_iso_tx_sync *rp;
2275 	uint16_t handle_le16;
2276 	uint32_t timestamp;
2277 	uint32_t offset;
2278 	uint16_t handle;
2279 	uint8_t status;
2280 	uint16_t seq;
2281 
2282 	handle_le16 = cmd->handle;
2283 	handle = sys_le16_to_cpu(handle_le16);
2284 
2285 	status = ll_read_iso_tx_sync(handle, &seq, &timestamp, &offset);
2286 
2287 	rp = hci_cmd_complete(evt, sizeof(*rp));
2288 	rp->status = status;
2289 	rp->handle = handle_le16;
2290 	rp->seq       = sys_cpu_to_le16(seq);
2291 	rp->timestamp = sys_cpu_to_le32(timestamp);
2292 	sys_put_le24(offset, rp->offset);
2293 }
2294 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2295 
2296 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
le_iso_receive_test(struct net_buf * buf,struct net_buf ** evt)2297 static void le_iso_receive_test(struct net_buf *buf, struct net_buf **evt)
2298 {
2299 	struct bt_hci_cp_le_iso_receive_test *cmd = (void *)buf->data;
2300 	struct bt_hci_rp_le_iso_receive_test *rp;
2301 	uint16_t handle;
2302 	uint8_t status;
2303 
2304 	handle = sys_le16_to_cpu(cmd->handle);
2305 
2306 	status = ll_iso_receive_test(handle, cmd->payload_type);
2307 
2308 	rp = hci_cmd_complete(evt, sizeof(*rp));
2309 	rp->status = status;
2310 	rp->handle = sys_cpu_to_le16(handle);
2311 }
2312 
le_iso_read_test_counters(struct net_buf * buf,struct net_buf ** evt)2313 static void le_iso_read_test_counters(struct net_buf *buf, struct net_buf **evt)
2314 {
2315 	struct bt_hci_cp_le_read_test_counters *cmd = (void *)buf->data;
2316 	struct bt_hci_rp_le_read_test_counters *rp;
2317 	uint32_t received_cnt;
2318 	uint32_t missed_cnt;
2319 	uint32_t failed_cnt;
2320 	uint16_t handle;
2321 	uint8_t status;
2322 
2323 	handle = sys_le16_to_cpu(cmd->handle);
2324 	status = ll_iso_read_test_counters(handle, &received_cnt,
2325 					   &missed_cnt, &failed_cnt);
2326 
2327 	rp = hci_cmd_complete(evt, sizeof(*rp));
2328 	rp->status = status;
2329 	rp->handle = sys_cpu_to_le16(handle);
2330 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2331 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2332 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2333 }
2334 
2335 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
le_read_iso_link_quality(struct net_buf * buf,struct net_buf ** evt)2336 static void le_read_iso_link_quality(struct net_buf *buf, struct net_buf **evt)
2337 {
2338 	struct bt_hci_cp_le_read_iso_link_quality *cmd = (void *)buf->data;
2339 	struct bt_hci_rp_le_read_iso_link_quality *rp;
2340 	uint32_t tx_last_subevent_packets;
2341 	uint32_t retransmitted_packets;
2342 	uint32_t rx_unreceived_packets;
2343 	uint32_t tx_unacked_packets;
2344 	uint32_t tx_flushed_packets;
2345 	uint32_t crc_error_packets;
2346 	uint32_t duplicate_packets;
2347 	uint16_t handle_le16;
2348 	uint16_t handle;
2349 	uint8_t status;
2350 
2351 	handle_le16 = cmd->handle;
2352 	handle = sys_le16_to_cpu(handle_le16);
2353 	status = ll_read_iso_link_quality(handle, &tx_unacked_packets,
2354 					  &tx_flushed_packets,
2355 					  &tx_last_subevent_packets,
2356 					  &retransmitted_packets,
2357 					  &crc_error_packets,
2358 					  &rx_unreceived_packets,
2359 					  &duplicate_packets);
2360 
2361 	rp = hci_cmd_complete(evt, sizeof(*rp));
2362 	rp->status = status;
2363 	rp->handle = handle_le16;
2364 	rp->tx_unacked_packets = sys_cpu_to_le32(tx_unacked_packets);
2365 	rp->tx_flushed_packets = sys_cpu_to_le32(tx_flushed_packets);
2366 	rp->tx_last_subevent_packets =
2367 		sys_cpu_to_le32(tx_last_subevent_packets);
2368 	rp->retransmitted_packets = sys_cpu_to_le32(retransmitted_packets);
2369 	rp->crc_error_packets     = sys_cpu_to_le32(crc_error_packets);
2370 	rp->rx_unreceived_packets = sys_cpu_to_le32(rx_unreceived_packets);
2371 	rp->duplicate_packets     = sys_cpu_to_le32(duplicate_packets);
2372 }
2373 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
2374 
2375 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
2376 
2377 #if defined(CONFIG_BT_CTLR_ISO)
le_setup_iso_path(struct net_buf * buf,struct net_buf ** evt)2378 static void le_setup_iso_path(struct net_buf *buf, struct net_buf **evt)
2379 {
2380 	struct bt_hci_cp_le_setup_iso_path *cmd = (void *)buf->data;
2381 	struct bt_hci_rp_le_setup_iso_path *rp;
2382 	uint32_t controller_delay;
2383 	uint8_t *codec_config;
2384 	uint8_t coding_format;
2385 	uint16_t vs_codec_id;
2386 	uint16_t company_id;
2387 	uint16_t handle;
2388 	uint8_t status;
2389 
2390 	handle = sys_le16_to_cpu(cmd->handle);
2391 	coding_format = cmd->codec_id.coding_format;
2392 	company_id = sys_le16_to_cpu(cmd->codec_id.company_id);
2393 	vs_codec_id = sys_le16_to_cpu(cmd->codec_id.vs_codec_id);
2394 	controller_delay = sys_get_le24(cmd->controller_delay);
2395 	codec_config = &cmd->codec_config[0];
2396 
2397 	status = ll_setup_iso_path(handle, cmd->path_dir, cmd->path_id,
2398 				   coding_format, company_id, vs_codec_id,
2399 				   controller_delay, cmd->codec_config_len,
2400 				   codec_config);
2401 
2402 	rp = hci_cmd_complete(evt, sizeof(*rp));
2403 	rp->status = status;
2404 	rp->handle = sys_cpu_to_le16(handle);
2405 }
2406 
le_remove_iso_path(struct net_buf * buf,struct net_buf ** evt)2407 static void le_remove_iso_path(struct net_buf *buf, struct net_buf **evt)
2408 {
2409 	struct bt_hci_cp_le_remove_iso_path *cmd = (void *)buf->data;
2410 	struct bt_hci_rp_le_remove_iso_path *rp;
2411 	uint16_t handle;
2412 	uint8_t status;
2413 
2414 	handle = sys_le16_to_cpu(cmd->handle);
2415 
2416 	status = ll_remove_iso_path(handle, cmd->path_dir);
2417 
2418 	rp = hci_cmd_complete(evt, sizeof(*rp));
2419 	rp->status = status;
2420 	rp->handle = sys_cpu_to_le16(handle);
2421 }
2422 
le_iso_test_end(struct net_buf * buf,struct net_buf ** evt)2423 static void le_iso_test_end(struct net_buf *buf, struct net_buf **evt)
2424 {
2425 	struct bt_hci_cp_le_iso_test_end *cmd = (void *)buf->data;
2426 	struct bt_hci_rp_le_iso_test_end *rp;
2427 	uint32_t received_cnt;
2428 	uint32_t missed_cnt;
2429 	uint32_t failed_cnt;
2430 	uint16_t handle;
2431 	uint8_t status;
2432 
2433 	handle = sys_le16_to_cpu(cmd->handle);
2434 	status = ll_iso_test_end(handle, &received_cnt, &missed_cnt,
2435 				 &failed_cnt);
2436 
2437 	rp = hci_cmd_complete(evt, sizeof(*rp));
2438 	rp->status = status;
2439 	rp->handle = sys_cpu_to_le16(handle);
2440 	rp->received_cnt = sys_cpu_to_le32(received_cnt);
2441 	rp->missed_cnt   = sys_cpu_to_le32(missed_cnt);
2442 	rp->failed_cnt   = sys_cpu_to_le32(failed_cnt);
2443 }
2444 #endif /* CONFIG_BT_CTLR_ISO */
2445 
2446 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
le_set_host_feature(struct net_buf * buf,struct net_buf ** evt)2447 static void le_set_host_feature(struct net_buf *buf, struct net_buf **evt)
2448 {
2449 	struct bt_hci_cp_le_set_host_feature *cmd = (void *)buf->data;
2450 	struct bt_hci_rp_le_set_host_feature *rp;
2451 	uint8_t status;
2452 
2453 	status = ll_set_host_feature(cmd->bit_number, cmd->bit_value);
2454 
2455 	rp = hci_cmd_complete(evt, sizeof(*rp));
2456 	rp->status = status;
2457 }
2458 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
2459 
2460 #if defined(CONFIG_BT_PERIPHERAL)
2461 #if defined(CONFIG_BT_CTLR_LE_ENC)
le_ltk_req_reply(struct net_buf * buf,struct net_buf ** evt)2462 static void le_ltk_req_reply(struct net_buf *buf, struct net_buf **evt)
2463 {
2464 	struct bt_hci_cp_le_ltk_req_reply *cmd = (void *)buf->data;
2465 	struct bt_hci_rp_le_ltk_req_reply *rp;
2466 	uint16_t handle;
2467 	uint8_t status;
2468 
2469 	handle = sys_le16_to_cpu(cmd->handle);
2470 	status = ll_start_enc_req_send(handle, 0x00, &cmd->ltk[0]);
2471 
2472 	rp = hci_cmd_complete(evt, sizeof(*rp));
2473 	rp->status = status;
2474 	rp->handle = sys_cpu_to_le16(handle);
2475 }
2476 
le_ltk_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2477 static void le_ltk_req_neg_reply(struct net_buf *buf, struct net_buf **evt)
2478 {
2479 	struct bt_hci_cp_le_ltk_req_neg_reply *cmd = (void *)buf->data;
2480 	struct bt_hci_rp_le_ltk_req_neg_reply *rp;
2481 	uint16_t handle;
2482 	uint8_t status;
2483 
2484 	handle = sys_le16_to_cpu(cmd->handle);
2485 	status = ll_start_enc_req_send(handle, BT_HCI_ERR_PIN_OR_KEY_MISSING,
2486 				       NULL);
2487 
2488 	rp = hci_cmd_complete(evt, sizeof(*rp));
2489 	rp->status = status;
2490 	rp->handle = sys_le16_to_cpu(handle);
2491 }
2492 #endif /* CONFIG_BT_CTLR_LE_ENC */
2493 
2494 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
le_accept_cis(struct net_buf * buf,struct net_buf ** evt)2495 static void le_accept_cis(struct net_buf *buf, struct net_buf **evt)
2496 {
2497 	struct bt_hci_cp_le_accept_cis *cmd = (void *)buf->data;
2498 	uint16_t handle;
2499 	uint8_t status;
2500 
2501 	handle = sys_le16_to_cpu(cmd->handle);
2502 	status = ll_cis_accept(handle);
2503 	*evt = cmd_status(status);
2504 }
2505 
le_reject_cis(struct net_buf * buf,struct net_buf ** evt)2506 static void le_reject_cis(struct net_buf *buf, struct net_buf **evt)
2507 {
2508 	struct bt_hci_cp_le_reject_cis *cmd = (void *)buf->data;
2509 	struct bt_hci_rp_le_reject_cis *rp;
2510 	uint16_t handle;
2511 	uint8_t status;
2512 
2513 	handle = sys_le16_to_cpu(cmd->handle);
2514 	status = ll_cis_reject(handle, cmd->reason);
2515 
2516 	rp = hci_cmd_complete(evt, sizeof(*rp));
2517 	rp->status = status;
2518 	rp->handle = sys_cpu_to_le16(handle);
2519 }
2520 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
2521 
2522 #endif /* CONFIG_BT_PERIPHERAL */
2523 
2524 #if defined(CONFIG_BT_CONN)
2525 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
le_req_peer_sca(struct net_buf * buf,struct net_buf ** evt)2526 static void le_req_peer_sca(struct net_buf *buf, struct net_buf **evt)
2527 {
2528 	struct bt_hci_cp_le_req_peer_sca *cmd = (void *)buf->data;
2529 	uint16_t handle;
2530 	uint8_t status;
2531 
2532 	handle = sys_le16_to_cpu(cmd->handle);
2533 	status = ll_req_peer_sca(handle);
2534 
2535 	*evt = cmd_status(status);
2536 }
2537 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
2538 
2539 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
le_read_remote_features(struct net_buf * buf,struct net_buf ** evt)2540 static void le_read_remote_features(struct net_buf *buf, struct net_buf **evt)
2541 {
2542 	struct bt_hci_cp_le_read_remote_features *cmd = (void *)buf->data;
2543 	uint16_t handle;
2544 	uint8_t status;
2545 
2546 	handle = sys_le16_to_cpu(cmd->handle);
2547 	status = ll_feature_req_send(handle);
2548 
2549 	*evt = cmd_status(status);
2550 }
2551 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
2552 
le_read_chan_map(struct net_buf * buf,struct net_buf ** evt)2553 static void le_read_chan_map(struct net_buf *buf, struct net_buf **evt)
2554 {
2555 	struct bt_hci_cp_le_read_chan_map *cmd = (void *)buf->data;
2556 	struct bt_hci_rp_le_read_chan_map *rp;
2557 	uint16_t handle;
2558 	uint8_t status;
2559 
2560 	handle = sys_le16_to_cpu(cmd->handle);
2561 
2562 	rp = hci_cmd_complete(evt, sizeof(*rp));
2563 
2564 	status = ll_chm_get(handle, rp->ch_map);
2565 
2566 	rp->status = status;
2567 	rp->handle = sys_le16_to_cpu(handle);
2568 }
2569 
le_conn_update(struct net_buf * buf,struct net_buf ** evt)2570 static void le_conn_update(struct net_buf *buf, struct net_buf **evt)
2571 {
2572 	struct hci_cp_le_conn_update *cmd = (void *)buf->data;
2573 	uint16_t supervision_timeout;
2574 	uint16_t conn_interval_min;
2575 	uint16_t conn_interval_max;
2576 	uint16_t conn_latency;
2577 	uint16_t handle;
2578 	uint8_t status;
2579 
2580 	handle = sys_le16_to_cpu(cmd->handle);
2581 	conn_interval_min = sys_le16_to_cpu(cmd->conn_interval_min);
2582 	conn_interval_max = sys_le16_to_cpu(cmd->conn_interval_max);
2583 	conn_latency = sys_le16_to_cpu(cmd->conn_latency);
2584 	supervision_timeout = sys_le16_to_cpu(cmd->supervision_timeout);
2585 
2586 	status = ll_conn_update(handle, 0, 0, conn_interval_min,
2587 				conn_interval_max, conn_latency,
2588 				supervision_timeout, NULL);
2589 
2590 	*evt = cmd_status(status);
2591 }
2592 
2593 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
le_conn_param_req_reply(struct net_buf * buf,struct net_buf ** evt)2594 static void le_conn_param_req_reply(struct net_buf *buf, struct net_buf **evt)
2595 {
2596 	struct bt_hci_cp_le_conn_param_req_reply *cmd = (void *)buf->data;
2597 	struct bt_hci_rp_le_conn_param_req_reply *rp;
2598 	uint16_t interval_min;
2599 	uint16_t interval_max;
2600 	uint16_t latency;
2601 	uint16_t timeout;
2602 	uint16_t handle;
2603 	uint8_t status;
2604 
2605 	handle = sys_le16_to_cpu(cmd->handle);
2606 	interval_min = sys_le16_to_cpu(cmd->interval_min);
2607 	interval_max = sys_le16_to_cpu(cmd->interval_max);
2608 	latency = sys_le16_to_cpu(cmd->latency);
2609 	timeout = sys_le16_to_cpu(cmd->timeout);
2610 
2611 	status = ll_conn_update(handle, 2, 0, interval_min, interval_max,
2612 				latency, timeout, NULL);
2613 
2614 	rp = hci_cmd_complete(evt, sizeof(*rp));
2615 	rp->status = status;
2616 	rp->handle = sys_cpu_to_le16(handle);
2617 }
2618 
le_conn_param_req_neg_reply(struct net_buf * buf,struct net_buf ** evt)2619 static void le_conn_param_req_neg_reply(struct net_buf *buf,
2620 					struct net_buf **evt)
2621 {
2622 	struct bt_hci_cp_le_conn_param_req_neg_reply *cmd = (void *)buf->data;
2623 	struct bt_hci_rp_le_conn_param_req_neg_reply *rp;
2624 	uint16_t handle;
2625 	uint8_t status;
2626 
2627 	handle = sys_le16_to_cpu(cmd->handle);
2628 	status = ll_conn_update(handle, 2, cmd->reason, 0, 0, 0, 0, NULL);
2629 
2630 	rp = hci_cmd_complete(evt, sizeof(*rp));
2631 	rp->status = status;
2632 	rp->handle = sys_cpu_to_le16(handle);
2633 }
2634 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2635 
2636 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
le_set_data_len(struct net_buf * buf,struct net_buf ** evt)2637 static void le_set_data_len(struct net_buf *buf, struct net_buf **evt)
2638 {
2639 	struct bt_hci_cp_le_set_data_len *cmd = (void *)buf->data;
2640 	struct bt_hci_rp_le_set_data_len *rp;
2641 	uint16_t tx_octets;
2642 	uint16_t tx_time;
2643 	uint16_t handle;
2644 	uint8_t status;
2645 
2646 	handle = sys_le16_to_cpu(cmd->handle);
2647 	tx_octets = sys_le16_to_cpu(cmd->tx_octets);
2648 	tx_time = sys_le16_to_cpu(cmd->tx_time);
2649 	status = ll_length_req_send(handle, tx_octets, tx_time);
2650 
2651 	rp = hci_cmd_complete(evt, sizeof(*rp));
2652 	rp->status = status;
2653 	rp->handle = sys_cpu_to_le16(handle);
2654 }
2655 
le_read_default_data_len(struct net_buf * buf,struct net_buf ** evt)2656 static void le_read_default_data_len(struct net_buf *buf, struct net_buf **evt)
2657 {
2658 	struct bt_hci_rp_le_read_default_data_len *rp;
2659 	uint16_t max_tx_octets;
2660 	uint16_t max_tx_time;
2661 
2662 	rp = hci_cmd_complete(evt, sizeof(*rp));
2663 
2664 	ll_length_default_get(&max_tx_octets, &max_tx_time);
2665 
2666 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2667 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2668 	rp->status = 0x00;
2669 }
2670 
le_write_default_data_len(struct net_buf * buf,struct net_buf ** evt)2671 static void le_write_default_data_len(struct net_buf *buf,
2672 				      struct net_buf **evt)
2673 {
2674 	struct bt_hci_cp_le_write_default_data_len *cmd = (void *)buf->data;
2675 	uint16_t max_tx_octets;
2676 	uint16_t max_tx_time;
2677 	uint8_t status;
2678 
2679 	max_tx_octets = sys_le16_to_cpu(cmd->max_tx_octets);
2680 	max_tx_time = sys_le16_to_cpu(cmd->max_tx_time);
2681 	status = ll_length_default_set(max_tx_octets, max_tx_time);
2682 
2683 	*evt = cmd_complete_status(status);
2684 }
2685 
le_read_max_data_len(struct net_buf * buf,struct net_buf ** evt)2686 static void le_read_max_data_len(struct net_buf *buf, struct net_buf **evt)
2687 {
2688 	struct bt_hci_rp_le_read_max_data_len *rp;
2689 	uint16_t max_tx_octets;
2690 	uint16_t max_tx_time;
2691 	uint16_t max_rx_octets;
2692 	uint16_t max_rx_time;
2693 
2694 	rp = hci_cmd_complete(evt, sizeof(*rp));
2695 
2696 	ll_length_max_get(&max_tx_octets, &max_tx_time,
2697 			  &max_rx_octets, &max_rx_time);
2698 
2699 	rp->max_tx_octets = sys_cpu_to_le16(max_tx_octets);
2700 	rp->max_tx_time = sys_cpu_to_le16(max_tx_time);
2701 	rp->max_rx_octets = sys_cpu_to_le16(max_rx_octets);
2702 	rp->max_rx_time = sys_cpu_to_le16(max_rx_time);
2703 	rp->status = 0x00;
2704 }
2705 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2706 
2707 #if defined(CONFIG_BT_CTLR_PHY)
le_read_phy(struct net_buf * buf,struct net_buf ** evt)2708 static void le_read_phy(struct net_buf *buf, struct net_buf **evt)
2709 {
2710 	struct bt_hci_cp_le_read_phy *cmd = (void *)buf->data;
2711 	struct bt_hci_rp_le_read_phy *rp;
2712 	uint16_t handle;
2713 	uint8_t status;
2714 
2715 	handle = sys_le16_to_cpu(cmd->handle);
2716 
2717 	rp = hci_cmd_complete(evt, sizeof(*rp));
2718 
2719 	status = ll_phy_get(handle, &rp->tx_phy, &rp->rx_phy);
2720 
2721 	rp->status = status;
2722 	rp->handle = sys_cpu_to_le16(handle);
2723 	rp->tx_phy = find_lsb_set(rp->tx_phy);
2724 	rp->rx_phy = find_lsb_set(rp->rx_phy);
2725 }
2726 
le_set_default_phy(struct net_buf * buf,struct net_buf ** evt)2727 static void le_set_default_phy(struct net_buf *buf, struct net_buf **evt)
2728 {
2729 	struct bt_hci_cp_le_set_default_phy *cmd = (void *)buf->data;
2730 	uint8_t status;
2731 
2732 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2733 		cmd->tx_phys = 0x07;
2734 	}
2735 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2736 		cmd->rx_phys = 0x07;
2737 	}
2738 
2739 	status = ll_phy_default_set(cmd->tx_phys, cmd->rx_phys);
2740 
2741 	*evt = cmd_complete_status(status);
2742 }
2743 
le_set_phy(struct net_buf * buf,struct net_buf ** evt)2744 static void le_set_phy(struct net_buf *buf, struct net_buf **evt)
2745 {
2746 	struct bt_hci_cp_le_set_phy *cmd = (void *)buf->data;
2747 	uint16_t phy_opts;
2748 	uint8_t mask_phys;
2749 	uint16_t handle;
2750 	uint8_t status;
2751 
2752 	handle = sys_le16_to_cpu(cmd->handle);
2753 	phy_opts = sys_le16_to_cpu(cmd->phy_opts);
2754 
2755 	mask_phys = BT_HCI_LE_PHY_PREFER_1M;
2756 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
2757 		mask_phys |= BT_HCI_LE_PHY_PREFER_2M;
2758 	}
2759 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
2760 		mask_phys |= BT_HCI_LE_PHY_PREFER_CODED;
2761 	}
2762 
2763 	if (cmd->all_phys & BT_HCI_LE_PHY_TX_ANY) {
2764 		cmd->tx_phys |= mask_phys;
2765 	}
2766 	if (cmd->all_phys & BT_HCI_LE_PHY_RX_ANY) {
2767 		cmd->rx_phys |= mask_phys;
2768 	}
2769 
2770 	if ((cmd->tx_phys | cmd->rx_phys) & ~mask_phys) {
2771 		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
2772 
2773 		return;
2774 	}
2775 
2776 	if (!(cmd->tx_phys & 0x07) ||
2777 	    !(cmd->rx_phys & 0x07)) {
2778 		*evt = cmd_status(BT_HCI_ERR_INVALID_PARAM);
2779 
2780 		return;
2781 	}
2782 
2783 	if (phy_opts & 0x03) {
2784 		phy_opts -= 1U;
2785 		phy_opts &= 1;
2786 	} else {
2787 		phy_opts = 0U;
2788 	}
2789 
2790 	status = ll_phy_req_send(handle, cmd->tx_phys, phy_opts,
2791 				 cmd->rx_phys);
2792 
2793 	*evt = cmd_status(status);
2794 }
2795 #endif /* CONFIG_BT_CTLR_PHY */
2796 #endif /* CONFIG_BT_CONN */
2797 
2798 #if defined(CONFIG_BT_CTLR_PRIVACY)
le_add_dev_to_rl(struct net_buf * buf,struct net_buf ** evt)2799 static void le_add_dev_to_rl(struct net_buf *buf, struct net_buf **evt)
2800 {
2801 	struct bt_hci_cp_le_add_dev_to_rl *cmd = (void *)buf->data;
2802 	uint8_t status;
2803 
2804 	status = ll_rl_add(&cmd->peer_id_addr, cmd->peer_irk, cmd->local_irk);
2805 
2806 	*evt = cmd_complete_status(status);
2807 }
2808 
le_rem_dev_from_rl(struct net_buf * buf,struct net_buf ** evt)2809 static void le_rem_dev_from_rl(struct net_buf *buf, struct net_buf **evt)
2810 {
2811 	struct bt_hci_cp_le_rem_dev_from_rl *cmd = (void *)buf->data;
2812 	uint8_t status;
2813 
2814 	status = ll_rl_remove(&cmd->peer_id_addr);
2815 
2816 	*evt = cmd_complete_status(status);
2817 }
2818 
/* HCI LE Clear Resolving List. */
static void le_clear_rl(struct net_buf *buf, struct net_buf **evt)
{
	*evt = cmd_complete_status(ll_rl_clear());
}
2827 
le_read_rl_size(struct net_buf * buf,struct net_buf ** evt)2828 static void le_read_rl_size(struct net_buf *buf, struct net_buf **evt)
2829 {
2830 	struct bt_hci_rp_le_read_rl_size *rp;
2831 
2832 	rp = hci_cmd_complete(evt, sizeof(*rp));
2833 
2834 	rp->rl_size = ll_rl_size_get();
2835 	rp->status = 0x00;
2836 }
2837 
le_read_peer_rpa(struct net_buf * buf,struct net_buf ** evt)2838 static void le_read_peer_rpa(struct net_buf *buf, struct net_buf **evt)
2839 {
2840 	struct bt_hci_cp_le_read_peer_rpa *cmd = (void *)buf->data;
2841 	struct bt_hci_rp_le_read_peer_rpa *rp;
2842 	bt_addr_le_t peer_id_addr;
2843 
2844 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2845 	rp = hci_cmd_complete(evt, sizeof(*rp));
2846 
2847 	rp->status = ll_rl_crpa_get(&peer_id_addr, &rp->peer_rpa);
2848 }
2849 
le_read_local_rpa(struct net_buf * buf,struct net_buf ** evt)2850 static void le_read_local_rpa(struct net_buf *buf, struct net_buf **evt)
2851 {
2852 	struct bt_hci_cp_le_read_local_rpa *cmd = (void *)buf->data;
2853 	struct bt_hci_rp_le_read_local_rpa *rp;
2854 	bt_addr_le_t peer_id_addr;
2855 
2856 	bt_addr_le_copy(&peer_id_addr, &cmd->peer_id_addr);
2857 	rp = hci_cmd_complete(evt, sizeof(*rp));
2858 
2859 	rp->status = ll_rl_lrpa_get(&peer_id_addr, &rp->local_rpa);
2860 }
2861 
le_set_addr_res_enable(struct net_buf * buf,struct net_buf ** evt)2862 static void le_set_addr_res_enable(struct net_buf *buf, struct net_buf **evt)
2863 {
2864 	struct bt_hci_cp_le_set_addr_res_enable *cmd = (void *)buf->data;
2865 	uint8_t status;
2866 
2867 	status = ll_rl_enable(cmd->enable);
2868 
2869 	*evt = cmd_complete_status(status);
2870 }
2871 
le_set_rpa_timeout(struct net_buf * buf,struct net_buf ** evt)2872 static void le_set_rpa_timeout(struct net_buf *buf, struct net_buf **evt)
2873 {
2874 	struct bt_hci_cp_le_set_rpa_timeout *cmd = (void *)buf->data;
2875 	uint16_t timeout = sys_le16_to_cpu(cmd->rpa_timeout);
2876 
2877 	ll_rl_timeout_set(timeout);
2878 
2879 	*evt = cmd_complete_status(0x00);
2880 }
2881 
le_set_privacy_mode(struct net_buf * buf,struct net_buf ** evt)2882 static void le_set_privacy_mode(struct net_buf *buf, struct net_buf **evt)
2883 {
2884 	struct bt_hci_cp_le_set_privacy_mode *cmd = (void *)buf->data;
2885 	uint8_t status;
2886 
2887 	status = ll_priv_mode_set(&cmd->id_addr, cmd->mode);
2888 
2889 	*evt = cmd_complete_status(status);
2890 }
2891 #endif /* CONFIG_BT_CTLR_PRIVACY */
2892 
le_read_tx_power(struct net_buf * buf,struct net_buf ** evt)2893 static void le_read_tx_power(struct net_buf *buf, struct net_buf **evt)
2894 {
2895 	struct bt_hci_rp_le_read_tx_power *rp;
2896 
2897 	rp = hci_cmd_complete(evt, sizeof(*rp));
2898 	rp->status = 0x00;
2899 	ll_tx_pwr_get(&rp->min_tx_power, &rp->max_tx_power);
2900 }
2901 
2902 #if defined(CONFIG_BT_CTLR_DF)
2903 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
le_df_set_cl_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)2904 static void le_df_set_cl_cte_tx_params(struct net_buf *buf,
2905 				       struct net_buf **evt)
2906 {
2907 	struct bt_hci_cp_le_set_cl_cte_tx_params *cmd = (void *)buf->data;
2908 	uint8_t adv_handle;
2909 	uint8_t status;
2910 
2911 	if (adv_cmds_ext_check(evt)) {
2912 		return;
2913 	}
2914 
2915 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &adv_handle);
2916 	if (status) {
2917 		*evt = cmd_complete_status(status);
2918 		return;
2919 	}
2920 
2921 	status = ll_df_set_cl_cte_tx_params(adv_handle, cmd->cte_len,
2922 					    cmd->cte_type, cmd->cte_count,
2923 					    cmd->switch_pattern_len,
2924 					    cmd->ant_ids);
2925 
2926 	*evt = cmd_complete_status(status);
2927 }
2928 
le_df_set_cl_cte_enable(struct net_buf * buf,struct net_buf ** evt)2929 static void le_df_set_cl_cte_enable(struct net_buf *buf, struct net_buf **evt)
2930 {
2931 	struct bt_hci_cp_le_set_cl_cte_tx_enable *cmd = (void *)buf->data;
2932 	uint8_t status;
2933 	uint8_t handle;
2934 
2935 	if (adv_cmds_ext_check(evt)) {
2936 		return;
2937 	}
2938 
2939 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
2940 	if (status) {
2941 		*evt = cmd_complete_status(status);
2942 		return;
2943 	}
2944 
2945 	status = ll_df_set_cl_cte_tx_enable(handle, cmd->cte_enable);
2946 
2947 	*evt = cmd_complete_status(status);
2948 }
2949 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2950 
2951 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
le_df_set_cl_iq_sampling_enable(struct net_buf * buf,struct net_buf ** evt)2952 static void le_df_set_cl_iq_sampling_enable(struct net_buf *buf, struct net_buf **evt)
2953 {
2954 	struct bt_hci_cp_le_set_cl_cte_sampling_enable *cmd = (void *)buf->data;
2955 	struct bt_hci_rp_le_set_cl_cte_sampling_enable *rp;
2956 	uint16_t sync_handle;
2957 	uint8_t status;
2958 
2959 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
2960 
2961 	status = ll_df_set_cl_iq_sampling_enable(sync_handle,
2962 						 cmd->sampling_enable,
2963 						 cmd->slot_durations,
2964 						 cmd->max_sampled_cte,
2965 						 cmd->switch_pattern_len,
2966 						 cmd->ant_ids);
2967 
2968 	rp = hci_cmd_complete(evt, sizeof(*rp));
2969 
2970 	rp->status = status;
2971 	rp->sync_handle = sys_cpu_to_le16(sync_handle);
2972 }
2973 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
2974 
2975 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) ||      \
2976 	defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
/* Convert a signed 12-bit IQ sample (carried in an int16_t) to the 8-bit
 * representation used in HCI IQ report events, preserving the special
 * "saturated sample" marker value.
 */
static int8_t iq_convert_12_to_8_bits(int16_t data)
{
	/* Propagate the saturated-sample sentinel unchanged. */
	if (data == IQ_SAMPLE_SATURATED_16_BIT) {
		return IQ_SAMPLE_SATURATED_8_BIT;
	}

#if defined(CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB)
	/* Keep the 8 least significant bits; samples that do not fit in
	 * int8_t are reported as saturated.
	 */
	return (data > INT8_MAX || data < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
						    : IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);
#else  /* !CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
	/* Scale 12 -> 8 bits first, then guard the (unexpected) case where
	 * the converted value still does not fit in int8_t.
	 */
	int16_t data_conv = IQ_SAMPLE_CONVERT_12_TO_8_BIT(data);

	return (data_conv > INT8_MAX || data_conv < INT8_MIN) ? IQ_SAMPLE_SATURATED_8_BIT
							      : (int8_t)data_conv;
#endif /* CONFIG_BT_CTLR_DF_IQ_SAMPLES_CONVERT_USE_8_LSB */
}
2993 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
2994 	* || CONFIG_BT_CTLR_DF_CONN_CTE_RX
2995 	*/
2996 
2997 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
/* Build an HCI LE Connectionless IQ Report meta event from a received
 * IQ-report rx node, converting the raw 12-bit IQ samples to the 8-bit
 * HCI representation. The event is dropped if masked by the Host, if
 * CTE sampling was disabled in the meantime, or if the sync was lost.
 */
static void le_df_connectionless_iq_report(struct pdu_data *pdu_rx,
					   struct node_rx_pdu *node_rx,
					   struct net_buf *buf)
{
	struct bt_hci_evt_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct ll_sync_set *sync = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	iq_report =  (struct node_rx_iq_report *)node_rx;

	/* Nothing to do if the Host has masked LE meta events or this
	 * specific report.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

	/* If there is no LLL context and CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
	 * is enabled, the controller is in Direct Test Mode and may generate
	 * the Connectionless IQ Report.
	 *
	 * NOTE(review): if CONFIG_BT_CTLR_DF_SCAN_CTE_RX is disabled and lll
	 * is non-NULL, sync_handle/per_evt_counter would be left
	 * uninitialized below — presumably lll is always NULL in that
	 * configuration; confirm.
	 */
	if (!lll && IS_ENABLED(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)) {
		/* Set sync_handle to 0x0FFF according to the BT Core 5.3
		 * specification Vol 4 7.7.65.21
		 */
		sync_handle = 0x0FFF;
		/* Set periodic event counter to 0 since there is no periodic
		 * advertising train.
		 */
		per_evt_counter = 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	else {
		sync = HDR_LLL2ULL(lll);

		/* TX LL thread has higher priority than RX thread. It may happen that
		 * host successfully disables CTE sampling in the meantime.
		 * It should be verified here, to avoid reporting IQ samples after
		 * the functionality was disabled or if sync was lost.
		 */
		if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) ||
		    !sync->timeout_reload) {
			/* Drop further processing of the event. */
			return;
		}

		/* Get the sync handle corresponding to the LLL context passed in the
		 * node rx footer field.
		 */
		sync_handle = ull_sync_handle_get(sync);
		per_evt_counter = iq_report->event_counter;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes one bt_hci_le_iq_sample per reported sample. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) +
			(samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);


	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* Slot durations: locally configured for AoA; fixed per CTE type
	 * for AoD (1 us or 2 us switching slots).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit the single sentinel pair. */
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3109 #endif /* defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT) */
3110 
3111 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
le_df_set_conn_cte_tx_params(struct net_buf * buf,struct net_buf ** evt)3112 static void le_df_set_conn_cte_tx_params(struct net_buf *buf,
3113 					 struct net_buf **evt)
3114 {
3115 	struct bt_hci_cp_le_set_conn_cte_tx_params *cmd = (void *)buf->data;
3116 	struct bt_hci_rp_le_set_conn_cte_tx_params *rp;
3117 	uint16_t handle, handle_le16;
3118 	uint8_t status;
3119 
3120 	handle_le16 = cmd->handle;
3121 	handle = sys_le16_to_cpu(handle_le16);
3122 
3123 	status = ll_df_set_conn_cte_tx_params(handle, cmd->cte_types,
3124 					      cmd->switch_pattern_len,
3125 					      cmd->ant_ids);
3126 
3127 	rp = hci_cmd_complete(evt, sizeof(*rp));
3128 
3129 	rp->status = status;
3130 	rp->handle = handle_le16;
3131 }
3132 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
3133 
3134 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
le_df_set_conn_cte_rx_params(struct net_buf * buf,struct net_buf ** evt)3135 static void le_df_set_conn_cte_rx_params(struct net_buf *buf, struct net_buf **evt)
3136 {
3137 	struct bt_hci_cp_le_set_conn_cte_rx_params *cmd = (void *)buf->data;
3138 	struct bt_hci_rp_le_set_conn_cte_rx_params *rp;
3139 	uint16_t handle, handle_le16;
3140 	uint8_t status;
3141 
3142 	handle_le16 = cmd->handle;
3143 	handle = sys_le16_to_cpu(handle_le16);
3144 
3145 	status = ll_df_set_conn_cte_rx_params(handle, cmd->sampling_enable, cmd->slot_durations,
3146 					      cmd->switch_pattern_len, cmd->ant_ids);
3147 
3148 	rp = hci_cmd_complete(evt, sizeof(*rp));
3149 
3150 	rp->status = status;
3151 	rp->handle = handle_le16;
3152 }
3153 
/* Build an HCI LE Connection IQ Report meta event from a received
 * IQ-report rx node on a connection, converting the raw 12-bit IQ
 * samples to the 8-bit HCI representation. The event is dropped if
 * masked by the Host or if CTE sampling was disabled in the meantime.
 */
static void le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Nothing to do if the Host has masked LE meta events or this
	 * specific report.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	/* Without PHY update support the connection is always on 1M. */
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that
	 * host successfully disables CTE sampling in the meantime. It should
	 * be verified here, to avoid reporting IQ samples after the
	 * functionality was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	/* Event size includes one bt_hci_le_iq_sample per reported sample. */
	sep = meta_evt(buf, BT_HCI_EVT_LE_CONNECTION_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample))));

	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* Slot durations: locally configured for AoA; fixed per CTE type
	 * for AoD (1 us or 2 us switching slots).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			/* No valid samples: emit the single sentinel pair. */
			sep->sample[0].i = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
			sep->sample[0].q = BT_HCI_LE_CTE_REPORT_NO_VALID_SAMPLE;
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i =
					iq_convert_12_to_8_bits(iq_report->sample[idx].i);
				sep->sample[idx].q =
					iq_convert_12_to_8_bits(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
3240 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
3241 
3242 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
le_df_set_conn_cte_req_enable(struct net_buf * buf,struct net_buf ** evt)3243 static void le_df_set_conn_cte_req_enable(struct net_buf *buf, struct net_buf **evt)
3244 {
3245 	struct bt_hci_cp_le_conn_cte_req_enable *cmd = (void *)buf->data;
3246 	struct bt_hci_rp_le_conn_cte_req_enable *rp;
3247 	uint16_t handle, handle_le16;
3248 	uint8_t status;
3249 
3250 	handle_le16 = cmd->handle;
3251 	handle = sys_le16_to_cpu(handle_le16);
3252 
3253 	status = ll_df_set_conn_cte_req_enable(handle, cmd->enable,
3254 					       sys_le16_to_cpu(cmd->cte_request_interval),
3255 					       cmd->requested_cte_length, cmd->requested_cte_type);
3256 	rp = hci_cmd_complete(evt, sizeof(*rp));
3257 
3258 	rp->status = status;
3259 	rp->handle = handle_le16;
3260 }
3261 
le_df_cte_req_failed(uint8_t error_code,uint16_t handle,struct net_buf * buf)3262 static void le_df_cte_req_failed(uint8_t error_code, uint16_t handle, struct net_buf *buf)
3263 {
3264 	struct bt_hci_evt_le_cte_req_failed *sep;
3265 
3266 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
3267 	    !(le_event_mask & BT_EVT_MASK_LE_CTE_REQUEST_FAILED)) {
3268 		return;
3269 	}
3270 
3271 	sep = meta_evt(buf, BT_HCI_EVT_LE_CTE_REQUEST_FAILED, sizeof(*sep));
3272 
3273 	sep->status = error_code;
3274 	sep->conn_handle = sys_cpu_to_le16(handle);
3275 }
3276 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
3277 
3278 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
le_df_set_conn_cte_rsp_enable(struct net_buf * buf,struct net_buf ** evt)3279 static void le_df_set_conn_cte_rsp_enable(struct net_buf *buf, struct net_buf **evt)
3280 {
3281 	struct bt_hci_cp_le_conn_cte_rsp_enable *cmd = (void *)buf->data;
3282 	struct bt_hci_rp_le_conn_cte_rsp_enable *rp;
3283 	uint16_t handle, handle_le16;
3284 	uint8_t status;
3285 
3286 	handle_le16 = cmd->handle;
3287 	handle = sys_le16_to_cpu(handle_le16);
3288 
3289 	status = ll_df_set_conn_cte_rsp_enable(handle, cmd->enable);
3290 	rp = hci_cmd_complete(evt, sizeof(*rp));
3291 
3292 	rp->status = status;
3293 	rp->handle = handle_le16;
3294 }
3295 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
3296 
le_df_read_ant_inf(struct net_buf * buf,struct net_buf ** evt)3297 static void le_df_read_ant_inf(struct net_buf *buf, struct net_buf **evt)
3298 {
3299 	struct bt_hci_rp_le_read_ant_info *rp;
3300 	uint8_t max_switch_pattern_len;
3301 	uint8_t switch_sample_rates;
3302 	uint8_t max_cte_len;
3303 	uint8_t num_ant;
3304 
3305 	ll_df_read_ant_inf(&switch_sample_rates, &num_ant,
3306 			   &max_switch_pattern_len, &max_cte_len);
3307 
3308 	rp = hci_cmd_complete(evt, sizeof(*rp));
3309 
3310 	rp->max_switch_pattern_len = max_switch_pattern_len;
3311 	rp->switch_sample_rates = switch_sample_rates;
3312 	rp->max_cte_len = max_cte_len;
3313 	rp->num_ant = num_ant;
3314 	rp->status = 0x00;
3315 }
3316 #endif /* CONFIG_BT_CTLR_DF */
3317 
3318 #if defined(CONFIG_BT_CTLR_DTM_HCI)
le_rx_test(struct net_buf * buf,struct net_buf ** evt)3319 static void le_rx_test(struct net_buf *buf, struct net_buf **evt)
3320 {
3321 	struct bt_hci_cp_le_rx_test *cmd = (void *)buf->data;
3322 	uint8_t status;
3323 
3324 	status = ll_test_rx(cmd->rx_ch, BT_HCI_LE_RX_PHY_1M, BT_HCI_LE_MOD_INDEX_STANDARD,
3325 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3326 			    BT_HCI_LE_TEST_SLOT_DURATION_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3327 			    NULL);
3328 
3329 	*evt = cmd_complete_status(status);
3330 }
3331 
le_tx_test(struct net_buf * buf,struct net_buf ** evt)3332 static void le_tx_test(struct net_buf *buf, struct net_buf **evt)
3333 {
3334 	struct bt_hci_cp_le_tx_test *cmd = (void *)buf->data;
3335 	uint8_t status;
3336 
3337 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload,
3338 			    BT_HCI_LE_TX_PHY_1M, BT_HCI_LE_TEST_CTE_DISABLED,
3339 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY,
3340 			    NULL, BT_HCI_TX_TEST_POWER_MAX_SET);
3341 
3342 	*evt = cmd_complete_status(status);
3343 }
3344 
le_test_end(struct net_buf * buf,struct net_buf ** evt)3345 static void le_test_end(struct net_buf *buf, struct net_buf **evt)
3346 {
3347 	struct bt_hci_rp_le_test_end *rp;
3348 	uint16_t rx_pkt_count;
3349 	uint8_t status;
3350 
3351 	status = ll_test_end(&rx_pkt_count);
3352 
3353 	rp = hci_cmd_complete(evt, sizeof(*rp));
3354 	rp->status = status;
3355 	rp->rx_pkt_count = sys_cpu_to_le16(rx_pkt_count);
3356 }
3357 
le_enh_rx_test(struct net_buf * buf,struct net_buf ** evt)3358 static void le_enh_rx_test(struct net_buf *buf, struct net_buf **evt)
3359 {
3360 	struct bt_hci_cp_le_enh_rx_test *cmd = (void *)buf->data;
3361 	uint8_t status;
3362 
3363 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, BT_HCI_LE_TEST_CTE_DISABLED,
3364 			    BT_HCI_LE_TEST_CTE_TYPE_ANY, BT_HCI_LE_TEST_SLOT_DURATION_ANY,
3365 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL);
3366 
3367 	*evt = cmd_complete_status(status);
3368 }
3369 
3370 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
le_rx_test_v3(struct net_buf * buf,struct net_buf ** evt)3371 static void le_rx_test_v3(struct net_buf *buf, struct net_buf **evt)
3372 {
3373 	struct bt_hci_cp_le_rx_test_v3 *cmd = (void *)buf->data;
3374 	uint8_t status;
3375 
3376 	status = ll_test_rx(cmd->rx_ch, cmd->phy, cmd->mod_index, cmd->expected_cte_len,
3377 			    cmd->expected_cte_type, cmd->slot_durations, cmd->switch_pattern_len,
3378 			    cmd->ant_ids);
3379 
3380 	*evt = cmd_complete_status(status);
3381 }
3382 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
3383 
le_enh_tx_test(struct net_buf * buf,struct net_buf ** evt)3384 static void le_enh_tx_test(struct net_buf *buf, struct net_buf **evt)
3385 {
3386 	struct bt_hci_cp_le_enh_tx_test *cmd = (void *)buf->data;
3387 	uint8_t status;
3388 
3389 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3390 			    BT_HCI_LE_TEST_CTE_DISABLED, BT_HCI_LE_TEST_CTE_TYPE_ANY,
3391 			    BT_HCI_LE_TEST_SWITCH_PATTERN_LEN_ANY, NULL,
3392 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3393 
3394 	*evt = cmd_complete_status(status);
3395 }
3396 
3397 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
le_tx_test_v3(struct net_buf * buf,struct net_buf ** evt)3398 static void le_tx_test_v3(struct net_buf *buf, struct net_buf **evt)
3399 {
3400 	struct bt_hci_cp_le_tx_test_v3 *cmd = (void *)buf->data;
3401 	uint8_t status;
3402 
3403 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3404 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3405 			    BT_HCI_TX_TEST_POWER_MAX_SET);
3406 
3407 	*evt = cmd_complete_status(status);
3408 }
3409 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
3410 
3411 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
le_tx_test_v4(struct net_buf * buf,struct net_buf ** evt)3412 static void le_tx_test_v4(struct net_buf *buf, struct net_buf **evt)
3413 {
3414 	struct bt_hci_cp_le_tx_test_v4 *cmd = (void *)buf->data;
3415 	struct bt_hci_cp_le_tx_test_v4_tx_power *tx_power = (void *)(buf->data +
3416 			sizeof(struct bt_hci_cp_le_tx_test_v4) + cmd->switch_pattern_len);
3417 	uint8_t status;
3418 
3419 	status = ll_test_tx(cmd->tx_ch, cmd->test_data_len, cmd->pkt_payload, cmd->phy,
3420 			    cmd->cte_len, cmd->cte_type, cmd->switch_pattern_len, cmd->ant_ids,
3421 			    tx_power->tx_power);
3422 
3423 	*evt = cmd_complete_status(status);
3424 }
3425 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
3426 #endif /* CONFIG_BT_CTLR_DTM_HCI */
3427 
3428 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3429 #if defined(CONFIG_BT_BROADCASTER)
3430 
le_set_adv_set_random_addr(struct net_buf * buf,struct net_buf ** evt)3431 static void le_set_adv_set_random_addr(struct net_buf *buf,
3432 				       struct net_buf **evt)
3433 {
3434 	struct bt_hci_cp_le_set_adv_set_random_addr *cmd = (void *)buf->data;
3435 	uint8_t status;
3436 	uint8_t handle;
3437 
3438 	if (adv_cmds_ext_check(evt)) {
3439 		return;
3440 	}
3441 
3442 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3443 	if (status) {
3444 		*evt = cmd_complete_status(status);
3445 		return;
3446 	}
3447 
3448 	status = ll_adv_aux_random_addr_set(handle, &cmd->bdaddr.val[0]);
3449 
3450 	*evt = cmd_complete_status(status);
3451 }
3452 
/* HCI LE Set Extended Advertising Parameters: validate the parameters
 * (when CONFIG_BT_CTLR_PARAM_CHECK is enabled), allocate or look up the
 * LL advertising set, and apply the parameters. The reply carries the
 * (possibly adjusted) selected TX power.
 */
static void le_set_ext_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_param *cmd = (void *)buf->data;
	struct bt_hci_rp_le_set_ext_adv_param *rp;
	uint32_t min_interval;
	uint16_t evt_prop;
	uint8_t tx_pwr;
	uint8_t status;
	uint8_t phy_p;
	uint8_t phy_s;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	if (cmd->handle > BT_HCI_LE_ADV_HANDLE_MAX) {
		*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
		return;
	}

	evt_prop = sys_le16_to_cpu(cmd->props);
	/* Primary advertising intervals are 24-bit little-endian fields. */
	min_interval = sys_get_le24(cmd->prim_min_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t max_interval =
					sys_get_le24(cmd->prim_max_interval);

		/* Compare advertising interval maximum with implementation
		 * supported advertising interval maximum value defined in the
		 * Kconfig CONFIG_BT_CTLR_ADV_INTERVAL_MAX.
		 * Interval checks are skipped for legacy high-duty-cycle
		 * directed advertising, which does not use these fields.
		 */
		if ((!(evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) ||
		     !(evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) &&
		    ((min_interval > max_interval) ||
		     (min_interval < BT_HCI_LE_PRIM_ADV_INTERVAL_MIN) ||
		     (max_interval > CONFIG_BT_CTLR_ADV_INTERVAL_MAX))) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}

		/* Reject unknown PHY values, and Coded PHY when not built in. */
		if ((cmd->prim_adv_phy > BT_HCI_LE_PHY_CODED) ||
		    (cmd->sec_adv_phy > BT_HCI_LE_PHY_CODED) ||
		    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
		     ((cmd->prim_adv_phy == BT_HCI_LE_PHY_CODED) ||
		      (cmd->sec_adv_phy == BT_HCI_LE_PHY_CODED)))) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}
	}

	/* Look up the LL set for this HCI handle, allocating a new one if
	 * this handle has not been used before.
	 */
	status = ll_adv_set_by_hci_handle_get_or_new(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	tx_pwr = cmd->tx_power;
	/* Convert HCI PHY enumeration (1-based) to LL PHY bit masks.
	 * NOTE(review): a phy value of 0 would yield BIT(-1) here; without
	 * CONFIG_BT_CTLR_PARAM_CHECK such values are presumably rejected
	 * further down the stack — confirm.
	 */
	phy_p = BIT(cmd->prim_adv_phy - 1);
	phy_s = BIT(cmd->sec_adv_phy - 1);

	status = ll_adv_params_set(handle, evt_prop, min_interval,
				   PDU_ADV_TYPE_EXT_IND, cmd->own_addr_type,
				   cmd->peer_addr.type, cmd->peer_addr.a.val,
				   cmd->prim_channel_map, cmd->filter_policy,
				   &tx_pwr, phy_p, cmd->sec_adv_max_skip, phy_s,
				   cmd->sid, cmd->scan_req_notify_enable);

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = status;
	/* tx_pwr may have been adjusted by the LL to the selected level. */
	rp->tx_power = tx_pwr;
}
3525 
le_set_ext_adv_data(struct net_buf * buf,struct net_buf ** evt)3526 static void le_set_ext_adv_data(struct net_buf *buf, struct net_buf **evt)
3527 {
3528 	struct bt_hci_cp_le_set_ext_adv_data *cmd = (void *)buf->data;
3529 	uint8_t status;
3530 	uint8_t handle;
3531 
3532 	if (adv_cmds_ext_check(evt)) {
3533 		return;
3534 	}
3535 
3536 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3537 	if (status) {
3538 		*evt = cmd_complete_status(status);
3539 		return;
3540 	}
3541 
3542 	status = ll_adv_aux_ad_data_set(handle, cmd->op, cmd->frag_pref,
3543 					cmd->len, cmd->data);
3544 
3545 	*evt = cmd_complete_status(status);
3546 }
3547 
le_set_ext_scan_rsp_data(struct net_buf * buf,struct net_buf ** evt)3548 static void le_set_ext_scan_rsp_data(struct net_buf *buf, struct net_buf **evt)
3549 {
3550 	struct bt_hci_cp_le_set_ext_scan_rsp_data *cmd = (void *)buf->data;
3551 	uint8_t status;
3552 	uint8_t handle;
3553 
3554 	if (adv_cmds_ext_check(evt)) {
3555 		return;
3556 	}
3557 
3558 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3559 	if (status) {
3560 		*evt = cmd_complete_status(status);
3561 		return;
3562 	}
3563 
3564 	status = ll_adv_aux_sr_data_set(handle, cmd->op, cmd->frag_pref,
3565 					cmd->len, cmd->data);
3566 
3567 	*evt = cmd_complete_status(status);
3568 }
3569 
/* HCI LE Set Extended Advertising Enable: enable or disable one or more
 * advertising sets. An empty set list with enable == 0 disables all
 * sets; an empty list with enable == 1 is an invalid parameter.
 */
static void le_set_ext_adv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_adv_enable *cmd = (void *)buf->data;
	struct bt_hci_ext_adv_set *s;
	uint8_t set_num;
	uint8_t status;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	set_num = cmd->set_num;
	if (!set_num) {
		/* Enable with no sets listed is invalid per the spec. */
		if (cmd->enable) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}

		/* Disable with no sets listed means "disable all". */
		status = ll_adv_disable_all();

		*evt = cmd_complete_status(status);

		return;
	}

	/* Check for duplicate handles */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		for (uint8_t i = 0U; i < set_num - 1; i++) {
			for (uint8_t j = i + 1U; j < set_num; j++) {
				if (cmd->s[i].handle == cmd->s[j].handle) {
					*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
					return;
				}
			}
		}
	}

	/* Apply the enable/disable to each listed set in order; processing
	 * stops at the first failure and that status is reported.
	 */
	s = (void *) cmd->s;
	do {
		status = ll_adv_set_by_hci_handle_get(s->handle, &handle);
		if (status) {
			break;
		}

		/* TODO: duration and events parameter use. */
#if defined(CONFIG_BT_HCI_MESH_EXT)
		status = ll_adv_enable(handle, cmd->enable, 0, 0, 0, 0, 0);
#else /* !CONFIG_BT_HCI_MESH_EXT */
		status = ll_adv_enable(handle, cmd->enable,
				       sys_le16_to_cpu(s->duration), s->max_ext_adv_evts);
#endif /* !CONFIG_BT_HCI_MESH_EXT */
		if (status) {
			/* TODO: how to handle succeeded ones before this
			 * error.
			 */
			break;
		}

		s++;
	} while (--set_num);

	*evt = cmd_complete_status(status);
}
3634 
le_read_max_adv_data_len(struct net_buf * buf,struct net_buf ** evt)3635 static void le_read_max_adv_data_len(struct net_buf *buf, struct net_buf **evt)
3636 {
3637 	struct bt_hci_rp_le_read_max_adv_data_len *rp;
3638 	uint16_t max_adv_data_len;
3639 
3640 	if (adv_cmds_ext_check(evt)) {
3641 		return;
3642 	}
3643 
3644 	rp = hci_cmd_complete(evt, sizeof(*rp));
3645 
3646 	max_adv_data_len = ll_adv_aux_max_data_length_get();
3647 
3648 	rp->max_adv_data_len = sys_cpu_to_le16(max_adv_data_len);
3649 	rp->status = 0x00;
3650 }
3651 
le_read_num_adv_sets(struct net_buf * buf,struct net_buf ** evt)3652 static void le_read_num_adv_sets(struct net_buf *buf, struct net_buf **evt)
3653 {
3654 	struct bt_hci_rp_le_read_num_adv_sets *rp;
3655 
3656 	if (adv_cmds_ext_check(evt)) {
3657 		return;
3658 	}
3659 
3660 	rp = hci_cmd_complete(evt, sizeof(*rp));
3661 
3662 	rp->num_sets = ll_adv_aux_set_count_get();
3663 	rp->status = 0x00;
3664 }
3665 
le_remove_adv_set(struct net_buf * buf,struct net_buf ** evt)3666 static void le_remove_adv_set(struct net_buf *buf, struct net_buf **evt)
3667 {
3668 	struct bt_hci_cp_le_remove_adv_set *cmd = (void *)buf->data;
3669 	uint8_t status;
3670 	uint8_t handle;
3671 
3672 	if (adv_cmds_ext_check(evt)) {
3673 		return;
3674 	}
3675 
3676 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3677 	if (status) {
3678 		*evt = cmd_complete_status(status);
3679 		return;
3680 	}
3681 
3682 	status = ll_adv_aux_set_remove(handle);
3683 
3684 	*evt = cmd_complete_status(status);
3685 }
3686 
/* HCI LE Clear Advertising Sets: drop every controller advertising set. */
static void le_clear_adv_sets(struct net_buf *buf, struct net_buf **evt)
{
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_adv_aux_set_clear());
}
3699 
3700 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
/* HCI LE Set Periodic Advertising Parameters command handler.
 *
 * Validates min/max interval against the spec minimum and the Kconfig
 * implementation maximum (when CONFIG_BT_CTLR_PARAM_CHECK is enabled),
 * then programs the LL periodic advertising parameters.
 */
static void le_set_per_adv_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_param *cmd = (void *)buf->data;
	uint16_t max_interval;
	uint16_t flags;
	uint8_t status;
	uint8_t handle;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	max_interval = sys_le16_to_cpu(cmd->max_interval);

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		const uint32_t min_interval =
					sys_le16_to_cpu(cmd->min_interval);

		if ((min_interval > max_interval) ||
		    (min_interval < BT_HCI_LE_PER_ADV_INTERVAL_MIN)) {
			*evt = cmd_complete_status(BT_HCI_ERR_INVALID_PARAM);
			return;
		}

		/* Compare periodic advertising interval with
		 * implementation supported periodic advertising interval
		 * maximum value defined in the Kconfig
		 * CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX.
		 */
		if (min_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
			*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
			return;
		}

		/* Clamp the requested maximum to what the implementation
		 * supports; min_interval was already verified to fit.
		 */
		if (max_interval > CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX) {
			max_interval = CONFIG_BT_CTLR_ADV_PERIODIC_INTERVAL_MAX;
		}
	}

	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
	if (status) {
		*evt = cmd_complete_status(status);
		return;
	}

	flags = sys_le16_to_cpu(cmd->props);

	status = ll_adv_sync_param_set(handle, max_interval, flags);

	*evt = cmd_complete_status(status);
}
3752 
le_set_per_adv_data(struct net_buf * buf,struct net_buf ** evt)3753 static void le_set_per_adv_data(struct net_buf *buf, struct net_buf **evt)
3754 {
3755 	struct bt_hci_cp_le_set_per_adv_data *cmd = (void *)buf->data;
3756 	uint8_t status;
3757 	uint8_t handle;
3758 
3759 	if (adv_cmds_ext_check(evt)) {
3760 		return;
3761 	}
3762 
3763 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3764 	if (status) {
3765 		*evt = cmd_complete_status(status);
3766 		return;
3767 	}
3768 
3769 	status = ll_adv_sync_ad_data_set(handle, cmd->op, cmd->len,
3770 					 cmd->data);
3771 
3772 	*evt = cmd_complete_status(status);
3773 }
3774 
le_set_per_adv_enable(struct net_buf * buf,struct net_buf ** evt)3775 static void le_set_per_adv_enable(struct net_buf *buf, struct net_buf **evt)
3776 {
3777 	struct bt_hci_cp_le_set_per_adv_enable *cmd = (void *)buf->data;
3778 	uint8_t status;
3779 	uint8_t handle;
3780 
3781 	if (adv_cmds_ext_check(evt)) {
3782 		return;
3783 	}
3784 
3785 	status = ll_adv_set_by_hci_handle_get(cmd->handle, &handle);
3786 	if (status) {
3787 		*evt = cmd_complete_status(status);
3788 		return;
3789 	}
3790 
3791 	status = ll_adv_sync_enable(handle, cmd->enable);
3792 
3793 	*evt = cmd_complete_status(status);
3794 }
3795 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
3796 #endif /* CONFIG_BT_BROADCASTER */
3797 
3798 #if defined(CONFIG_BT_OBSERVER)
/* HCI LE Set Extended Scan Parameters command handler.
 *
 * Iterates over every PHY the controller supports (1M, and Coded when
 * CONFIG_BT_CTLR_PHY_CODED is set); scan sets for PHYs not selected by
 * the host are configured with zero interval/window to keep them disabled.
 */
static void le_set_ext_scan_param(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_param *cmd = (void *)buf->data;
	struct bt_hci_ext_scan_phy *p;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t status;
	uint8_t phys;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_scan_params_set function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	/* Reject requests that select no supported PHY, or any unsupported
	 * PHY bit.
	 */
	phys = cmd->phys;
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_complete_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	own_addr_type = cmd->own_addr_type;
	filter_policy = cmd->filter_policy;
	p = cmd->p;

	/* Irrespective of enabled PHYs to scan for, ll_scan_params_set needs
	 * to be called to initialise the scan sets.
	 * Passing interval and window as 0, disable the particular scan set
	 * from being enabled.
	 */
	do {
		uint16_t interval;
		uint16_t window;
		uint8_t type;
		uint8_t phy;

		/* Get single PHY bit from the loop bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		/* Pass the PHY (1M or Coded) of scan set in MSbits of type
		 * parameter
		 */
		type = (phy << 1);

		/* If current PHY is one of the PHY in the Scanning_PHYs,
		 * pick the supplied scan type, interval and window.
		 * p only advances for PHYs the host actually supplied.
		 */
		if (phys & phy) {
			type |= (p->type & 0x01);
			interval = sys_le16_to_cpu(p->interval);
			window = sys_le16_to_cpu(p->window);
			p++;
		} else {
			interval = 0U;
			window = 0U;
		}

		status = ll_scan_params_set(type, interval, window,
					    own_addr_type, filter_policy);
		if (status) {
			break;
		}

		/* Clear the lowest set bit; loop until all PHYs handled */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	*evt = cmd_complete_status(status);
}
3876 
/* HCI LE Set Extended Scan Enable command handler.
 *
 * When duplicate filtering is compiled in (CONFIG_BT_CTLR_DUP_FILTER_LEN),
 * (re)initialises the HCI-level duplicate filter state before enabling or
 * disabling scanning in the LL.
 */
static void le_set_ext_scan_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_ext_scan_enable *cmd = (void *)buf->data;
	uint8_t status;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->enable && cmd->filter_dup) {
		if (0) {

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		} else if (dup_count == DUP_FILTER_DISABLED) {
			/* Filter was fully off: start scanning-filter fresh */
			dup_scan = true;

			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		} else if (!dup_scan) {
			/* Filter active for periodic sync only: keep entries,
			 * reset the extended advertising modes.
			 */
			dup_scan = true;
			dup_ext_adv_reset();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

		} else {
			/* All entries reset */
			dup_count = 0;
			dup_curr = 0U;
		}
	} else {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
		dup_scan = false;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
		dup_count = DUP_FILTER_DISABLED;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	status = ll_scan_enable(cmd->enable, sys_le16_to_cpu(cmd->duration),
				sys_le16_to_cpu(cmd->period));

	/* NOTE: As filter duplicates is implemented here in HCI source code,
	 *       enabling of already enabled scanning shall succeed after
	 *       updates to filter duplicates is handled in the above
	 *       statements. Refer to BT Spec v5.0 Vol 2 Part E Section 7.8.11.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SCAN_ENABLE_STRICT) &&
	    (status == BT_HCI_ERR_CMD_DISALLOWED)) {
		status = BT_HCI_ERR_SUCCESS;
	}

	*evt = cmd_complete_status(status);
}
3932 
3933 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* HCI LE Periodic Advertising Create Sync command handler.
 *
 * Responds with a Command Status event (the command completes
 * asynchronously with LE Periodic Advertising Sync Established).
 * Rejects option bits whose supporting features are not compiled in.
 */
static void le_per_adv_create_sync(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_per_adv_create_sync *cmd = (void *)buf->data;
	uint16_t sync_timeout;
	uint8_t status;
	uint16_t skip;

	/* NULL: this command emits Command Status, not Command Complete */
	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST)) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* Duplicate filtering without reports-disabled requires ADI support */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
	    (cmd->options &
	     (BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED |
	      BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE)) ==
	    BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);
		return;
	}

	/* FIXME: Check for HCI LE Set Periodic Advertising Receive Enable
	 * command support and if reporting is initially disabled then
	 * return error code Connection Failed to be Established /
	 * Synchronization Timeout (0x3E).
	 */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Initialize duplicate filtering */
	if (cmd->options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) {
		if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
			dup_count = 0;
			dup_curr = 0U;
		} else {
			/* NOTE: Invalidate dup_ext_adv_mode array entries is
			 *       done when sync is established.
			 */
		}
	} else if (!dup_scan) {
		dup_count = DUP_FILTER_DISABLED;
	}
#endif

	skip = sys_le16_to_cpu(cmd->skip);
	sync_timeout = sys_le16_to_cpu(cmd->sync_timeout);

	/* NOTE: the #if below selects which cte_type validation applies; the
	 * shared else arm performs the actual sync creation.
	 */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	if ((cmd->cte_type & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_INVALID_VALUE) != 0) {
		status = BT_HCI_ERR_CMD_DISALLOWED;
#else
	if (cmd->cte_type != BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
		status = BT_HCI_ERR_INVALID_PARAM;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	} else {
		status = ll_sync_create(cmd->options, cmd->sid, cmd->addr.type, cmd->addr.a.val,
					skip, sync_timeout, cmd->cte_type);
	}
	*evt = cmd_status(status);
}
3999 
4000 static void le_per_adv_create_sync_cancel(struct net_buf *buf,
4001 					  struct net_buf **evt, void **node_rx)
4002 {
4003 	struct bt_hci_evt_cc_status *ccst;
4004 	uint8_t status;
4005 
4006 	if (adv_cmds_ext_check(evt)) {
4007 		return;
4008 	}
4009 
4010 	status = ll_sync_create_cancel(node_rx);
4011 
4012 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
4013 	ccst->status = status;
4014 }
4015 
4016 static void le_per_adv_terminate_sync(struct net_buf *buf, struct net_buf **evt)
4017 {
4018 	struct bt_hci_cp_le_per_adv_terminate_sync *cmd = (void *)buf->data;
4019 	struct bt_hci_evt_cc_status *ccst;
4020 	uint16_t handle;
4021 	uint8_t status;
4022 
4023 	if (adv_cmds_ext_check(evt)) {
4024 		return;
4025 	}
4026 
4027 	handle = sys_le16_to_cpu(cmd->handle);
4028 
4029 	status = ll_sync_terminate(handle);
4030 
4031 	ccst = hci_cmd_complete(evt, sizeof(*ccst));
4032 	ccst->status = status;
4033 }
4034 
/* HCI LE Set Periodic Advertising Receive Enable command handler.
 *
 * Enables/disables report generation for an established sync, and, when
 * duplicate filtering is compiled in, reinitialises the duplicate filter
 * state on success.
 */
static void le_per_adv_recv_enable(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_set_per_adv_recv_enable *cmd = (void *)buf->data;
	struct bt_hci_evt_cc_status *ccst;
	uint16_t handle;
	uint8_t status;

	if (adv_cmds_ext_check(evt)) {
		return;
	}

	handle = sys_le16_to_cpu(cmd->handle);

	status = ll_sync_recv_enable(handle, cmd->enable);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Only touch duplicate filter state when the LL accepted the change */
	if (!status) {
		if (cmd->enable &
		    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) {
			if (!dup_scan || (dup_count == DUP_FILTER_DISABLED)) {
				dup_count = 0;
				dup_curr = 0U;
			} else {
				/* NOTE: Invalidate dup_ext_adv_mode array
				 *       entries is done when sync is
				 *       established.
				 */
			}
		} else if (!dup_scan) {
			/* No scanning filter either: disable entirely */
			dup_count = DUP_FILTER_DISABLED;
		}
	}
#endif

	ccst = hci_cmd_complete(evt, sizeof(*ccst));
	ccst->status = status;
}
4072 
4073 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4074 static void le_add_dev_to_pal(struct net_buf *buf, struct net_buf **evt)
4075 {
4076 	struct bt_hci_cp_le_add_dev_to_per_adv_list *cmd = (void *)buf->data;
4077 	uint8_t status;
4078 
4079 	if (adv_cmds_ext_check(evt)) {
4080 		return;
4081 	}
4082 
4083 	status = ll_pal_add(&cmd->addr, cmd->sid);
4084 
4085 	*evt = cmd_complete_status(status);
4086 }
4087 
4088 static void le_rem_dev_from_pal(struct net_buf *buf, struct net_buf **evt)
4089 {
4090 	struct bt_hci_cp_le_rem_dev_from_per_adv_list *cmd = (void *)buf->data;
4091 	uint8_t status;
4092 
4093 	if (adv_cmds_ext_check(evt)) {
4094 		return;
4095 	}
4096 
4097 	status = ll_pal_remove(&cmd->addr, cmd->sid);
4098 
4099 	*evt = cmd_complete_status(status);
4100 }
4101 
/* HCI LE Clear Periodic Advertiser List command handler. */
static void le_clear_pal(struct net_buf *buf, struct net_buf **evt)
{
	if (adv_cmds_ext_check(evt)) {
		return;
	}

	*evt = cmd_complete_status(ll_pal_clear());
}
4114 
4115 static void le_read_pal_size(struct net_buf *buf, struct net_buf **evt)
4116 {
4117 	struct bt_hci_rp_le_read_per_adv_list_size *rp;
4118 
4119 	if (adv_cmds_ext_check(evt)) {
4120 		return;
4121 	}
4122 
4123 	rp = hci_cmd_complete(evt, sizeof(*rp));
4124 	rp->status = 0x00;
4125 
4126 	rp->list_size = ll_pal_size_get();
4127 }
4128 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4129 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4130 #endif /* CONFIG_BT_OBSERVER */
4131 
4132 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
4133 static void le_per_adv_sync_transfer(struct net_buf *buf, struct net_buf **evt)
4134 {
4135 	struct bt_hci_cp_le_per_adv_sync_transfer *cmd = (void *)buf->data;
4136 	struct bt_hci_rp_le_per_adv_sync_transfer *rp;
4137 	uint16_t conn_handle, conn_handle_le16;
4138 	uint16_t service_data;
4139 	uint16_t sync_handle;
4140 	uint8_t status;
4141 
4142 	conn_handle_le16 = cmd->conn_handle;
4143 
4144 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4145 	service_data = sys_le16_to_cpu(cmd->service_data);
4146 	sync_handle = sys_le16_to_cpu(cmd->sync_handle);
4147 
4148 	status = ll_sync_transfer(conn_handle, service_data, sync_handle);
4149 
4150 	rp = hci_cmd_complete(evt, sizeof(*rp));
4151 	rp->conn_handle = conn_handle_le16;
4152 	rp->status = status;
4153 }
4154 
4155 static void le_per_adv_set_info_transfer(struct net_buf *buf, struct net_buf **evt)
4156 {
4157 	struct bt_hci_cp_le_per_adv_set_info_transfer *cmd = (void *)buf->data;
4158 	struct bt_hci_rp_le_per_adv_set_info_transfer *rp;
4159 	uint16_t conn_handle, conn_handle_le16;
4160 	uint16_t service_data;
4161 	uint8_t adv_handle;
4162 	uint8_t status;
4163 
4164 	conn_handle_le16 = cmd->conn_handle;
4165 
4166 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4167 	service_data = sys_le16_to_cpu(cmd->service_data);
4168 	adv_handle = cmd->adv_handle;
4169 
4170 	status = ll_adv_sync_set_info_transfer(conn_handle, service_data, adv_handle);
4171 
4172 	rp = hci_cmd_complete(evt, sizeof(*rp));
4173 	rp->conn_handle = conn_handle_le16;
4174 	rp->status = status;
4175 }
4176 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
4177 
4178 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
4179 static void le_past_param(struct net_buf *buf, struct net_buf **evt)
4180 {
4181 	struct bt_hci_cp_le_past_param *cmd = (void *)buf->data;
4182 	struct bt_hci_rp_le_past_param *rp;
4183 	uint16_t conn_handle_le16;
4184 	uint16_t conn_handle;
4185 	uint16_t timeout;
4186 	uint8_t cte_type;
4187 	uint8_t status;
4188 	uint16_t skip;
4189 	uint8_t mode;
4190 
4191 	if (adv_cmds_ext_check(evt)) {
4192 		return;
4193 	}
4194 
4195 	conn_handle_le16 = cmd->conn_handle;
4196 
4197 	conn_handle = sys_le16_to_cpu(cmd->conn_handle);
4198 	mode = cmd->mode;
4199 	skip = sys_le16_to_cpu(cmd->skip);
4200 	timeout = sys_le16_to_cpu(cmd->timeout);
4201 	cte_type = cmd->cte_type;
4202 
4203 	status = ll_past_param(conn_handle, mode, skip, timeout, cte_type);
4204 
4205 	rp = hci_cmd_complete(evt, sizeof(*rp));
4206 	rp->conn_handle = conn_handle_le16;
4207 	rp->status = status;
4208 }
4209 
4210 static void le_default_past_param(struct net_buf *buf, struct net_buf **evt)
4211 {
4212 	struct bt_hci_cp_le_default_past_param *cmd = (void *)buf->data;
4213 	struct bt_hci_rp_le_default_past_param *rp;
4214 	uint16_t timeout;
4215 	uint8_t cte_type;
4216 	uint8_t status;
4217 	uint16_t skip;
4218 	uint8_t mode;
4219 
4220 	if (adv_cmds_ext_check(evt)) {
4221 		return;
4222 	}
4223 
4224 	mode = cmd->mode;
4225 	skip = sys_le16_to_cpu(cmd->skip);
4226 	timeout = sys_le16_to_cpu(cmd->timeout);
4227 	cte_type = cmd->cte_type;
4228 
4229 	status = ll_default_past_param(mode, skip, timeout, cte_type);
4230 
4231 	rp = hci_cmd_complete(evt, sizeof(*rp));
4232 	rp->status = status;
4233 }
4234 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4235 
4236 #if defined(CONFIG_BT_CENTRAL)
/* HCI LE Extended Create Connection command handler.
 *
 * Iterates over the supported initiating PHYs; for selected PHYs the
 * supplied connection parameters are programmed via ll_create_connection,
 * for unselected ones the scan set is reset so it stays inactive.
 * Responds with a Command Status event.
 */
static void le_ext_create_connection(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_le_ext_create_conn *cmd = (void *)buf->data;
	struct bt_hci_ext_conn_phy *p;
	uint8_t peer_addr_type;
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t phys_bitmask;
	uint8_t *peer_addr;
	uint8_t status;
	uint8_t phys;

	/* NULL: this command emits Command Status, not Command Complete */
	if (adv_cmds_ext_check(NULL)) {
		*evt = cmd_status(BT_HCI_ERR_CMD_DISALLOWED);
		return;
	}

	/* Number of bits set indicate scan sets to be configured by calling
	 * ll_create_connection function.
	 */
	phys_bitmask = BT_HCI_LE_EXT_SCAN_PHY_1M;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		phys_bitmask |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
	}

	phys = cmd->phys;

	/* Ignore Scan Interval and Scan Window, and ignore scanning if
	 * Initiating PHY is set for LE 2M PHY
	 * Refer to Bluetooth Core Specification Version 5.4 Vol 4, Part E
	 * 7.8.66 LE Extended Create Connection command
	 */
	phys &= ~BT_HCI_LE_EXT_SCAN_PHY_2M;

	/* Check if unsupported PHY requested for scanning */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    (((phys & phys_bitmask) == 0) || (phys & ~phys_bitmask))) {
		*evt = cmd_status(BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL);

		return;
	}

	filter_policy = cmd->filter_policy;
	own_addr_type = cmd->own_addr_type;
	peer_addr_type = cmd->peer_addr.type;
	peer_addr = cmd->peer_addr.a.val;
	p = cmd->p;

	do {
		uint16_t supervision_timeout;
		uint16_t conn_interval_max;
		uint16_t scan_interval;
		uint16_t conn_latency;
		uint16_t scan_window;
		uint8_t phy;

		/* Lowest remaining PHY bit in the supported bitmask */
		phy = BIT(find_lsb_set(phys_bitmask) - 1);

		if (phys & phy) {
			/* Host selected this PHY: consume its parameter set */
			scan_interval = sys_le16_to_cpu(p->scan_interval);
			scan_window = sys_le16_to_cpu(p->scan_window);
			conn_interval_max =
				sys_le16_to_cpu(p->conn_interval_max);
			conn_latency = sys_le16_to_cpu(p->conn_latency);
			supervision_timeout =
				sys_le16_to_cpu(p->supervision_timeout);

			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
				status = check_cconn_params(true, scan_interval,
							    scan_window,
							    conn_interval_max,
							    conn_latency,
							    supervision_timeout);
				if (status) {
					*evt = cmd_status(status);
					return;
				}
			}

			status = ll_create_connection(scan_interval,
						      scan_window,
						      filter_policy,
						      peer_addr_type,
						      peer_addr,
						      own_addr_type,
						      conn_interval_max,
						      conn_latency,
						      supervision_timeout,
						      phy);
			p++;
		} else {
			uint8_t type;

			type = (phy << 1);
			/* NOTE: Pass invalid interval value to reset the PHY
			 *       value in the scan instance so not to start
			 *       scanning on the unselected PHY.
			 */
			status = ll_scan_params_set(type, 0, 0, 0, 0);
		}

		if (status) {
			*evt = cmd_status(status);
			return;
		}

		/* Clear the lowest set bit; loop until all PHYs handled */
		phys_bitmask &= (phys_bitmask - 1);
	} while (phys_bitmask);

	status = ll_connect_enable(phys & BT_HCI_LE_EXT_SCAN_PHY_CODED);

	*evt = cmd_status(status);
}
4350 #endif /* CONFIG_BT_CENTRAL */
4351 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4352 
4353 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
/* Generate the HCI LE CIS Request event from a LL CIS request node.
 *
 * If the host has not enabled ISO channels or masked the event, the CIS
 * is auto-rejected towards the peer instead of raising the event.
 */
static void le_cis_request(struct pdu_data *pdu_data,
			   struct node_rx_pdu *node_rx,
			   struct net_buf *buf)
{
	struct bt_hci_evt_le_cis_req *sep;
	struct node_rx_conn_iso_req *req;
	void *node;

	/* Check for pdu field being aligned before accessing CIS established
	 * event.
	 * NOTE(review): alignment is asserted against
	 * node_rx_conn_iso_estab while the pdu is then read as
	 * node_rx_conn_iso_req — presumably both types share alignment
	 * requirements; confirm against their definitions.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));

	req = node;
	if (!(ll_feat_get() & BIT64(BT_LE_FEAT_BIT_ISO_CHANNELS)) ||
	    !(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CIS_REQ)) {
		/* Event suppressed: reject the CIS back to the peer */
		ll_cis_reject(req->cis_handle, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE);
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_REQ, sizeof(*sep));
	sep->acl_handle = sys_cpu_to_le16(node_rx->hdr.handle);
	sep->cis_handle = sys_cpu_to_le16(req->cis_handle);
	sep->cig_id = req->cig_id;
	sep->cis_id = req->cis_id;
}
4382 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4383 
4384 #if defined(CONFIG_BT_CTLR_CONN_ISO)
/* Generate the HCI LE CIS Established event from a LL node.
 *
 * On failure (CIG released before establishment) only status and handle
 * are filled; the remaining fields are left as allocated by meta_evt.
 */
static void le_cis_established(struct pdu_data *pdu_data,
			       struct node_rx_pdu *node_rx,
			       struct net_buf *buf)
{
	struct lll_conn_iso_stream_rxtx *lll_cis_c;
	struct lll_conn_iso_stream_rxtx *lll_cis_p;
	struct bt_hci_evt_le_cis_established *sep;
	struct lll_conn_iso_stream *lll_cis;
	struct node_rx_conn_iso_estab *est;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	bool is_central;
	void *node;

	/* Skip entirely when the host masked the event */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CIS_ESTABLISHED)) {
		return;
	}

	cis = node_rx->rx_ftr.param;
	cig = cis->group;

	sep = meta_evt(buf, BT_HCI_EVT_LE_CIS_ESTABLISHED, sizeof(*sep));

	/* Check for pdu field being aligned before accessing CIS established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_conn_iso_estab));

	est = node;
	sep->status = est->status;
	sep->conn_handle = sys_cpu_to_le16(est->cis_handle);

	if (!cig) {
		/* CIS was not established and instance was released */
		return;
	}

	lll_cis = &cis->lll;
	is_central = cig->lll.role == BT_CONN_ROLE_CENTRAL;
	/* Central-to-peripheral direction maps to our tx when we are the
	 * central, to our rx when we are the peripheral (and vice versa).
	 */
	lll_cis_c = is_central ? &lll_cis->tx : &lll_cis->rx;
	lll_cis_p = is_central ? &lll_cis->rx : &lll_cis->tx;

	sys_put_le24(cig->sync_delay, sep->cig_sync_delay);
	sys_put_le24(cis->sync_delay, sep->cis_sync_delay);
	sys_put_le24(cig->c_latency, sep->c_latency);
	sys_put_le24(cig->p_latency, sep->p_latency);
	sep->c_phy = find_lsb_set(lll_cis_c->phy);
	sep->p_phy = find_lsb_set(lll_cis_p->phy);
	sep->nse = lll_cis->nse;
	sep->c_bn = lll_cis_c->bn;
	sep->p_bn = lll_cis_p->bn;
	sep->c_ft = lll_cis_c->ft;
	sep->p_ft = lll_cis_p->ft;
	sep->c_max_pdu = sys_cpu_to_le16(lll_cis_c->max_pdu);
	sep->p_max_pdu = sys_cpu_to_le16(lll_cis_p->max_pdu);
	sep->interval = sys_cpu_to_le16(cig->iso_interval);

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	if (is_central) {
		/* One fewer CIS creation outstanding on the central side */
		cis_pending_count--;
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
4450 #endif /* CONFIG_BT_CTLR_CONN_ISO */
4451 
4452 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* Generate the HCI LE Periodic Advertising Sync Transfer Received event
 * from a LL PAST-received node.
 */
static void le_per_adv_sync_transfer_received(struct pdu_data *pdu_data_rx,
					      struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_le_past_received *sep;
	struct node_rx_past_received *se;
	struct ll_sync_set *sync;
	void *node;

	/* Skip entirely when the host masked the event */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_PAST_RECEIVED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_PAST_RECEIVED, sizeof(*sep));

	/* Check for pdu field being aligned before accessing PAST received
	 * event.
	 */
	node = pdu_data_rx;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_past_received));

	se = node;
	sep->status = se->rx_sync.status;

	sync = node_rx->rx_ftr.param;

	/* Resolved address, if private, has been populated in ULL */
	sep->addr.type = sync->peer_id_addr_type;
	if (sync->peer_addr_resolved) {
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(sep->addr.type);
	}
	(void)memcpy(sep->addr.a.val, sync->peer_id_addr, BDADDR_SIZE);

	sep->adv_sid = sync->sid;
	sep->phy = find_lsb_set(se->rx_sync.phy);
	sep->interval = sys_cpu_to_le16(se->rx_sync.interval);
	sep->clock_accuracy = se->rx_sync.sca;
	sep->conn_handle = sys_cpu_to_le16(se->conn_handle);
	sep->service_data = sys_cpu_to_le16(se->service_data);
	sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);
}
4495 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4496 
4497 static int controller_cmd_handle(uint16_t  ocf, struct net_buf *cmd,
4498 				 struct net_buf **evt, void **node_rx)
4499 {
4500 	switch (ocf) {
4501 	case BT_OCF(BT_HCI_OP_LE_SET_EVENT_MASK):
4502 		le_set_event_mask(cmd, evt);
4503 		break;
4504 
4505 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE):
4506 		le_read_buffer_size(cmd, evt);
4507 		break;
4508 
4509 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4510 	case BT_OCF(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2):
4511 		le_read_buffer_size_v2(cmd, evt);
4512 		break;
4513 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4514 
4515 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_FEATURES):
4516 		le_read_local_features(cmd, evt);
4517 		break;
4518 
4519 	case BT_OCF(BT_HCI_OP_LE_SET_RANDOM_ADDRESS):
4520 		le_set_random_address(cmd, evt);
4521 		break;
4522 
4523 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
4524 	case BT_OCF(BT_HCI_OP_LE_READ_FAL_SIZE):
4525 		le_read_fal_size(cmd, evt);
4526 		break;
4527 
4528 	case BT_OCF(BT_HCI_OP_LE_CLEAR_FAL):
4529 		le_clear_fal(cmd, evt);
4530 		break;
4531 
4532 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_FAL):
4533 		le_add_dev_to_fal(cmd, evt);
4534 		break;
4535 
4536 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_FAL):
4537 		le_rem_dev_from_fal(cmd, evt);
4538 		break;
4539 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
4540 
4541 #if defined(CONFIG_BT_CTLR_CRYPTO)
4542 	case BT_OCF(BT_HCI_OP_LE_ENCRYPT):
4543 		le_encrypt(cmd, evt);
4544 		break;
4545 #endif /* CONFIG_BT_CTLR_CRYPTO */
4546 
4547 	case BT_OCF(BT_HCI_OP_LE_RAND):
4548 		le_rand(cmd, evt);
4549 		break;
4550 
4551 	case BT_OCF(BT_HCI_OP_LE_READ_SUPP_STATES):
4552 		le_read_supp_states(cmd, evt);
4553 		break;
4554 
4555 #if defined(CONFIG_BT_BROADCASTER)
4556 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_PARAM):
4557 		le_set_adv_param(cmd, evt);
4558 		break;
4559 
4560 	case BT_OCF(BT_HCI_OP_LE_READ_ADV_CHAN_TX_POWER):
4561 		le_read_adv_chan_tx_power(cmd, evt);
4562 		break;
4563 
4564 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_DATA):
4565 		le_set_adv_data(cmd, evt);
4566 		break;
4567 
4568 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_RSP_DATA):
4569 		le_set_scan_rsp_data(cmd, evt);
4570 		break;
4571 
4572 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_ENABLE):
4573 		le_set_adv_enable(cmd, evt);
4574 		break;
4575 
4576 #if defined(CONFIG_BT_CTLR_ADV_ISO)
4577 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG):
4578 		le_create_big(cmd, evt);
4579 		break;
4580 
4581 	case BT_OCF(BT_HCI_OP_LE_CREATE_BIG_TEST):
4582 		le_create_big_test(cmd, evt);
4583 		break;
4584 
4585 	case BT_OCF(BT_HCI_OP_LE_TERMINATE_BIG):
4586 		le_terminate_big(cmd, evt);
4587 		break;
4588 #endif /* CONFIG_BT_CTLR_ADV_ISO */
4589 #endif /* CONFIG_BT_BROADCASTER */
4590 
4591 #if defined(CONFIG_BT_OBSERVER)
4592 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_PARAM):
4593 		le_set_scan_param(cmd, evt);
4594 		break;
4595 
4596 	case BT_OCF(BT_HCI_OP_LE_SET_SCAN_ENABLE):
4597 		le_set_scan_enable(cmd, evt);
4598 		break;
4599 
4600 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
4601 	case BT_OCF(BT_HCI_OP_LE_BIG_CREATE_SYNC):
4602 		le_big_create_sync(cmd, evt);
4603 		break;
4604 
4605 	case BT_OCF(BT_HCI_OP_LE_BIG_TERMINATE_SYNC):
4606 		le_big_terminate_sync(cmd, evt, node_rx);
4607 		break;
4608 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
4609 #endif /* CONFIG_BT_OBSERVER */
4610 
4611 #if defined(CONFIG_BT_CENTRAL)
4612 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN):
4613 		le_create_connection(cmd, evt);
4614 		break;
4615 
4616 	case BT_OCF(BT_HCI_OP_LE_CREATE_CONN_CANCEL):
4617 		le_create_conn_cancel(cmd, evt, node_rx);
4618 		break;
4619 
4620 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF):
4621 		le_set_host_chan_classif(cmd, evt);
4622 		break;
4623 
4624 #if defined(CONFIG_BT_CTLR_LE_ENC)
4625 	case BT_OCF(BT_HCI_OP_LE_START_ENCRYPTION):
4626 		le_start_encryption(cmd, evt);
4627 		break;
4628 #endif /* CONFIG_BT_CTLR_LE_ENC */
4629 
4630 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
4631 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS):
4632 		le_set_cig_parameters(cmd, evt);
4633 		break;
4634 	case BT_OCF(BT_HCI_OP_LE_SET_CIG_PARAMS_TEST):
4635 		le_set_cig_params_test(cmd, evt);
4636 		break;
4637 	case BT_OCF(BT_HCI_OP_LE_CREATE_CIS):
4638 		le_create_cis(cmd, evt);
4639 		break;
4640 	case BT_OCF(BT_HCI_OP_LE_REMOVE_CIG):
4641 		le_remove_cig(cmd, evt);
4642 		break;
4643 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
4644 #endif /* CONFIG_BT_CENTRAL */
4645 
4646 #if defined(CONFIG_BT_PERIPHERAL)
4647 #if defined(CONFIG_BT_CTLR_LE_ENC)
4648 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_REPLY):
4649 		le_ltk_req_reply(cmd, evt);
4650 		break;
4651 
4652 	case BT_OCF(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY):
4653 		le_ltk_req_neg_reply(cmd, evt);
4654 		break;
4655 #endif /* CONFIG_BT_CTLR_LE_ENC */
4656 
4657 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
4658 	case BT_OCF(BT_HCI_OP_LE_ACCEPT_CIS):
4659 		le_accept_cis(cmd, evt);
4660 		break;
4661 	case BT_OCF(BT_HCI_OP_LE_REJECT_CIS):
4662 		le_reject_cis(cmd, evt);
4663 		break;
4664 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
4665 #endif /* CONFIG_BT_PERIPHERAL */
4666 
4667 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
4668 	case BT_OCF(BT_HCI_OP_LE_REQ_PEER_SC):
4669 		le_req_peer_sca(cmd, evt);
4670 		break;
4671 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
4672 
4673 #if defined(CONFIG_BT_CTLR_ISO)
4674 	case BT_OCF(BT_HCI_OP_LE_SETUP_ISO_PATH):
4675 		le_setup_iso_path(cmd, evt);
4676 		break;
4677 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ISO_PATH):
4678 		le_remove_iso_path(cmd, evt);
4679 		break;
4680 	case BT_OCF(BT_HCI_OP_LE_ISO_TEST_END):
4681 		le_iso_test_end(cmd, evt);
4682 		break;
4683 #endif /* CONFIG_BT_CTLR_ISO */
4684 
4685 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4686 	case BT_OCF(BT_HCI_OP_LE_ISO_TRANSMIT_TEST):
4687 		le_iso_transmit_test(cmd, evt);
4688 		break;
4689 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_TX_SYNC):
4690 		le_read_iso_tx_sync(cmd, evt);
4691 		break;
4692 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
4693 
4694 #if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
4695 	case BT_OCF(BT_HCI_OP_LE_ISO_RECEIVE_TEST):
4696 		le_iso_receive_test(cmd, evt);
4697 		break;
4698 	case BT_OCF(BT_HCI_OP_LE_ISO_READ_TEST_COUNTERS):
4699 		le_iso_read_test_counters(cmd, evt);
4700 		break;
4701 #if defined(CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY)
4702 	case BT_OCF(BT_HCI_OP_LE_READ_ISO_LINK_QUALITY):
4703 		le_read_iso_link_quality(cmd, evt);
4704 		break;
4705 #endif /* CONFIG_BT_CTLR_READ_ISO_LINK_QUALITY */
4706 #endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */
4707 
4708 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
4709 	case BT_OCF(BT_HCI_OP_LE_SET_HOST_FEATURE):
4710 		le_set_host_feature(cmd, evt);
4711 		break;
4712 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
4713 
4714 #if defined(CONFIG_BT_CONN)
4715 	case BT_OCF(BT_HCI_OP_LE_READ_CHAN_MAP):
4716 		le_read_chan_map(cmd, evt);
4717 		break;
4718 
4719 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
4720 	case BT_OCF(BT_HCI_OP_LE_READ_REMOTE_FEATURES):
4721 		le_read_remote_features(cmd, evt);
4722 		break;
4723 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
4724 
4725 	case BT_OCF(BT_HCI_OP_LE_CONN_UPDATE):
4726 		le_conn_update(cmd, evt);
4727 		break;
4728 
4729 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
4730 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY):
4731 		le_conn_param_req_reply(cmd, evt);
4732 		break;
4733 
4734 	case BT_OCF(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY):
4735 		le_conn_param_req_neg_reply(cmd, evt);
4736 		break;
4737 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4738 
4739 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4740 	case BT_OCF(BT_HCI_OP_LE_SET_DATA_LEN):
4741 		le_set_data_len(cmd, evt);
4742 		break;
4743 
4744 	case BT_OCF(BT_HCI_OP_LE_READ_DEFAULT_DATA_LEN):
4745 		le_read_default_data_len(cmd, evt);
4746 		break;
4747 
4748 	case BT_OCF(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN):
4749 		le_write_default_data_len(cmd, evt);
4750 		break;
4751 
4752 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_DATA_LEN):
4753 		le_read_max_data_len(cmd, evt);
4754 		break;
4755 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4756 
4757 #if defined(CONFIG_BT_CTLR_PHY)
4758 	case BT_OCF(BT_HCI_OP_LE_READ_PHY):
4759 		le_read_phy(cmd, evt);
4760 		break;
4761 
4762 	case BT_OCF(BT_HCI_OP_LE_SET_DEFAULT_PHY):
4763 		le_set_default_phy(cmd, evt);
4764 		break;
4765 
4766 	case BT_OCF(BT_HCI_OP_LE_SET_PHY):
4767 		le_set_phy(cmd, evt);
4768 		break;
4769 #endif /* CONFIG_BT_CTLR_PHY */
4770 #endif /* CONFIG_BT_CONN */
4771 
4772 #if defined(CONFIG_BT_CTLR_ADV_EXT)
4773 #if defined(CONFIG_BT_BROADCASTER)
4774 	case BT_OCF(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR):
4775 		le_set_adv_set_random_addr(cmd, evt);
4776 		break;
4777 
4778 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_PARAM):
4779 		le_set_ext_adv_param(cmd, evt);
4780 		break;
4781 
4782 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_DATA):
4783 		le_set_ext_adv_data(cmd, evt);
4784 		break;
4785 
4786 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_RSP_DATA):
4787 		le_set_ext_scan_rsp_data(cmd, evt);
4788 		break;
4789 
4790 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE):
4791 		le_set_ext_adv_enable(cmd, evt);
4792 		break;
4793 
4794 	case BT_OCF(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN):
4795 		le_read_max_adv_data_len(cmd, evt);
4796 		break;
4797 
4798 	case BT_OCF(BT_HCI_OP_LE_READ_NUM_ADV_SETS):
4799 		le_read_num_adv_sets(cmd, evt);
4800 		break;
4801 
4802 	case BT_OCF(BT_HCI_OP_LE_REMOVE_ADV_SET):
4803 		le_remove_adv_set(cmd, evt);
4804 		break;
4805 
4806 	case BT_OCF(BT_HCI_OP_CLEAR_ADV_SETS):
4807 		le_clear_adv_sets(cmd, evt);
4808 		break;
4809 
4810 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
4811 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_PARAM):
4812 		le_set_per_adv_param(cmd, evt);
4813 		break;
4814 
4815 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_DATA):
4816 		le_set_per_adv_data(cmd, evt);
4817 		break;
4818 
4819 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_ENABLE):
4820 		le_set_per_adv_enable(cmd, evt);
4821 		break;
4822 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
4823 #endif /* CONFIG_BT_BROADCASTER */
4824 
4825 #if defined(CONFIG_BT_OBSERVER)
4826 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM):
4827 		le_set_ext_scan_param(cmd, evt);
4828 		break;
4829 
4830 	case BT_OCF(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE):
4831 		le_set_ext_scan_enable(cmd, evt);
4832 		break;
4833 
4834 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
4835 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC):
4836 		le_per_adv_create_sync(cmd, evt);
4837 		break;
4838 
4839 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL):
4840 		le_per_adv_create_sync_cancel(cmd, evt, node_rx);
4841 		break;
4842 
4843 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC):
4844 		le_per_adv_terminate_sync(cmd, evt);
4845 		break;
4846 
4847 	case BT_OCF(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE):
4848 		le_per_adv_recv_enable(cmd, evt);
4849 		break;
4850 
4851 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
4852 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST):
4853 		le_add_dev_to_pal(cmd, evt);
4854 		break;
4855 
4856 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST):
4857 		le_rem_dev_from_pal(cmd, evt);
4858 		break;
4859 
4860 	case BT_OCF(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST):
4861 		le_clear_pal(cmd, evt);
4862 		break;
4863 
4864 	case BT_OCF(BT_HCI_OP_LE_READ_PER_ADV_LIST_SIZE):
4865 		le_read_pal_size(cmd, evt);
4866 		break;
4867 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
4868 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
4869 #endif /* CONFIG_BT_OBSERVER */
4870 
4871 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
4872 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_SYNC_TRANSFER):
4873 		le_per_adv_sync_transfer(cmd, evt);
4874 		break;
4875 
4876 	case BT_OCF(BT_HCI_OP_LE_PER_ADV_SET_INFO_TRANSFER):
4877 		le_per_adv_set_info_transfer(cmd, evt);
4878 		break;
4879 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
4880 
4881 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
4882 	case BT_OCF(BT_HCI_OP_LE_PAST_PARAM):
4883 		le_past_param(cmd, evt);
4884 		break;
4885 
4886 	case BT_OCF(BT_HCI_OP_LE_DEFAULT_PAST_PARAM):
4887 		le_default_past_param(cmd, evt);
4888 		break;
4889 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
4890 
4891 #if defined(CONFIG_BT_CONN)
4892 #if defined(CONFIG_BT_CENTRAL)
4893 	case BT_OCF(BT_HCI_OP_LE_EXT_CREATE_CONN):
4894 		le_ext_create_connection(cmd, evt);
4895 		break;
4896 #endif /* CONFIG_BT_CENTRAL */
4897 #endif /* CONFIG_BT_CONN */
4898 #endif /* CONFIG_BT_CTLR_ADV_EXT */
4899 
4900 #if defined(CONFIG_BT_CTLR_PRIVACY)
4901 	case BT_OCF(BT_HCI_OP_LE_ADD_DEV_TO_RL):
4902 		le_add_dev_to_rl(cmd, evt);
4903 		break;
4904 	case BT_OCF(BT_HCI_OP_LE_REM_DEV_FROM_RL):
4905 		le_rem_dev_from_rl(cmd, evt);
4906 		break;
4907 	case BT_OCF(BT_HCI_OP_LE_CLEAR_RL):
4908 		le_clear_rl(cmd, evt);
4909 		break;
4910 	case BT_OCF(BT_HCI_OP_LE_READ_RL_SIZE):
4911 		le_read_rl_size(cmd, evt);
4912 		break;
4913 	case BT_OCF(BT_HCI_OP_LE_READ_PEER_RPA):
4914 		le_read_peer_rpa(cmd, evt);
4915 		break;
4916 	case BT_OCF(BT_HCI_OP_LE_READ_LOCAL_RPA):
4917 		le_read_local_rpa(cmd, evt);
4918 		break;
4919 	case BT_OCF(BT_HCI_OP_LE_SET_ADDR_RES_ENABLE):
4920 		le_set_addr_res_enable(cmd, evt);
4921 		break;
4922 	case BT_OCF(BT_HCI_OP_LE_SET_RPA_TIMEOUT):
4923 		le_set_rpa_timeout(cmd, evt);
4924 		break;
4925 	case BT_OCF(BT_HCI_OP_LE_SET_PRIVACY_MODE):
4926 		le_set_privacy_mode(cmd, evt);
4927 		break;
4928 #endif /* CONFIG_BT_CTLR_PRIVACY */
4929 
4930 	case BT_OCF(BT_HCI_OP_LE_READ_TX_POWER):
4931 		le_read_tx_power(cmd, evt);
4932 		break;
4933 
4934 #if defined(CONFIG_BT_CTLR_DF)
4935 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
4936 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_PARAMS):
4937 		le_df_set_cl_cte_tx_params(cmd, evt);
4938 		break;
4939 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_TX_ENABLE):
4940 		le_df_set_cl_cte_enable(cmd, evt);
4941 		break;
4942 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
4943 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
4944 	case BT_OCF(BT_HCI_OP_LE_SET_CL_CTE_SAMPLING_ENABLE):
4945 		le_df_set_cl_iq_sampling_enable(cmd, evt);
4946 		break;
4947 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
4948 	case BT_OCF(BT_HCI_OP_LE_READ_ANT_INFO):
4949 		le_df_read_ant_inf(cmd, evt);
4950 		break;
4951 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
4952 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_TX_PARAMS):
4953 		le_df_set_conn_cte_tx_params(cmd, evt);
4954 		break;
4955 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
4956 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
4957 	case BT_OCF(BT_HCI_OP_LE_SET_CONN_CTE_RX_PARAMS):
4958 		le_df_set_conn_cte_rx_params(cmd, evt);
4959 		break;
4960 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
4961 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
4962 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_REQ_ENABLE):
4963 		le_df_set_conn_cte_req_enable(cmd, evt);
4964 		break;
4965 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
4966 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
4967 	case BT_OCF(BT_HCI_OP_LE_CONN_CTE_RSP_ENABLE):
4968 		le_df_set_conn_cte_rsp_enable(cmd, evt);
4969 		break;
4970 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
4971 #endif /* CONFIG_BT_CTLR_DF */
4972 
4973 #if defined(CONFIG_BT_CTLR_DTM_HCI)
4974 	case BT_OCF(BT_HCI_OP_LE_RX_TEST):
4975 		le_rx_test(cmd, evt);
4976 		break;
4977 	case BT_OCF(BT_HCI_OP_LE_TX_TEST):
4978 		le_tx_test(cmd, evt);
4979 		break;
4980 	case BT_OCF(BT_HCI_OP_LE_TEST_END):
4981 		le_test_end(cmd, evt);
4982 		break;
4983 	case BT_OCF(BT_HCI_OP_LE_ENH_RX_TEST):
4984 		le_enh_rx_test(cmd, evt);
4985 		break;
4986 #if defined(CONFIG_BT_CTLR_DTM_HCI_RX_V3)
4987 	case BT_OCF(BT_HCI_OP_LE_RX_TEST_V3):
4988 		le_rx_test_v3(cmd, evt);
4989 		break;
4990 #endif /* CONFIG_BT_CTLR_DTM_HCI_RX_V3 */
4991 	case BT_OCF(BT_HCI_OP_LE_ENH_TX_TEST):
4992 		le_enh_tx_test(cmd, evt);
4993 		break;
4994 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V3)
4995 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V3):
4996 		le_tx_test_v3(cmd, evt);
4997 		break;
4998 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V3 */
4999 #if defined(CONFIG_BT_CTLR_DTM_HCI_TX_V4)
5000 	case BT_OCF(BT_HCI_OP_LE_TX_TEST_V4):
5001 		le_tx_test_v4(cmd, evt);
5002 		break;
5003 #endif /* CONFIG_BT_CTLR_DTM_HCI_TX_V4 */
5004 #endif /* CONFIG_BT_CTLR_DTM_HCI */
5005 
5006 	default:
5007 		return -EINVAL;
5008 	}
5009 
5010 	return 0;
5011 }
5012 
5013 #if defined(CONFIG_BT_HCI_VS)
5014 static void vs_read_version_info(struct net_buf *buf, struct net_buf **evt)
5015 {
5016 	struct bt_hci_rp_vs_read_version_info *rp;
5017 
5018 	rp = hci_cmd_complete(evt, sizeof(*rp));
5019 
5020 	rp->status = 0x00;
5021 	rp->hw_platform = sys_cpu_to_le16(BT_HCI_VS_HW_PLAT);
5022 	rp->hw_variant = sys_cpu_to_le16(BT_HCI_VS_HW_VAR);
5023 
5024 	rp->fw_variant = 0U;
5025 	rp->fw_version = (KERNEL_VERSION_MAJOR & 0xff);
5026 	rp->fw_revision = sys_cpu_to_le16(KERNEL_VERSION_MINOR);
5027 	rp->fw_build = sys_cpu_to_le32(KERNEL_PATCHLEVEL & 0xffff);
5028 }
5029 
/* Vendor command: report the bit mask of supported vendor-specific HCI
 * commands. Bits are set according to the features compiled in.
 */
static void vs_read_supported_commands(struct net_buf *buf,
				       struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_supported_commands *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));

	rp->status = 0x00;
	(void)memset(&rp->commands[0], 0, sizeof(rp->commands));

	/* Set Version Information, Supported Commands, Supported Features. */
	rp->commands[0] |= BIT(0) | BIT(1) | BIT(2);
	/* Write BD_ADDR, Read Build Info */
	rp->commands[0] |= BIT(5) | BIT(7);
	/* Read Static Addresses, Read Key Hierarchy Roots */
	rp->commands[1] |= BIT(0) | BIT(1);
#if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
	/* Set Scan Request Reports */
	rp->commands[1] |= BIT(4);
#endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	/* Write Tx Power, Read Tx Power */
	rp->commands[1] |= BIT(5) | BIT(6);
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	/* Read Supported USB Transport Modes */
	rp->commands[1] |= BIT(7);
	/* Set USB Transport Mode */
	rp->commands[2] |= BIT(0);
#endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */
}
5061 
5062 static void vs_read_supported_features(struct net_buf *buf,
5063 				       struct net_buf **evt)
5064 {
5065 	struct bt_hci_rp_vs_read_supported_features *rp;
5066 
5067 	rp = hci_cmd_complete(evt, sizeof(*rp));
5068 
5069 	rp->status = 0x00;
5070 	(void)memset(&rp->features[0], 0x00, sizeof(rp->features));
5071 }
5072 
/* Weak default for the vendor hook that supplies controller static random
 * addresses. This stub provides none; a vendor HAL may override it to fill
 * up to @p size entries in @p addrs and return the number written.
 */
uint8_t __weak hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[],
					uint8_t size)
{
	ARG_UNUSED(addrs);
	ARG_UNUSED(size);

	return 0;
}
5081 
5082 static void vs_write_bd_addr(struct net_buf *buf, struct net_buf **evt)
5083 {
5084 	struct bt_hci_cp_vs_write_bd_addr *cmd = (void *)buf->data;
5085 
5086 	ll_addr_set(0, &cmd->bdaddr.val[0]);
5087 
5088 	*evt = cmd_complete_status(0x00);
5089 }
5090 
/* Vendor command: report a build info string composed of the Zephyr kernel
 * version plus the Kconfig-supplied vendor suffix string.
 */
static void vs_read_build_info(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_build_info *rp;

#define HCI_VS_BUILD_INFO "Zephyr OS v" \
	KERNEL_VERSION_STRING CONFIG_BT_CTLR_HCI_VS_BUILD_INFO

	const char build_info[] = HCI_VS_BUILD_INFO;

	/* Worst-case event size: event header + Command Complete header +
	 * response struct + the NUL-terminated build string.
	 */
#define BUILD_INFO_EVT_LEN (sizeof(struct bt_hci_evt_hdr) + \
			    sizeof(struct bt_hci_evt_cmd_complete) + \
			    sizeof(struct bt_hci_rp_vs_read_build_info) + \
			    sizeof(build_info))

	/* Fail the build if event Rx buffers cannot hold the whole string. */
	BUILD_ASSERT(CONFIG_BT_BUF_EVT_RX_SIZE >= BUILD_INFO_EVT_LEN);

	rp = hci_cmd_complete(evt, sizeof(*rp) + sizeof(build_info));
	rp->status = 0x00;
	memcpy(rp->info, build_info, sizeof(build_info));
}
5111 
5112 void __weak hci_vendor_read_key_hierarchy_roots(uint8_t ir[16], uint8_t er[16])
5113 {
5114 	/* Mark IR as invalid */
5115 	(void)memset(ir, 0x00, 16);
5116 
5117 	/* Mark ER as invalid */
5118 	(void)memset(er, 0x00, 16);
5119 }
5120 
/* Vendor command: report the controller's static random address(es). The
 * response has room for exactly one address; the count actually provided
 * comes from the (possibly vendor-overridden) hci_vendor_read_static_addr().
 */
static void vs_read_static_addrs(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_static_addrs *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp) +
				   sizeof(struct bt_hci_vs_static_addr));
	rp->status = 0x00;
	rp->num_addrs = hci_vendor_read_static_addr(rp->a, 1);
}
5130 
/* Vendor command: report the Identity Root (IR) and Encryption Root (ER)
 * keys via the (possibly vendor-overridden) key hierarchy hook.
 */
static void vs_read_key_hierarchy_roots(struct net_buf *buf,
					struct net_buf **evt)
{
	struct bt_hci_rp_vs_read_key_hierarchy_roots *rp;

	rp = hci_cmd_complete(evt, sizeof(*rp));
	rp->status = 0x00;
	hci_vendor_read_key_hierarchy_roots(rp->ir, rp->er);
}
5140 
5141 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
5142 static void vs_set_min_used_chans(struct net_buf *buf, struct net_buf **evt)
5143 {
5144 	struct bt_hci_cp_vs_set_min_num_used_chans *cmd = (void *)buf->data;
5145 	uint16_t handle = sys_le16_to_cpu(cmd->handle);
5146 	uint8_t status;
5147 
5148 	status = ll_set_min_used_chans(handle, cmd->phys, cmd->min_used_chans);
5149 
5150 	*evt = cmd_complete_status(status);
5151 }
5152 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
5153 
5154 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
/* Vendor command: enable or disable generation of the vendor Scan Request
 * Received event by updating the vendor event mask.
 */
static void vs_set_scan_req_reports(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_cp_vs_set_scan_req_reports *cmd = (void *)buf->data;

	if (cmd->enable) {
		vs_events_mask |= BT_EVT_MASK_VS_SCAN_REQ_RX;
	} else {
		vs_events_mask &= ~BT_EVT_MASK_VS_SCAN_REQ_RX;
	}
	*evt = cmd_complete_status(0x00);
}
5166 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
5167 
5168 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
5169 static void vs_write_tx_power_level(struct net_buf *buf, struct net_buf **evt)
5170 {
5171 	struct bt_hci_cp_vs_write_tx_power_level *cmd = (void *)buf->data;
5172 	struct bt_hci_rp_vs_write_tx_power_level *rp;
5173 	uint8_t handle_type;
5174 	uint16_t handle;
5175 	uint8_t status;
5176 
5177 	handle_type = cmd->handle_type;
5178 	handle = sys_le16_to_cpu(cmd->handle);
5179 
5180 	rp = hci_cmd_complete(evt, sizeof(*rp));
5181 	rp->selected_tx_power = cmd->tx_power_level;
5182 
5183 	status = ll_tx_pwr_lvl_set(handle_type, handle, &rp->selected_tx_power);
5184 
5185 	rp->status = status;
5186 	rp->handle_type = handle_type;
5187 	rp->handle = sys_cpu_to_le16(handle);
5188 }
5189 
5190 static void vs_read_tx_power_level(struct net_buf *buf, struct net_buf **evt)
5191 {
5192 	struct bt_hci_cp_vs_read_tx_power_level *cmd = (void *)buf->data;
5193 	struct bt_hci_rp_vs_read_tx_power_level *rp;
5194 	uint8_t handle_type;
5195 	uint16_t handle;
5196 	uint8_t status;
5197 
5198 	handle_type = cmd->handle_type;
5199 	handle = sys_le16_to_cpu(cmd->handle);
5200 
5201 	rp = hci_cmd_complete(evt, sizeof(*rp));
5202 
5203 	status = ll_tx_pwr_lvl_get(handle_type, handle, 0, &rp->tx_power_level);
5204 
5205 	rp->status = status;
5206 	rp->handle_type = handle_type;
5207 	rp->handle = sys_cpu_to_le16(handle);
5208 }
5209 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
5210 
5211 #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
/* A memory pool for vendor-specific events used for fatal error reporting purposes. */
5213 NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE,
5214 			  sizeof(struct bt_buf_data), NULL);
5215 
/* An alias for the convenience of the Controller HCI implementation. The Controller is
 * built for a particular architecture, hence the alias avoids conditional compilation.
 * The Host may not be aware of the hardware architecture the Controller is running on,
 * hence CPU data types for all supported architectures should be available during the
 * build, hence the alias is defined here.
 */
5222 #if defined(CONFIG_CPU_CORTEX_M)
5223 typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;
5224 
/* Serialize the basic Cortex-M exception stack frame registers (a1-a4, ip,
 * lr, xPSR) into the little-endian CPU data layout of the vendor Fatal
 * Error event.
 */
static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
				       const struct arch_esf *esf)
{
	cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
	cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
	cpu_data->a3 = sys_cpu_to_le32(esf->basic.a3);
	cpu_data->a4 = sys_cpu_to_le32(esf->basic.a4);
	cpu_data->ip = sys_cpu_to_le32(esf->basic.ip);
	cpu_data->lr = sys_cpu_to_le32(esf->basic.lr);
	cpu_data->xpsr = sys_cpu_to_le32(esf->basic.xpsr);
}
5236 #endif /* CONFIG_CPU_CORTEX_M */
5237 
5238 static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
5239 {
5240 	struct net_buf *buf;
5241 
5242 	buf = net_buf_alloc(&vs_err_tx_pool, K_FOREVER);
5243 	if (buf) {
5244 		struct bt_hci_evt_le_meta_event *me;
5245 		struct bt_hci_evt_hdr *hdr;
5246 
5247 		net_buf_reserve(buf, BT_BUF_RESERVE);
5248 		bt_buf_set_type(buf, BT_BUF_EVT);
5249 
5250 		hdr = net_buf_add(buf, sizeof(*hdr));
5251 		hdr->evt = BT_HCI_EVT_VENDOR;
5252 		hdr->len = len + sizeof(*me);
5253 
5254 		me = net_buf_add(buf, sizeof(*me));
5255 		me->subevent = subevt;
5256 	}
5257 
5258 	return buf;
5259 }
5260 
/* Build a vendor Fatal Error event carrying the exception stack frame active
 * when a fatal CPU error occurred. Returns NULL if no event buffer could be
 * created.
 */
struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf)
{
	/* Prepare vendor specific HCI Fatal Error event */
	struct bt_hci_vs_fatal_error_stack_frame *sf;
	bt_hci_vs_fatal_error_cpu_data *cpu_data;
	struct net_buf *buf;

	buf = vs_err_evt_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_STACK_FRAME,
				sizeof(*sf) + sizeof(*cpu_data));
	if (buf != NULL) {
		sf = net_buf_add(buf, (sizeof(*sf) + sizeof(*cpu_data)));
		sf->reason = sys_cpu_to_le32(reason);
		/* NOTE(review): cpu_type is fixed to Cortex-M, matching the
		 * only vs_err_fatal_cpu_data_fill() variant defined above.
		 */
		sf->cpu_type = BT_HCI_EVT_VS_ERROR_CPU_TYPE_CORTEX_M;

		vs_err_fatal_cpu_data_fill(
			(bt_hci_vs_fatal_error_cpu_data *)sf->cpu_data, esf);
	} else {
		LOG_ERR("Can't create HCI Fatal Error event");
	}

	return buf;
}
5283 
/* Build a vendor Fatal Error event carrying a source location (file name and
 * line number), optionally prefixed by the program counter for TRACE-type
 * data. The file name is extracted from @p file_path and truncated if the
 * total payload would exceed BT_BUF_EVT_RX_SIZE. Returns NULL when
 * @p file_path is NULL, contains no file name, or allocation fails.
 */
static struct net_buf *hci_vs_err_trace_create(uint8_t data_type,
					       const char *file_path,
					       uint32_t line, uint64_t pc)
{
	uint32_t file_name_len = 0U, pos = 0U;
	struct net_buf *buf = NULL;

	if (file_path) {
		/* Extract file name from a path */
		while (file_path[file_name_len] != '\0') {
			if (file_path[file_name_len] == '/') {
				pos = file_name_len + 1;
			}
			file_name_len++;
		}
		file_path += pos;
		file_name_len -= pos;

		/* If file name was found in file_path, in other words: file_path is not empty
		 * string and is not `foo/bar/`.
		 */
		if (file_name_len) {
			/* Total data length: len = file name strlen + \0 + sizeof(line number)
			 * Maximum length of an HCI event data is BT_BUF_EVT_RX_SIZE. If total data
			 * length exceeds this maximum, truncate file name.
			 */
			uint32_t data_len = 1 + sizeof(line);

			/* If a buffer is created for a TRACE data, include sizeof(pc) in total
			 * length.
			 */
			if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
				data_len += sizeof(pc);
			}

			if (data_len + file_name_len > BT_BUF_EVT_RX_SIZE) {
				uint32_t overflow_len =
					file_name_len + data_len - BT_BUF_EVT_RX_SIZE;

				/* Truncate the file name length by number of overflow bytes */
				file_name_len -= overflow_len;
			}

			/* Get total event data length including file name length */
			data_len += file_name_len;

			/* Prepare vendor specific HCI Fatal Error event */
			buf = vs_err_evt_create(data_type, data_len);
			if (buf != NULL) {
				/* Payload layout: [pc (TRACE only)] file name
				 * NUL line-number, all little-endian.
				 */
				if (data_type == BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE) {
					net_buf_add_le64(buf, pc);
				}
				net_buf_add_mem(buf, file_path, file_name_len);
				net_buf_add_u8(buf, STR_NULL_TERMINATOR);
				net_buf_add_le32(buf, line);
			} else {
				LOG_ERR("Can't create HCI Fatal Error event");
			}
		}
	}

	return buf;
}
5347 
/* Build a vendor Fatal Error TRACE event for the given source location and
 * program counter. Returns NULL on failure (see hci_vs_err_trace_create()).
 */
struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc)
{
	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_TRACE, file, line, pc);
}
5352 
/* Build a vendor Fatal Error event for a controller assert at the given
 * source location. Returns NULL on failure (see hci_vs_err_trace_create()).
 */
struct net_buf *hci_vs_err_assert(const char *file, uint32_t line)
{
	/* ASSERT data does not contain PC counter, because of that zero constant is used */
	return hci_vs_err_trace_create(BT_HCI_EVT_VS_ERROR_DATA_TYPE_CTRL_ASSERT, file, line, 0U);
}
5358 #endif /* CONFIG_BT_HCI_VS_FATAL_ERROR */
5359 
5360 #if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate the vendor LE Connectionless IQ Report event with 16-bit I/Q
 * samples for a CTE received on a periodic advertising sync. The event is
 * emitted only when enabled in vs_events_mask and while CTE sampling is
 * still enabled and the sync has not been lost.
 */
static void vs_le_df_connectionless_iq_report(struct pdu_data *pdu_rx, struct node_rx_pdu *node_rx,
					      struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connectionless_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_sync *lll;
	uint8_t samples_cnt;
	int16_t rssi;
	uint16_t sync_handle;
	uint16_t per_evt_counter;
	struct ll_sync_set *sync = NULL;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: nothing to report. */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTIONLESS_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

	sync = HDR_LLL2ULL(lll);

	/* TX LL thread has higher priority than RX thread. It may happen that
	 * host successfully disables CTE sampling in the meantime.
	 * It should be verified here, to avoid reporting IQ samples after
	 * the functionality was disabled or if sync was lost.
	 */
	if (ull_df_sync_cfg_is_not_enabled(&lll->df_cfg) || !sync->timeout_reload) {
		/* Drop further processing of the event. */
		return;
	}

	/* Get the sync handle corresponding to the LLL context passed in the
	 * node rx footer field.
	 */
	sync_handle = ull_sync_handle_get(sync);
	per_evt_counter = iq_report->event_counter;

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE
	 * value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		       (sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	/* RSSI is reported in units of 0.1 dBm. */
	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->sync_handle = sys_cpu_to_le16(sync_handle);
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->chan_idx = iq_report->chan_idx;
	sep->per_evt_counter = sys_cpu_to_le16(per_evt_counter);

	/* Slot duration is only meaningful locally for AoA; for AoD it is
	 * implied by the CTE type (1 us or 2 us switching).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5447 #endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
5448 
5449 #if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Generate the vendor LE Connection IQ Report event with 16-bit I/Q samples
 * for a CTE received on a connection. The event is emitted only when enabled
 * in vs_events_mask and while connection CTE sampling is still enabled.
 */
static void vs_le_df_connection_iq_report(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct bt_hci_evt_vs_le_connection_iq_report *sep;
	struct node_rx_iq_report *iq_report;
	struct lll_conn *lll;
	uint8_t samples_cnt;
	uint8_t phy_rx;
	int16_t rssi;

	iq_report = (struct node_rx_iq_report *)node_rx;

	/* Host has not enabled this vendor event: nothing to report. */
	if (!(vs_events_mask & BT_EVT_MASK_VS_LE_CONNECTION_IQ_REPORT)) {
		return;
	}

	lll = iq_report->rx.rx_ftr.param;

#if defined(CONFIG_BT_CTLR_PHY)
	phy_rx = lll->phy_rx;

	/* Make sure the report is generated for connection on PHY UNCODED */
	LL_ASSERT(phy_rx != PHY_CODED);
#else
	phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

	/* TX LL thread has higher priority than RX thread. It may happen that host successfully
	 * disables CTE sampling in the meantime. It should be verified here, to avoid reporting
	 * IQ samples after the functionality was disabled.
	 */
	if (ull_df_conn_cfg_is_not_enabled(&lll->df_rx_cfg)) {
		/* Drop further processing of the event. */
		return;
	}

	/* If packet status does not indicate insufficient resources for IQ samples and for
	 * some reason sample_count is zero, inform Host about lack of valid IQ samples by
	 * storing single I_sample and Q_sample with BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE value.
	 */
	if (iq_report->packet_status == BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		samples_cnt = 0U;
	} else {
		samples_cnt = MAX(1, iq_report->sample_count);
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT,
			(sizeof(*sep) + (samples_cnt * sizeof(struct bt_hci_le_iq_sample16))));

	/* RSSI is reported in units of 0.1 dBm. */
	rssi = RSSI_DBM_TO_DECI_DBM(iq_report->rx.rx_ftr.rssi);

	sep->conn_handle = sys_cpu_to_le16(iq_report->rx.hdr.handle);
	sep->rx_phy = phy_rx;
	sep->rssi = sys_cpu_to_le16(rssi);
	sep->rssi_ant_id = iq_report->rssi_ant_id;
	sep->cte_type = iq_report->cte_info.type;

	sep->data_chan_idx = iq_report->chan_idx;
	sep->conn_evt_counter = sys_cpu_to_le16(iq_report->event_counter);

	/* Slot duration is only meaningful locally for AoA; for AoD it is
	 * implied by the CTE type (1 us or 2 us switching).
	 */
	if (sep->cte_type == BT_HCI_LE_AOA_CTE) {
		sep->slot_durations = iq_report->local_slot_durations;
	} else if (sep->cte_type == BT_HCI_LE_AOD_CTE_1US) {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_1US;
	} else {
		sep->slot_durations = BT_HCI_LE_ANTENNA_SWITCHING_SLOT_2US;
	}

	sep->packet_status = iq_report->packet_status;

	if (iq_report->packet_status != BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES) {
		if (iq_report->sample_count == 0U) {
			sep->sample[0].i = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
			sep->sample[0].q = sys_cpu_to_le16(BT_HCI_VS_LE_CTE_REPORT_NO_VALID_SAMPLE);
		} else {
			for (uint8_t idx = 0U; idx < samples_cnt; ++idx) {
				sep->sample[idx].i = sys_cpu_to_le16(iq_report->sample[idx].i);
				sep->sample[idx].q = sys_cpu_to_le16(iq_report->sample[idx].q);
			}
		}
	}

	sep->sample_count = samples_cnt;
}
5533 #endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
5534 
5535 #if defined(CONFIG_BT_HCI_MESH_EXT)
5536 static void mesh_get_opts(struct net_buf *buf, struct net_buf **evt)
5537 {
5538 	struct bt_hci_rp_mesh_get_opts *rp;
5539 
5540 	rp = hci_cmd_complete(evt, sizeof(*rp));
5541 
5542 	rp->status = 0x00;
5543 	rp->opcode = BT_HCI_OC_MESH_GET_OPTS;
5544 
5545 	rp->revision = BT_HCI_MESH_REVISION;
5546 	rp->ch_map = 0x7;
5547 	/*@todo: nRF51 only */
5548 	rp->min_tx_power = -30;
5549 	/*@todo: nRF51 only */
5550 	rp->max_tx_power = 4;
5551 	rp->max_scan_filter = CONFIG_BT_CTLR_MESH_SCAN_FILTERS;
5552 	rp->max_filter_pattern = CONFIG_BT_CTLR_MESH_SF_PATTERNS;
5553 	rp->max_adv_slot = 1U;
5554 	rp->evt_prefix_len = 0x01;
5555 	rp->evt_prefix = BT_HCI_MESH_EVT_PREFIX;
5556 }
5557 
5558 static void mesh_set_scan_filter(struct net_buf *buf, struct net_buf **evt)
5559 {
5560 	struct bt_hci_cp_mesh_set_scan_filter *cmd = (void *)buf->data;
5561 	struct bt_hci_rp_mesh_set_scan_filter *rp;
5562 	uint8_t filter = cmd->scan_filter - 1;
5563 	struct scan_filter *f;
5564 	uint8_t status = 0x00;
5565 	uint8_t i;
5566 
5567 	if (filter > ARRAY_SIZE(scan_filters) ||
5568 	    cmd->num_patterns > CONFIG_BT_CTLR_MESH_SF_PATTERNS) {
5569 		status = BT_HCI_ERR_INVALID_PARAM;
5570 		goto exit;
5571 	}
5572 
5573 	if (filter == sf_curr) {
5574 		status = BT_HCI_ERR_CMD_DISALLOWED;
5575 		goto exit;
5576 	}
5577 
5578 	/* duplicate filtering not supported yet */
5579 	if (cmd->filter_dup) {
5580 		status = BT_HCI_ERR_INVALID_PARAM;
5581 		goto exit;
5582 	}
5583 
5584 	f = &scan_filters[filter];
5585 	for (i = 0U; i < cmd->num_patterns; i++) {
5586 		if (!cmd->patterns[i].pattern_len ||
5587 		    cmd->patterns[i].pattern_len >
5588 		    BT_HCI_MESH_PATTERN_LEN_MAX) {
5589 			status = BT_HCI_ERR_INVALID_PARAM;
5590 			goto exit;
5591 		}
5592 		f->lengths[i] = cmd->patterns[i].pattern_len;
5593 		memcpy(f->patterns[i], cmd->patterns[i].pattern, f->lengths[i]);
5594 	}
5595 
5596 	f->count = cmd->num_patterns;
5597 
5598 exit:
5599 	rp = hci_cmd_complete(evt, sizeof(*rp));
5600 	rp->status = status;
5601 	rp->opcode = BT_HCI_OC_MESH_SET_SCAN_FILTER;
5602 	rp->scan_filter = filter + 1;
5603 }
5604 
5605 static void mesh_advertise(struct net_buf *buf, struct net_buf **evt)
5606 {
5607 	struct bt_hci_cp_mesh_advertise *cmd = (void *)buf->data;
5608 	struct bt_hci_rp_mesh_advertise *rp;
5609 	uint8_t adv_slot = cmd->adv_slot;
5610 	uint8_t status;
5611 
5612 	status = ll_mesh_advertise(adv_slot,
5613 				   cmd->own_addr_type, cmd->random_addr.val,
5614 				   cmd->ch_map, cmd->tx_power,
5615 				   cmd->min_tx_delay, cmd->max_tx_delay,
5616 				   cmd->retx_count, cmd->retx_interval,
5617 				   cmd->scan_duration, cmd->scan_delay,
5618 				   cmd->scan_filter, cmd->data_len, cmd->data);
5619 	if (!status) {
5620 		/* Yields 0xFF if no scan filter selected */
5621 		sf_curr = cmd->scan_filter - 1;
5622 	}
5623 
5624 	rp = hci_cmd_complete(evt, sizeof(*rp));
5625 	rp->status = status;
5626 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE;
5627 	rp->adv_slot = adv_slot;
5628 }
5629 
5630 static void mesh_advertise_cancel(struct net_buf *buf, struct net_buf **evt)
5631 {
5632 	struct bt_hci_cp_mesh_advertise_cancel *cmd = (void *)buf->data;
5633 	struct bt_hci_rp_mesh_advertise_cancel *rp;
5634 	uint8_t adv_slot = cmd->adv_slot;
5635 	uint8_t status;
5636 
5637 	status = ll_mesh_advertise_cancel(adv_slot);
5638 	if (!status) {
5639 		/* Yields 0xFF if no scan filter selected */
5640 		sf_curr = 0xFF;
5641 	}
5642 
5643 	rp = hci_cmd_complete(evt, sizeof(*rp));
5644 	rp->status = status;
5645 	rp->opcode = BT_HCI_OC_MESH_ADVERTISE_CANCEL;
5646 	rp->adv_slot = adv_slot;
5647 }
5648 
5649 static int mesh_cmd_handle(struct net_buf *cmd, struct net_buf **evt)
5650 {
5651 	struct bt_hci_cp_mesh *cp_mesh;
5652 	uint8_t mesh_op;
5653 
5654 	if (cmd->len < sizeof(*cp_mesh)) {
5655 		LOG_ERR("No HCI VSD Command header");
5656 		return -EINVAL;
5657 	}
5658 
5659 	cp_mesh = net_buf_pull_mem(cmd, sizeof(*cp_mesh));
5660 	mesh_op = cp_mesh->opcode;
5661 
5662 	switch (mesh_op) {
5663 	case BT_HCI_OC_MESH_GET_OPTS:
5664 		mesh_get_opts(cmd, evt);
5665 		break;
5666 
5667 	case BT_HCI_OC_MESH_SET_SCAN_FILTER:
5668 		mesh_set_scan_filter(cmd, evt);
5669 		break;
5670 
5671 	case BT_HCI_OC_MESH_ADVERTISE:
5672 		mesh_advertise(cmd, evt);
5673 		break;
5674 
5675 	case BT_HCI_OC_MESH_ADVERTISE_CANCEL:
5676 		mesh_advertise_cancel(cmd, evt);
5677 		break;
5678 
5679 	default:
5680 		return -EINVAL;
5681 	}
5682 
5683 	return 0;
5684 }
5685 #endif /* CONFIG_BT_HCI_MESH_EXT */
5686 
/* Dispatch vendor-specific HCI commands common to all vendor implementations.
 *
 * @param ocf OCF of the vendor command (OGF is BT_OGF_VS).
 * @param cmd Command parameter buffer (the HCI command header has already
 *            been pulled by the caller).
 * @param evt Output: Command Complete/Status event, filled by the handler.
 *
 * @return 0 when the OCF was recognized (even if the handler reported an
 *         HCI-level error in the event), -EINVAL for an unknown OCF.
 */
int hci_vendor_cmd_handle_common(uint16_t ocf, struct net_buf *cmd,
				 struct net_buf **evt)
{
	switch (ocf) {
	case BT_OCF(BT_HCI_OP_VS_READ_VERSION_INFO):
		vs_read_version_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS):
		vs_read_supported_commands(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES):
		vs_read_supported_features(cmd, evt);
		break;

#if defined(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4)
	case BT_OCF(BT_HCI_OP_VS_READ_USB_TRANSPORT_MODE):
		/* NOTE(review): no event is generated for this command here;
		 * confirm the response is produced elsewhere (e.g. USB layer).
		 */
		break;
	case BT_OCF(BT_HCI_OP_VS_SET_USB_TRANSPORT_MODE):
		/* Switching transport mode triggers a controller reset */
		reset(cmd, evt);
		break;
#endif /* CONFIG_USB_DEVICE_BLUETOOTH_VS_H4 */

	case BT_OCF(BT_HCI_OP_VS_READ_BUILD_INFO):
		vs_read_build_info(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_WRITE_BD_ADDR):
		vs_write_bd_addr(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_STATIC_ADDRS):
		vs_read_static_addrs(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_KEY_HIERARCHY_ROOTS):
		vs_read_key_hierarchy_roots(cmd, evt);
		break;

#if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
	case BT_OCF(BT_HCI_OP_VS_SET_SCAN_REQ_REPORTS):
		vs_set_scan_req_reports(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	case BT_OCF(BT_HCI_OP_VS_WRITE_TX_POWER_LEVEL):
		vs_write_tx_power_level(cmd, evt);
		break;

	case BT_OCF(BT_HCI_OP_VS_READ_TX_POWER_LEVEL):
		vs_read_tx_power_level(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case BT_OCF(BT_HCI_OP_VS_SET_MIN_NUM_USED_CHANS):
		vs_set_min_used_chans(cmd, evt);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case BT_OCF(BT_HCI_OP_VS_MESH):
		/* Mesh sub-opcode errors are reported in the generated event,
		 * so the handler's return value is intentionally ignored here.
		 */
		mesh_cmd_handle(cmd, evt);
		break;
#endif /* CONFIG_BT_HCI_MESH_EXT */

	default:
		return -EINVAL;
	}

	return 0;
}
5761 #endif
5762 
5763 struct net_buf *hci_cmd_handle(struct net_buf *cmd, void **node_rx)
5764 {
5765 	struct bt_hci_cmd_hdr *chdr;
5766 	struct net_buf *evt = NULL;
5767 	uint16_t ocf;
5768 	int err;
5769 
5770 	if (cmd->len < sizeof(*chdr)) {
5771 		LOG_ERR("No HCI Command header");
5772 		return NULL;
5773 	}
5774 
5775 	chdr = net_buf_pull_mem(cmd, sizeof(*chdr));
5776 	if (cmd->len < chdr->param_len) {
5777 		LOG_ERR("Invalid HCI CMD packet length");
5778 		return NULL;
5779 	}
5780 
5781 	/* store in a global for later CC/CS event creation */
5782 	_opcode = sys_le16_to_cpu(chdr->opcode);
5783 
5784 	ocf = BT_OCF(_opcode);
5785 
5786 	switch (BT_OGF(_opcode)) {
5787 	case BT_OGF_LINK_CTRL:
5788 		err = link_control_cmd_handle(ocf, cmd, &evt);
5789 		break;
5790 	case BT_OGF_BASEBAND:
5791 		err = ctrl_bb_cmd_handle(ocf, cmd, &evt);
5792 		break;
5793 	case BT_OGF_INFO:
5794 		err = info_cmd_handle(ocf, cmd, &evt);
5795 		break;
5796 	case BT_OGF_STATUS:
5797 		err = status_cmd_handle(ocf, cmd, &evt);
5798 		break;
5799 	case BT_OGF_LE:
5800 		err = controller_cmd_handle(ocf, cmd, &evt, node_rx);
5801 		break;
5802 #if defined(CONFIG_BT_HCI_VS)
5803 	case BT_OGF_VS:
5804 		err = hci_vendor_cmd_handle(ocf, cmd, &evt);
5805 		break;
5806 #endif
5807 	default:
5808 		err = -EINVAL;
5809 		break;
5810 	}
5811 
5812 	if (err == -EINVAL) {
5813 		evt = cmd_status(BT_HCI_ERR_UNKNOWN_CMD);
5814 	}
5815 
5816 	return evt;
5817 }
5818 
5819 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
5820 	defined(CONFIG_BT_CTLR_CONN_ISO)
5821 static void data_buf_overflow(struct net_buf **buf, uint8_t link_type)
5822 {
5823 	struct bt_hci_evt_data_buf_overflow *ep;
5824 
5825 	if (!(event_mask & BT_EVT_MASK_DATA_BUFFER_OVERFLOW)) {
5826 		return;
5827 	}
5828 
5829 	*buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
5830 	hci_evt_create(*buf, BT_HCI_EVT_DATA_BUF_OVERFLOW, sizeof(*ep));
5831 	ep = net_buf_add(*buf, sizeof(*ep));
5832 
5833 	ep->link_type = link_type;
5834 }
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO ||
	* CONFIG_BT_CTLR_CONN_ISO
	*/
5838 
5839 #if defined(CONFIG_BT_CONN)
5840 int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
5841 {
5842 	struct node_tx *node_tx;
5843 	struct bt_hci_acl_hdr *acl;
5844 	struct pdu_data *pdu_data;
5845 	uint16_t handle;
5846 	uint8_t flags;
5847 	uint16_t len;
5848 
5849 	*evt = NULL;
5850 
5851 	if (buf->len < sizeof(*acl)) {
5852 		LOG_ERR("No HCI ACL header");
5853 		return -EINVAL;
5854 	}
5855 
5856 	acl = net_buf_pull_mem(buf, sizeof(*acl));
5857 	len = sys_le16_to_cpu(acl->len);
5858 	handle = sys_le16_to_cpu(acl->handle);
5859 
5860 	if (buf->len < len) {
5861 		LOG_ERR("Invalid HCI ACL packet length");
5862 		return -EINVAL;
5863 	}
5864 
5865 	if (len > LL_LENGTH_OCTETS_TX_MAX) {
5866 		LOG_ERR("Invalid HCI ACL Data length");
5867 		return -EINVAL;
5868 	}
5869 
5870 	/* assigning flags first because handle will be overwritten */
5871 	flags = bt_acl_flags(handle);
5872 	handle = bt_acl_handle(handle);
5873 
5874 	node_tx = ll_tx_mem_acquire();
5875 	if (!node_tx) {
5876 		LOG_ERR("Tx Buffer Overflow");
5877 		data_buf_overflow(evt, BT_OVERFLOW_LINK_ACL);
5878 		return -ENOBUFS;
5879 	}
5880 
5881 	pdu_data = (void *)node_tx->pdu;
5882 
5883 	if (bt_acl_flags_bc(flags) != BT_ACL_POINT_TO_POINT) {
5884 		return -EINVAL;
5885 	}
5886 
5887 	switch (bt_acl_flags_pb(flags)) {
5888 	case BT_ACL_START_NO_FLUSH:
5889 		pdu_data->ll_id = PDU_DATA_LLID_DATA_START;
5890 		break;
5891 	case BT_ACL_CONT:
5892 		pdu_data->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
5893 		break;
5894 	default:
5895 		/* BT_ACL_START and BT_ACL_COMPLETE not allowed on LE-U
5896 		 * from Host to Controller
5897 		 */
5898 		return -EINVAL;
5899 	}
5900 
5901 	pdu_data->len = len;
5902 	memcpy(&pdu_data->lldata[0], buf->data, len);
5903 
5904 	if (ll_tx_mem_enqueue(handle, node_tx)) {
5905 		LOG_ERR("Invalid Tx Enqueue");
5906 		ll_tx_mem_release(node_tx);
5907 		return -EINVAL;
5908 	}
5909 
5910 	return 0;
5911 }
5912 #endif /* CONFIG_BT_CONN */
5913 
5914 #if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
/* Handle an HCI ISO Data packet from the Host: validate it, derive the SDU
 * reference timestamp and target event, and hand the SDU fragment to the
 * ISO-AL for fragmentation towards the CIS or BIS datapath.
 *
 * @param buf ISO packet; ISO header (and optional timestamp / SDU header)
 *            are pulled from it.
 * @param evt Output: set to a Data Buffer Overflow event on PDU allocation
 *            failure, NULL otherwise.
 *
 * @return 0 on success, -EINVAL on malformed packet or invalid handle/
 *         datapath, -ENOBUFS when ISO-AL fails to allocate a PDU.
 */
int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
{
	struct bt_hci_iso_sdu_hdr *iso_sdu_hdr;
	struct isoal_sdu_tx sdu_frag_tx;
	struct bt_hci_iso_hdr *iso_hdr;
	uint32_t *time_stamp;
	uint16_t handle;
	uint8_t pb_flag;
	uint8_t ts_flag;
	uint8_t flags;
	uint16_t len;

	iso_sdu_hdr = NULL;
	*evt  = NULL;

	if (buf->len < sizeof(*iso_hdr)) {
		LOG_ERR("No HCI ISO header");
		return -EINVAL;
	}

	iso_hdr = net_buf_pull_mem(buf, sizeof(*iso_hdr));
	handle = sys_le16_to_cpu(iso_hdr->handle);
	len = bt_iso_hdr_len(sys_le16_to_cpu(iso_hdr->len));

	if (buf->len < len) {
		LOG_ERR("Invalid HCI ISO packet length");
		return -EINVAL;
	}

	/* Assigning flags first because handle will be overwritten */
	flags = bt_iso_flags(handle);
	pb_flag = bt_iso_flags_pb(flags);
	ts_flag = bt_iso_flags_ts(flags);
	handle = bt_iso_handle(handle);

	/* Extract time stamp */
	/* Set default to current time
	 * BT Core V5.3 : Vol 6 Low Energy Controller : Part G IS0-AL:
	 * 3.1 Time_Offset in framed PDUs :
	 * The Controller transmitting a SDU may use any of the following
	 * methods to determine the value of the SDU reference time:
	 * -- A captured time stamp of the SDU
	 * -- A time stamp provided by the higher layer
	 * -- A computed time stamp based on a sequence counter provided by the
	 *    higher layer
	 * -- Any other method of determining Time_Offset
	 *    (Uses a timestamp computed from the difference in provided
	 *    timestamps, if the timestamp is deemed not based on the
	 *    controller's clock)
	 */
	sdu_frag_tx.cntr_time_stamp = HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
	if (ts_flag) {
		/* Use HCI provided time stamp */
		time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
		len -= sizeof(*time_stamp);
		sdu_frag_tx.time_stamp = sys_le32_to_cpu(*time_stamp);
	} else {
		/* Use controller's capture time */
		sdu_frag_tx.time_stamp = sdu_frag_tx.cntr_time_stamp;
	}

	/* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
	if ((pb_flag & 0x01) == 0) {
		iso_sdu_hdr = net_buf_pull_mem(buf, sizeof(*iso_sdu_hdr));
		len -= sizeof(*iso_sdu_hdr);
		sdu_frag_tx.packet_sn = sys_le16_to_cpu(iso_sdu_hdr->sn);
		sdu_frag_tx.iso_sdu_length =
			sys_le16_to_cpu(bt_iso_pkt_len(iso_sdu_hdr->slen));
	} else {
		/* Continuation/end fragments carry no SDU header */
		sdu_frag_tx.packet_sn = 0;
		sdu_frag_tx.iso_sdu_length = 0;
	}

	/* Packet boundary flags should be bitwise identical to the SDU state
	 * 0b00 BT_ISO_START
	 * 0b01 BT_ISO_CONT
	 * 0b10 BT_ISO_SINGLE
	 * 0b11 BT_ISO_END
	 */
	sdu_frag_tx.sdu_state = pb_flag;
	/* Fill in SDU buffer fields */
	sdu_frag_tx.dbuf = buf->data;
	sdu_frag_tx.size = len;

	/* `if (false)` lets each conditionally-compiled branch below start
	 * uniformly with `} else if`, regardless of which configs are enabled.
	 */
	if (false) {

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	/* Extract source handle from CIS or BIS handle by way of header and
	 * data path
	 */
	} else if (IS_CIS_HANDLE(handle)) {
		struct ll_conn_iso_stream *cis;
		struct ll_conn_iso_group *cig;
		struct ll_iso_stream_hdr *hdr;
		struct ll_iso_datapath *dp_in;
		uint8_t event_offset;

		cis = ll_iso_stream_connected_get(handle);
		if (!cis) {
			return -EINVAL;
		}

		cig = cis->group;

		/* We must ensure sufficient time for ISO-AL to fragment SDU and
		 * deliver PDUs to the TX queue. By checking ull_ref_get, we
		 * know if we are within the subevents of an ISO event. If so,
		 * we can assume that we have enough time to deliver in the next
		 * ISO event. If we're not active within the ISO event, we don't
		 * know if there is enough time to deliver in the next event,
		 * and for safety we set the target to current event + 2.
		 *
		 * For FT > 1, we have the opportunity to retransmit in later
		 * event(s), in which case we have the option to target an
		 * earlier event (this or next) because being late does not
		 * instantly flush the payload.
		 */

		event_offset = ull_ref_get(&cig->ull) ? 1 : 2;

		if (cis->lll.tx.ft > 1) {
			/* FT > 1, target an earlier event */
			event_offset -= 1;
		}

#if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
		uint64_t event_count;
		uint64_t pkt_seq_num;

		/* Catch up local pkt_seq_num with internal pkt_seq_num */
		event_count = cis->lll.event_count + event_offset;
		pkt_seq_num = event_count + 1U;

		/* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
		 * then we simply check that the pb_flag is an even value, and
		 * then pkt_seq_num is a future sequence number value compared
		 * to the last recorded number in cis->pkt_seq_num.
		 *
		 * When (pkt_seq_num - cis->pkt_seq_num) is negative then
		 * BIT64(39) will be set (two's complement value). A diff value
		 * less than or equal to BIT64_MASK(38) means the diff value is
		 * positive and hence pkt_seq_num is greater than
		 * cis->pkt_seq_num. This calculation is valid for when value
		 * rollover too.
		 */
		if (!(pb_flag & 0x01) &&
		    (((pkt_seq_num - cis->pkt_seq_num) &
		      BIT64_MASK(39)) <= BIT64_MASK(38))) {
			cis->pkt_seq_num = pkt_seq_num;
		} else {
			pkt_seq_num = cis->pkt_seq_num;
		}

		/* Pre-increment, when pb_flag is BT_ISO_SINGLE (0b10) or
		 * BT_ISO_END (0b11) then we simply check if pb_flag has bit 1
		 * set, for next ISO data packet seq num comparison.
		 *
		 * NOTE(review): pb_flag is documented above as a 2-bit value
		 * (0b00..0b11), so `pb_flag & 0x10` can never be non-zero;
		 * bit 1 would be mask 0x02 — confirm the intended mask.
		 */
		if (pb_flag & 0x10) {
			cis->pkt_seq_num++;
		}

		/* Target next ISO event to avoid overlapping with, if any,
		 * current ISO event
		 */
		pkt_seq_num++;
		sdu_frag_tx.target_event = pkt_seq_num;
		sdu_frag_tx.grp_ref_point =
			isoal_get_wrapped_time_us(cig->cig_ref_point,
						  ((pkt_seq_num - event_count) *
						   cig->iso_interval *
						   ISO_INT_UNIT_US));

#else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
		sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
		sdu_frag_tx.grp_ref_point =
			isoal_get_wrapped_time_us(cig->cig_ref_point,
						  (event_offset *
						   cig->iso_interval *
						   ISO_INT_UNIT_US));
#endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */

		/* Get controller's input data path for CIS */
		hdr = &cis->hdr;
		dp_in = hdr->datapath_in;
		if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
			LOG_ERR("Input data path not set for HCI");
			return -EINVAL;
		}

		/* Get input data path's source handle */
		isoal_source_handle_t source = dp_in->source_hdl;

		/* Start Fragmentation */
		isoal_status_t isoal_status =
			isoal_tx_sdu_fragment(source, &sdu_frag_tx);

		if (isoal_status) {
			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
				return -ENOBUFS;
			}

			return -EINVAL;
		}

		/* TODO: Assign *evt if an immediate response is required */
		return 0;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	} else if (IS_ADV_ISO_HANDLE(handle)) {
		struct lll_adv_iso_stream *stream;
		struct ll_adv_iso_set *adv_iso;
		struct lll_adv_iso *lll_iso;
		uint16_t latency_prepare;
		uint16_t stream_handle;
		uint64_t target_event;
		uint8_t event_offset;

		/* Get BIS stream handle and stream context */
		stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
		stream = ull_adv_iso_stream_get(stream_handle);
		if (!stream || !stream->dp) {
			LOG_ERR("Invalid BIS stream");
			return -EINVAL;
		}

		adv_iso = ull_adv_iso_by_stream_get(stream_handle);
		if (!adv_iso) {
			LOG_ERR("No BIG associated with stream handle");
			return -EINVAL;
		}

		lll_iso = &adv_iso->lll;

		/* Determine the target event and the first event offset after
		 * datapath setup.
		 * event_offset mitigates the possibility of first SDU being
		 * late on the datapath and avoid all subsequent SDUs being
		 * dropped for a said SDU interval. i.e. upper layer is not
		 * drifting, say first SDU dropped, hence subsequent SDUs all
		 * dropped, is mitigated by offsetting the grp_ref_point.
		 *
		 * It is ok to do the below for every received ISO data, ISOAL
		 * will not consider subsequent skewed target_event after the
		 * first use of target_event value.
		 *
		 * In BIG implementation in LLL, payload_count corresponds to
		 * the next BIG event, hence calculate grp_ref_point for next
		 * BIG event by incrementing the previous elapsed big_ref_point
		 * by one additional ISO interval.
		 */
		target_event = lll_iso->payload_count / lll_iso->bn;
		latency_prepare = lll_iso->latency_prepare;
		if (latency_prepare) {
			/* big_ref_point has been updated, but payload_count
			 * hasn't been updated yet - increment target_event to
			 * compensate
			 */
			target_event += latency_prepare;
		}
		event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;

#if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
		uint64_t event_count;
		uint64_t pkt_seq_num;

		/* Catch up local pkt_seq_num with internal pkt_seq_num */
		event_count = target_event + event_offset;
		pkt_seq_num = event_count + 1U;

		/* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
		 * then we simply check that the pb_flag is an even value, and
		 * then pkt_seq_num is a future sequence number value compared
		 * to the last recorded number in stream->pkt_seq_num.
		 *
		 * When (pkt_seq_num - stream->pkt_seq_num) is negative then
		 * BIT64(39) will be set (two's complement value). A diff value
		 * less than or equal to BIT64_MASK(38) means the diff value is
		 * positive and hence pkt_seq_num is greater than
		 * stream->pkt_seq_num. This calculation is valid for when
		 * value rollover too.
		 */
		if (!(pb_flag & 0x01) &&
		    (((pkt_seq_num - stream->pkt_seq_num) &
		      BIT64_MASK(39)) <= BIT64_MASK(38))) {
			stream->pkt_seq_num = pkt_seq_num;
		} else {
			pkt_seq_num = stream->pkt_seq_num;
		}

		/* Pre-increment, when pb_flag is BT_ISO_SINGLE (0b10) or
		 * BT_ISO_END (0b11) then we simply check if pb_flag has bit 1
		 * set, for next ISO data packet seq num comparison.
		 *
		 * NOTE(review): pb_flag is documented above as a 2-bit value
		 * (0b00..0b11), so `pb_flag & 0x10` can never be non-zero;
		 * bit 1 would be mask 0x02 — confirm the intended mask.
		 */
		if (pb_flag & 0x10) {
			stream->pkt_seq_num++;
		}

		/* Target next ISO event to avoid overlapping with, if any,
		 * current ISO event
		 */
		/* FIXME: Implement ISO Tx ack generation early in done compared
		 *        to currently only in prepare. I.e. to ensure upper
		 *        layer has the number of completed packet before the
		 *        next BIG event, so as to supply new ISO data packets.
		 *        Without which upper layers need extra buffers to
		 *        buffer next ISO data packet.
		 *
		 *        Enable below increment once early Tx ack is
		 *        implemented.
		 *
		 * pkt_seq_num++;
		 */
		sdu_frag_tx.target_event = pkt_seq_num;
		sdu_frag_tx.grp_ref_point =
			isoal_get_wrapped_time_us(adv_iso->big_ref_point,
						  (((pkt_seq_num + 1U) -
						    event_count) *
						   lll_iso->iso_interval *
						   ISO_INT_UNIT_US));

#else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
		sdu_frag_tx.target_event = target_event + event_offset;
		sdu_frag_tx.grp_ref_point =
			isoal_get_wrapped_time_us(adv_iso->big_ref_point,
						  ((event_offset + 1U) *
						   lll_iso->iso_interval *
						   ISO_INT_UNIT_US));
#endif /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */

		/* Start Fragmentation */
		/* FIXME: need to ensure ISO-AL returns proper isoal_status.
		 * Currently there are cases where ISO-AL calls LL_ASSERT.
		 */
		isoal_status_t isoal_status =
			isoal_tx_sdu_fragment(stream->dp->source_hdl, &sdu_frag_tx);

		if (isoal_status) {
			if (isoal_status & ISOAL_STATUS_ERR_PDU_ALLOC) {
				data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
				return -ENOBUFS;
			}

			return -EINVAL;
		}

		return 0;
#endif /* CONFIG_BT_CTLR_ADV_ISO */

	}

	/* Handle is neither a connected CIS nor a BIS broadcast stream */
	return -EINVAL;
}
6269 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
6270 
6271 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
6272 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6273 static void dup_ext_adv_adi_store(struct dup_ext_adv_mode *dup_mode,
6274 				  const struct pdu_adv_adi *adi,
6275 				  uint8_t data_status)
6276 {
6277 	struct dup_ext_adv_set *adv_set;
6278 
6279 	adv_set = &dup_mode->set[dup_mode->set_curr];
6280 
6281 	adv_set->data_cmplt = (data_status ==
6282 			       BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) ?
6283 			      1U : 0U;
6284 
6285 	if (adi) {
6286 		(void)memcpy(&adv_set->adi, adi, sizeof(*adi));
6287 	} else {
6288 		(void)memset(&adv_set->adi, 0U, sizeof(*adi));
6289 	}
6290 
6291 	if (dup_mode->set_count < CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6292 		dup_mode->set_count++;
6293 		dup_mode->set_curr = dup_mode->set_count;
6294 	} else {
6295 		dup_mode->set_curr++;
6296 	}
6297 
6298 	if (dup_mode->set_curr == CONFIG_BT_CTLR_DUP_FILTER_ADV_SET_MAX) {
6299 		dup_mode->set_curr = 0U;
6300 	}
6301 }
6302 
6303 static void dup_ext_adv_mode_reset(struct dup_ext_adv_mode *dup_adv_mode)
6304 {
6305 	uint8_t adv_mode;
6306 
6307 	for (adv_mode = 0U; adv_mode < DUP_EXT_ADV_MODE_COUNT;
6308 	     adv_mode++) {
6309 		struct dup_ext_adv_mode *dup_mode;
6310 
6311 		dup_mode = &dup_adv_mode[adv_mode];
6312 		dup_mode->set_count = 0U;
6313 		dup_mode->set_curr = 0U;
6314 	}
6315 }
6316 
6317 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
6318 static void dup_ext_adv_reset(void)
6319 {
6320 	for (int32_t i = 0; i < dup_count; i++) {
6321 		struct dup_entry *dup;
6322 
6323 		dup = &dup_filter[i];
6324 		dup->mask = 0U;
6325 		dup_ext_adv_mode_reset(dup->adv_mode);
6326 	}
6327 }
6328 
6329 static void dup_periodic_adv_reset(uint8_t addr_type, const uint8_t *addr,
6330 				   uint8_t sid)
6331 {
6332 	for (int32_t addr_idx = 0; addr_idx < dup_count; addr_idx++) {
6333 		struct dup_ext_adv_mode *dup_mode;
6334 		struct dup_entry *dup;
6335 
6336 		dup = &dup_filter[addr_idx];
6337 		if (memcmp(addr, dup->addr.a.val, sizeof(bt_addr_t)) ||
6338 		    (addr_type != dup->addr.type)) {
6339 			continue;
6340 		}
6341 
6342 		dup_mode = &dup->adv_mode[DUP_EXT_ADV_MODE_PERIODIC];
6343 		for (uint16_t set_idx = 0; set_idx < dup_mode->set_count;
6344 		     set_idx++) {
6345 			struct dup_ext_adv_set *adv_set;
6346 
6347 			adv_set = &dup_mode->set[set_idx];
6348 			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != sid) {
6349 				continue;
6350 			}
6351 
6352 			/* reset data complete state */
6353 			adv_set->data_cmplt = 0U;
6354 
6355 			return;
6356 		}
6357 
6358 		return;
6359 	}
6360 }
6361 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
6362 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6363 
/* Decide whether a report from an already-known peer address is a duplicate.
 *
 * Updates the stored per-address state when the report carries something new
 * (a new adv type, a new extended-adv mode, a new DID, or newly completed
 * data).
 *
 * Note: the CONFIG_BT_CTLR_ADV_EXT conditional opens inside the first branch
 * and closes before the final `return false`, so without extended advertising
 * support only the adv-type mask logic remains compiled in.
 *
 * @return true to drop the report as a duplicate, false to forward it.
 */
static inline bool is_dup_or_update(struct dup_entry *dup, uint8_t adv_type,
				    uint8_t adv_mode,
				    const struct pdu_adv_adi *adi,
				    uint8_t data_status)
{
	if (!(dup->mask & BIT(adv_type))) {
		/* report different adv types */
		dup->mask |= BIT(adv_type);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);

		return false;
	} else if (adv_type != PDU_ADV_TYPE_EXT_IND) {
		/* drop duplicate legacy advertising */
		return true;
	} else if (dup->adv_mode[adv_mode].set_count == 0U) {
		/* report different extended adv mode */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
		return false;
	} else if (adi) {
		struct dup_ext_adv_mode *dup_mode;
		uint8_t j;

		/* Look for a stored set with the same SID */
		dup_mode = &dup->adv_mode[adv_mode];
		for (j = 0; j < dup_mode->set_count; j++) {
			struct dup_ext_adv_set *adv_set;

			adv_set = &dup_mode->set[j];
			if (PDU_ADV_ADI_SID_GET(&adv_set->adi) != PDU_ADV_ADI_SID_GET(adi)) {
				continue;
			}

			if (PDU_ADV_ADI_DID_GET(&adv_set->adi) != PDU_ADV_ADI_DID_GET(adi)) {
				/* report different DID */
				adv_set->adi.did_sid_packed[0] = adi->did_sid_packed[0];
				adv_set->adi.did_sid_packed[1] = adi->did_sid_packed[1];
				/* set new data status */
				if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
					adv_set->data_cmplt = 1U;
				} else {
					adv_set->data_cmplt = 0U;
				}

				return false;
			} else if (!adv_set->data_cmplt &&
				   (data_status ==
				    BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE)) {
				/* report data complete */
				adv_set->data_cmplt = 1U;
				return false;
			} else if (!adv_set->data_cmplt) {
				/* report partial and incomplete data */
				return false;
			}

			/* Same SID, same DID, data already complete: drop */
			return true;
		}

		/* No stored set with this SID yet: remember it and report */
		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
				      data_status);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		return false;
	}

	return true;
}
6434 
6435 static bool dup_found(uint8_t adv_type, uint8_t addr_type, const uint8_t *addr,
6436 		      uint8_t adv_mode, const struct pdu_adv_adi *adi,
6437 		      uint8_t data_status)
6438 {
6439 	/* check for duplicate filtering */
6440 	if (dup_count >= 0) {
6441 		struct dup_entry *dup;
6442 
6443 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6444 		__ASSERT((adv_mode < ARRAY_SIZE(dup_filter[0].adv_mode)),
6445 			 "adv_mode index out-of-bound");
6446 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6447 
6448 		/* find for existing entry and update if changed */
6449 		for (int32_t i = 0; i < dup_count; i++) {
6450 			dup = &dup_filter[i];
6451 			if (memcmp(addr, &dup->addr.a.val[0],
6452 				   sizeof(bt_addr_t)) ||
6453 			    (addr_type != dup->addr.type)) {
6454 				continue;
6455 			}
6456 
6457 			/* still duplicate or update entry with change */
6458 			return is_dup_or_update(dup, adv_type, adv_mode, adi,
6459 						data_status);
6460 		}
6461 
6462 		/* insert into the duplicate filter */
6463 		dup = &dup_filter[dup_curr];
6464 		(void)memcpy(&dup->addr.a.val[0], addr, sizeof(bt_addr_t));
6465 		dup->addr.type = addr_type;
6466 		dup->mask = BIT(adv_type);
6467 
6468 #if defined(CONFIG_BT_CTLR_ADV_EXT)
6469 		dup_ext_adv_mode_reset(dup->adv_mode);
6470 		dup_ext_adv_adi_store(&dup->adv_mode[adv_mode], adi,
6471 				      data_status);
6472 #endif /* CONFIG_BT_CTLR_ADV_EXT */
6473 
6474 		if (dup_count < CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6475 			dup_count++;
6476 			dup_curr = dup_count;
6477 		} else {
6478 			dup_curr++;
6479 		}
6480 
6481 		if (dup_curr == CONFIG_BT_CTLR_DUP_FILTER_LEN) {
6482 			dup_curr = 0U;
6483 		}
6484 	}
6485 
6486 	return false;
6487 }
6488 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
6489 
6490 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
/* Generate an HCI LE Directed Advertising Report event from a received
 * PDU_ADV_TYPE_DIRECT_IND PDU.
 *
 * @param adv    Received directed-advertising PDU.
 * @param buf    Event buffer the report is appended to.
 * @param rssi   RSSI of the received PDU.
 * @param rl_idx Resolving-list index of the resolved peer (only meaningful
 *               with CONFIG_BT_CTLR_PRIVACY).
 */
static inline void le_dir_adv_report(struct pdu_adv *adv, struct net_buf *buf,
				     int8_t rssi, uint8_t rl_idx)
{
	struct bt_hci_evt_le_direct_adv_report *drp;
	struct bt_hci_evt_le_direct_adv_info *dir_info;

	/* Suppress the report when masked by the host */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_DIRECT_ADV_REPORT)) {
		return;
	}

	LL_ASSERT(adv->type == PDU_ADV_TYPE_DIRECT_IND);

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	drp = meta_evt(buf, BT_HCI_EVT_LE_DIRECT_ADV_REPORT,
		       sizeof(*drp) + sizeof(*dir_info));

	drp->num_reports = 1U;
	/* The single report immediately follows the event header */
	dir_info = (void *)(((uint8_t *)drp) + sizeof(*drp));

	/* Directed Advertising */
	dir_info->evt_type = BT_HCI_ADV_DIRECT_IND;

	/* The `#else`/`if (1)` below keeps the shared else-body compiling
	 * whether or not privacy support is enabled.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &dir_info->addr.type,
				  &dir_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(dir_info->addr.type);
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		dir_info->addr.type = adv->tx_addr;
		memcpy(&dir_info->addr.a.val[0], &adv->direct_ind.adv_addr[0],
		       sizeof(bt_addr_t));
	}

	dir_info->dir_addr.type = adv->rx_addr;
	memcpy(&dir_info->dir_addr.a.val[0],
	       &adv->direct_ind.tgt_addr[0], sizeof(bt_addr_t));

	dir_info->rssi = rssi;
}
6542 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
6543 
6544 #if defined(CONFIG_BT_OBSERVER)
6545 #if defined(CONFIG_BT_HCI_MESH_EXT)
6546 static inline bool scan_filter_apply(uint8_t filter, uint8_t *data, uint8_t len)
6547 {
6548 	struct scan_filter *f = &scan_filters[filter];
6549 
6550 	/* No patterns means filter out all advertising packets */
6551 	for (uint8_t i = 0; i < f->count; i++) {
6552 		/* Require at least the length of the pattern */
6553 		if (len >= f->lengths[i] &&
6554 		    !memcmp(data, f->patterns[i], f->lengths[i])) {
6555 			return true;
6556 		}
6557 	}
6558 
6559 	return false;
6560 }
6561 
/* Generate a vendor Mesh Scanning Report event for a received
 * PDU_ADV_TYPE_NONCONN_IND PDU, after applying the active scan filter.
 *
 * @param adv     Received advertising PDU (must be NONCONN_IND, asserted)
 * @param node_rx Rx node carrying channel and anchor-tick metadata
 * @param buf     HCI event buffer the report is appended to
 * @param rssi    Signed RSSI value to report
 */
static inline void le_mesh_scan_report(struct pdu_adv *adv,
				       struct node_rx_pdu *node_rx,
				       struct net_buf *buf, int8_t rssi)
{
	/* AD data length is the payload minus the leading AdvA address */
	uint8_t data_len = (adv->len - BDADDR_SIZE);
	struct bt_hci_evt_mesh_scanning_report *mep;
	struct bt_hci_evt_mesh_scan_report *sr;
	uint32_t instant;
	uint8_t chan;

	LL_ASSERT(adv->type == PDU_ADV_TYPE_NONCONN_IND);

	/* Filter based on currently active Scan Filter */
	if (sf_curr < ARRAY_SIZE(scan_filters) &&
	    !scan_filter_apply(sf_curr, &adv->adv_ind.data[0], data_len)) {
		/* Drop the report */
		return;
	}

	/* Channel and anchor instant captured by the LL at reception */
	chan = node_rx->rx_ftr.chan;
	instant = node_rx->rx_ftr.anchor_ticks;

	/* Allocate the mesh event with room for a single report entry */
	mep = mesh_evt(buf, BT_HCI_EVT_MESH_SCANNING_REPORT,
			    sizeof(*mep) + sizeof(*sr));

	mep->num_reports = 1U;
	sr = (void *)(((uint8_t *)mep) + sizeof(*mep));
	sr->addr.type = adv->tx_addr;
	memcpy(&sr->addr.a.val[0], &adv->adv_ind.addr[0], sizeof(bt_addr_t));
	sr->chan = chan;
	sr->rssi = rssi;
	/* Instant is serialized little-endian into the event */
	sys_put_le32(instant, (uint8_t *)&sr->instant);

	sr->data_len = data_len;
	memcpy(&sr->data[0], &adv->adv_ind.data[0], data_len);
}
6598 #endif /* CONFIG_BT_HCI_MESH_EXT */
6599 
/* Generate a HCI LE Advertising Report event for a received legacy
 * advertising PDU, delegating to the direct-advertising or mesh report
 * generators when applicable.
 */
static void le_advertising_report(struct pdu_data *pdu_data,
				  struct node_rx_pdu *node_rx,
				  struct net_buf *buf)
{
	/* Map PDU advertising type to the HCI advertising report event type;
	 * 0xff entries mark PDU types with no legacy report mapping.
	 */
	const uint8_t c_adv_type[] = { 0x00, 0x01, 0x03, 0xff, 0x04,
				    0xff, 0x02 };
	struct bt_hci_evt_le_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_advertising_info *adv_info;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;
#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	uint8_t direct_report;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
	int8_t *prssi;

	/* The Link Layer provides RSSI as an absolute value */
	rssi = -(node_rx->rx_ftr.rssi);
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	direct_report = node_rx->rx_ftr.direct;
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	/* Directed PDU reported due to unresolved direct address: emit an
	 * LE Direct Advertising Report instead of a regular report.
	 */
	if (direct_report) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
		le_dir_adv_report(adv, buf, rssi, rl_idx);
#else
		le_dir_adv_report(adv, buf, rssi, 0xFF);
#endif /* CONFIG_BT_CTLR_PRIVACY */
		return;
	}
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	/* Mesh scanning reports use a dedicated vendor event */
	if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) {
		le_mesh_scan_report(adv, node_rx, buf, rssi);
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Suppress the event when masked out by the Host */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_ADVERTISING_REPORT)) {
		return;
	}

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Drop duplicate reports while scan duplicate filtering is active */
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND PDUs carry no AD data; all other types carry the
	 * payload that follows the AdvA address.
	 */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}
	/* Report layout: info struct, AD data, then one trailing RSSI byte */
	info_len = sizeof(struct bt_hci_evt_le_advertising_info) + data_len +
		   sizeof(*prssi);
	sep = meta_evt(buf, BT_HCI_EVT_LE_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = c_adv_type[adv->type];

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
	} else {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		/* Unresolved (or privacy disabled): report the on-air
		 * advertiser address as received.
		 */
		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
	/* RSSI */
	prssi = &adv_info->data[0] + data_len;
	*prssi = rssi;
}
6703 
6704 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Generate a HCI LE Extended Advertising Report event for a received
 * legacy advertising PDU (used when the Host enabled extended reports).
 */
static void le_ext_adv_legacy_report(struct pdu_data *pdu_data,
				     struct node_rx_pdu *node_rx,
				     struct net_buf *buf)
{
	/* Lookup event type based on pdu_adv_type set by LLL */
	const uint8_t evt_type_lookup[] = {
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* ADV_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN),   /* DIRECT_IND */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY), /* NONCONN_IND */
		0xff,                            /* Invalid index lookup */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN),   /* SCAN_RSP to an ADV_SCAN_IND
						  */
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
		 BT_HCI_LE_ADV_EVT_TYPE_CONN), /* SCAN_RSP to an ADV_IND,
						* NOTE: LLL explicitly sets
						* adv_type to
						* PDU_ADV_TYPE_ADV_IND_SCAN_RSP
						*/
		(BT_HCI_LE_ADV_EVT_TYPE_LEGACY |
		 BT_HCI_LE_ADV_EVT_TYPE_SCAN)    /* SCAN_IND */
	};
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	struct pdu_adv *adv = (void *)pdu_data;
	uint8_t data_len;
	uint8_t info_len;
	int8_t rssi;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	/* Suppress the event when masked out by the Host */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(node_rx->rx_ftr.rssi);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (adv->tx_addr) {
		/* Update current RPA */
		ll_rl_crpa_set(0x00, NULL, rl_idx, &adv->adv_ind.addr[0]);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
	/* Drop duplicate reports while scan duplicate filtering is active */
	if (dup_scan &&
	    dup_found(adv->type, adv->tx_addr, adv->adv_ind.addr, 0, NULL, 0)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */

	/* DIRECT_IND PDUs carry no AD data; all other types carry the
	 * payload that follows the AdvA address.
	 */
	if (adv->type != PDU_ADV_TYPE_DIRECT_IND) {
		data_len = (adv->len - BDADDR_SIZE);
	} else {
		data_len = 0U;
	}

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type_lookup[adv->type]);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  &adv_info->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
	} else
#endif /* CONFIG_BT_CTLR_PRIVACY */
	{
		/* Unresolved (or privacy disabled): report the on-air
		 * advertiser address as received.
		 */
		adv_info->addr.type = adv->tx_addr;
		memcpy(&adv_info->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	/* Legacy PDUs: primary PHY is always 1M, no secondary PHY, no SID,
	 * no Tx power, not periodic.
	 */
	adv_info->prim_phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
	adv_info->sec_phy = 0U;
	adv_info->sid = 0xff;
	adv_info->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	adv_info->rssi = rssi;
	adv_info->interval = 0U;

	if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
		/* Directed: report the on-air target address */
		adv_info->direct_addr.type = adv->rx_addr;
		bt_addr_copy(&adv_info->direct_addr.a,
			     (void *)adv->direct_ind.tgt_addr);
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0U,
			     sizeof(adv_info->direct_addr.a.val));
	}

	adv_info->length = data_len;
	memcpy(&adv_info->data[0], &adv->adv_ind.data[0], data_len);
}
6820 
/* Determine the HCI address type to report for the target (directed)
 * address of a directed extended advertising PDU received while scanning.
 *
 * @param lll           Scanner LLL context; used to recover the ULL scan set
 *                      via HDR_LLL2ULL()
 * @param peer_resolved True when the Controller resolved the target RPA
 *                      (privacy builds only)
 * @param direct_report True when reported because the target RPA could not
 *                      be resolved (direct-report path)
 * @param rx_addr_type  On-air target address type from the received PDU
 * @param rx_addr       On-air target address from the received PDU
 *
 * @return HCI address type value for the directed address field.
 */
static uint8_t ext_adv_direct_addr_type(struct lll_scan *lll,
					bool peer_resolved, bool direct_report,
					uint8_t rx_addr_type,
					const uint8_t *const rx_addr)
{
	/* The directed address is resolvable private address, but Controller
	 * could not resolve it.
	 */
	if (direct_report) {
		return BT_ADDR_LE_UNRESOLVED;
	}

	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Peer directed advertiser's address was resolved */
	} else if (peer_resolved) {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);
		if ((rx_addr_type == lll->init_addr_type) &&
		    !memcmp(lll->init_addr, rx_addr, BDADDR_SIZE)) {
			/* Peer directed advertiser used local scanner's
			 * initiator address.
			 */
			return scan->own_addr_type;
		}

		/* Peer directed advertiser used directed resolvable
		 * private address generated from the local scanner's
		 * Identity Resolution Key.
		 */
		return scan->own_addr_type | BIT(1);
#endif /* CONFIG_BT_CTLR_PRIVACY */
	} else {
		struct ll_scan_set *scan;

		scan = HDR_LLL2ULL(lll);

		/* Peer directed advertiser used local scanner's
		 * initiator address.
		 */
		return scan->own_addr_type;
	}
}
6865 
6866 static uint8_t ext_adv_data_get(const struct node_rx_pdu *node_rx_data,
6867 				uint8_t *const sec_phy, int8_t *const tx_pwr,
6868 				const uint8_t **const data)
6869 {
6870 	const struct pdu_adv *adv = (void *)node_rx_data->pdu;
6871 	const struct pdu_adv_com_ext_adv *p;
6872 	const struct pdu_adv_ext_hdr *h;
6873 	uint8_t hdr_buf_len;
6874 	const uint8_t *ptr;
6875 	uint8_t hdr_len;
6876 
6877 	*tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
6878 
6879 	p = (void *)&adv->adv_ext_ind;
6880 	h = (void *)p->ext_hdr_adv_data;
6881 	ptr = (void *)h;
6882 
6883 	if (!p->ext_hdr_len) {
6884 		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
6885 
6886 		goto no_ext_hdr;
6887 	}
6888 
6889 	ptr = h->data;
6890 
6891 	if (h->adv_addr) {
6892 		ptr += BDADDR_SIZE;
6893 	}
6894 
6895 	if (h->tgt_addr) {
6896 		ptr += BDADDR_SIZE;
6897 	}
6898 
6899 	if (h->adi) {
6900 		ptr += sizeof(struct pdu_adv_adi);
6901 	}
6902 
6903 	if (h->aux_ptr) {
6904 		struct pdu_adv_aux_ptr *aux_ptr;
6905 
6906 		aux_ptr = (void *)ptr;
6907 		ptr += sizeof(*aux_ptr);
6908 
6909 		*sec_phy = HCI_AUX_PHY_TO_HCI_PHY(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
6910 	}
6911 
6912 	if (h->sync_info) {
6913 		ptr += sizeof(struct pdu_adv_sync_info);
6914 	}
6915 
6916 	if (h->tx_pwr) {
6917 		*tx_pwr = *(int8_t *)ptr;
6918 		ptr++;
6919 	}
6920 
6921 	hdr_len = ptr - (uint8_t *)p;
6922 	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
6923 	if (hdr_len < hdr_buf_len) {
6924 		uint8_t acad_len = hdr_buf_len - hdr_len;
6925 
6926 		ptr += acad_len;
6927 		hdr_len += acad_len;
6928 	}
6929 
6930 no_ext_hdr:
6931 	if (hdr_len < adv->len) {
6932 		*data = ptr;
6933 
6934 		return adv->len - hdr_len;
6935 	}
6936 
6937 	return 0;
6938 }
6939 
6940 static void node_rx_extra_list_release(struct node_rx_pdu *node_rx_extra)
6941 {
6942 	while (node_rx_extra) {
6943 		struct node_rx_pdu *node_rx_curr;
6944 
6945 		node_rx_curr = node_rx_extra;
6946 		node_rx_extra = node_rx_curr->rx_ftr.extra;
6947 
6948 		node_rx_curr->hdr.next = NULL;
6949 		ll_rx_mem_release((void **)&node_rx_curr);
6950 	}
6951 }
6952 
/* Construct one HCI LE Extended Advertising Report event carrying a single
 * report entry with the supplied fields and (a fragment of) AD data.
 *
 * @param evt_type        HCI extended advertising event type bits (includes
 *                        data-status bits already shifted into place)
 * @param phy             Primary PHY bitmask; converted via find_lsb_set()
 * @param sec_phy         Secondary PHY value to report
 * @param adv_addr_type   On-air advertiser address type (used when rl_idx
 *                        does not resolve)
 * @param adv_addr        Advertiser address, or NULL for anonymous
 * @param direct_addr_type/direct_addr  Target address, reported only when
 *                        the DIRECT bit is set in evt_type
 * @param rl_idx          Resolving list index (privacy builds only)
 * @param tx_pwr          Tx power to report
 * @param rssi            RSSI to report
 * @param interval_le16   Periodic advertising interval, little-endian
 * @param adi             AdvDataInfo, or NULL when absent (SID reported
 *                        as invalid)
 * @param data_len/data   AD data fragment copied into the event
 * @param buf             HCI event buffer to append to
 */
static void ext_adv_info_fill(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
			      uint8_t adv_addr_type, const uint8_t *adv_addr,
			      uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi, uint8_t data_len,
			      const uint8_t *data, struct net_buf *buf)
{
	struct bt_hci_evt_le_ext_advertising_info *adv_info;
	struct bt_hci_evt_le_ext_advertising_report *sep;
	uint8_t info_len;

	info_len = sizeof(struct bt_hci_evt_le_ext_advertising_info) +
		   data_len;
	sep = meta_evt(buf, BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT,
		       sizeof(*sep) + info_len);

	sep->num_reports = 1U;
	adv_info = (void *)(((uint8_t *)sep) + sizeof(*sep));

	adv_info->evt_type = sys_cpu_to_le16((uint16_t)evt_type);

	if (0) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
	} else if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &adv_info->addr.type,
				  adv_info->addr.a.val);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(adv_info->addr.type);
#else /* !CONFIG_BT_CTLR_PRIVACY */
		ARG_UNUSED(rl_idx);
#endif /* !CONFIG_BT_CTLR_PRIVACY */
	} else if (adv_addr) {
		/* Unresolved: report the on-air advertiser address */
		adv_info->addr.type = adv_addr_type;
		(void)memcpy(adv_info->addr.a.val, adv_addr, sizeof(bt_addr_t));
	} else {
		/* Anonymous advertiser: zeroed address */
		adv_info->addr.type = 0U;
		(void)memset(adv_info->addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->prim_phy = find_lsb_set(phy);
	adv_info->sec_phy = sec_phy;
	adv_info->sid = (adi) ? PDU_ADV_ADI_SID_GET(adi) : BT_HCI_LE_EXT_ADV_SID_INVALID;
	adv_info->tx_power = tx_pwr;
	adv_info->rssi = rssi;
	adv_info->interval = interval_le16;

	if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_DIRECT) {
		adv_info->direct_addr.type = direct_addr_type;
		(void)memcpy(adv_info->direct_addr.a.val, direct_addr,
			     sizeof(bt_addr_t));
	} else {
		adv_info->direct_addr.type = 0U;
		(void)memset(adv_info->direct_addr.a.val, 0, sizeof(bt_addr_t));
	}

	adv_info->length = data_len;
	(void)memcpy(adv_info->data, data, data_len);
}
7014 
/* Emit HCI event fragments for one PDU's AD data, each at most data_len_max
 * octets, allocating and chaining a fresh event buffer after every fragment.
 *
 * On return: *data, *data_len and *data_len_total are advanced past the
 * consumed octets, and *evt_buf points at a newly allocated buffer that the
 * caller fills with the remaining (final) fragment.
 */
static void ext_adv_pdu_frag(uint8_t evt_type, uint8_t phy, uint8_t sec_phy,
			     uint8_t adv_addr_type, const uint8_t *adv_addr,
			     uint8_t direct_addr_type,
			     const uint8_t *direct_addr, uint8_t rl_idx,
			     int8_t tx_pwr, int8_t rssi, uint16_t interval_le16,
			     const struct pdu_adv_adi *adi,
			     uint8_t data_len_max,
			     uint16_t *const data_len_total,
			     uint8_t *const data_len,
			     const uint8_t **const data, struct net_buf *buf,
			     struct net_buf **const evt_buf)
{
	/* Fragment size: whole remainder if it fits, else the event maximum */
	const uint8_t data_len_frag = MIN(*data_len, data_len_max);

	do {
		/* Prepare a fragment of PDU data in a HCI event */
		ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type,
				  adv_addr, direct_addr_type, direct_addr,
				  rl_idx, tx_pwr, rssi, interval_le16, adi,
				  data_len_frag, *data, *evt_buf);

		*data += data_len_frag;
		*data_len -= data_len_frag;
		*data_len_total -= data_len_frag;

		/* Allocate the next event buffer and chain it after buf */
		*evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
		LL_ASSERT(*evt_buf);

		net_buf_frag_add(buf, *evt_buf);

		/* Continue to fragment until the last partial PDU data
		 * fragment; the remainder PDU data's HCI event will be
		 * prepared by the caller.
		 */
	} while (*data_len > data_len_max);
}
7050 
/* Fragment the AD data of a chain of extended advertising PDUs into HCI
 * events marked with "data status: partial", stopping when only the final
 * fragment remains (the caller emits the final, non-partial event).
 *
 * Walks node_rx_data's rx_ftr.extra list to pull in data from chained PDUs,
 * updating *sec_phy/*tx_pwr/*data/*data_len via ext_adv_data_get() for each.
 */
static void ext_adv_data_frag(const struct node_rx_pdu *node_rx_data,
			      uint8_t evt_type, uint8_t phy,
			      uint8_t *const sec_phy, uint8_t adv_addr_type,
			      const uint8_t *adv_addr, uint8_t direct_addr_type,
			      const uint8_t *direct_addr, uint8_t rl_idx,
			      int8_t *const tx_pwr, int8_t rssi,
			      uint16_t interval_le16,
			      const struct pdu_adv_adi *adi,
			      uint8_t data_len_max, uint16_t data_len_total,
			      uint8_t *const data_len,
			      const uint8_t **const data, struct net_buf *buf,
			      struct net_buf **const evt_buf)
{
	/* All events emitted here are intermediate fragments */
	evt_type |= (BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL << 5);

	do {
		/* Fragment the PDU data */
		ext_adv_pdu_frag(evt_type, phy, *sec_phy, adv_addr_type,
				 adv_addr, direct_addr_type, direct_addr,
				 rl_idx, *tx_pwr, rssi, interval_le16, adi,
				 data_len_max, &data_len_total, data_len,
				 data, buf, evt_buf);

		/* Check if more PDUs in the list */
		node_rx_data = node_rx_data->rx_ftr.extra;
		if (node_rx_data) {
			if (*data_len >= data_len_total) {
				/* Last fragment restricted to maximum scan
				 * data length, caller will prepare the last
				 * HCI fragment event.
				 */
				break;
			} else if (*data_len) {
				/* Last fragment of current PDU data */
				ext_adv_pdu_frag(evt_type, phy, *sec_phy,
						 adv_addr_type, adv_addr,
						 direct_addr_type, direct_addr,
						 rl_idx, *tx_pwr, rssi,
						 interval_le16, adi,
						 data_len_max, &data_len_total,
						 data_len, data, buf, evt_buf);
			}

			/* Get next PDU data in list */
			*data_len = ext_adv_data_get(node_rx_data, sec_phy,
						     tx_pwr, data);

			/* Restrict PDU data to maximum scan data length */
			if (*data_len > data_len_total) {
				*data_len = data_len_total;
			}
		}

		/* Continue to fragment if current PDU data length less than
		 * total data length or current PDU data length greater than
		 * HCI event max length.
		 */
	} while ((*data_len < data_len_total) || (*data_len > data_len_max));
}
7110 
7111 static void le_ext_adv_report(struct pdu_data *pdu_data,
7112 			      struct node_rx_pdu *node_rx,
7113 			      struct net_buf *buf, uint8_t phy)
7114 {
7115 	int8_t scan_rsp_tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7116 	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7117 	struct node_rx_pdu *node_rx_scan_data = NULL;
7118 	struct node_rx_pdu *node_rx_data = NULL;
7119 	const struct pdu_adv_adi *adi = NULL;
7120 	uint16_t scan_data_len_total = 0U;
7121 	struct node_rx_pdu *node_rx_curr;
7122 	struct node_rx_pdu *node_rx_next;
7123 	const uint8_t *scan_data = NULL;
7124 	uint8_t scan_data_status = 0U;
7125 	uint8_t direct_addr_type = 0U;
7126 	uint16_t data_len_total = 0U;
7127 	uint8_t *direct_addr = NULL;
7128 	uint16_t interval_le16 = 0U;
7129 	const uint8_t *data = NULL;
7130 	uint8_t scan_data_len = 0U;
7131 	uint8_t adv_addr_type = 0U;
7132 	uint8_t sec_phy_scan = 0U;
7133 	uint8_t *adv_addr = NULL;
7134 	uint8_t data_status = 0U;
7135 	struct net_buf *evt_buf;
7136 	bool devmatch = false;
7137 	uint8_t data_len = 0U;
7138 	uint8_t evt_type = 0U;
7139 	uint8_t sec_phy = 0U;
7140 	uint8_t data_len_max;
7141 	uint8_t rl_idx = 0U;
7142 	struct pdu_adv *adv;
7143 	int8_t rssi;
7144 
7145 	/* NOTE: This function uses a lot of initializers before the check and
7146 	 * return below, as an exception to initializing close to their locality
7147 	 * of reference. This is acceptable as the return is unlikely in typical
7148 	 * Controller use.
7149 	 */
7150 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7151 	    !(le_event_mask & BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT)) {
7152 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7153 		return;
7154 	}
7155 
7156 #if defined(CONFIG_BT_CTLR_PRIVACY)
7157 	rl_idx = ll_rl_size_get();
7158 #endif /* CONFIG_BT_CTLR_PRIVACY */
7159 
7160 	adv = (void *)pdu_data;
7161 	node_rx_curr = node_rx;
7162 	node_rx_next = node_rx_curr->rx_ftr.extra;
7163 	do {
7164 		int8_t tx_pwr_curr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
7165 		struct pdu_adv_adi *adi_curr = NULL;
7166 		uint8_t direct_addr_type_curr = 0U;
7167 		bool direct_resolved_curr = false;
7168 		uint8_t *direct_addr_curr = NULL;
7169 		uint8_t adv_addr_type_curr = 0U;
7170 		struct pdu_adv_com_ext_adv *p;
7171 		uint8_t *adv_addr_curr = NULL;
7172 		uint8_t data_len_curr = 0U;
7173 		uint8_t *data_curr = NULL;
7174 		struct pdu_adv_ext_hdr *h;
7175 		uint8_t sec_phy_curr = 0U;
7176 		uint8_t evt_type_curr;
7177 		uint8_t hdr_buf_len;
7178 		uint8_t hdr_len;
7179 		uint8_t *ptr;
7180 
7181 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
7182 		bool direct_report_curr = node_rx_curr->rx_ftr.direct;
7183 #endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
7184 
7185 #if defined(CONFIG_BT_CTLR_PRIVACY)
7186 		uint8_t rl_idx_curr = node_rx_curr->rx_ftr.rl_idx;
7187 
7188 		direct_resolved_curr = node_rx_curr->rx_ftr.direct_resolved;
7189 #endif /* CONFIG_BT_CTLR_PRIVACY */
7190 
7191 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7192 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7193 		const bool devmatch_curr = node_rx_curr->rx_ftr.devmatch;
7194 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7195 
7196 		/* The Link Layer currently returns RSSI as an absolute value */
7197 		rssi = -(node_rx_curr->rx_ftr.rssi);
7198 
7199 		LOG_DBG("phy= 0x%x, type= 0x%x, len= %u, tat= %u, rat= %u,"
7200 		       " rssi=%d dB", phy, adv->type, adv->len, adv->tx_addr,
7201 		       adv->rx_addr, rssi);
7202 
7203 		p = (void *)&adv->adv_ext_ind;
7204 		h = (void *)p->ext_hdr_adv_data;
7205 		ptr = (void *)h;
7206 
7207 		LOG_DBG("    Ext. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);
7208 
7209 		evt_type_curr = p->adv_mode;
7210 
7211 		if (!p->ext_hdr_len) {
7212 			hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;
7213 
7214 			goto no_ext_hdr;
7215 		}
7216 
7217 		ptr = h->data;
7218 
7219 		if (h->adv_addr) {
7220 			/* AdvA is RFU in AUX_CHAIN_IND */
7221 			if (node_rx_curr == node_rx ||
7222 			    node_rx_curr == node_rx->rx_ftr.extra) {
7223 				bt_addr_le_t addr;
7224 
7225 				adv_addr_type_curr = adv->tx_addr;
7226 				adv_addr_curr = ptr;
7227 
7228 				addr.type = adv->tx_addr;
7229 				(void)memcpy(addr.a.val, ptr, sizeof(bt_addr_t));
7230 
7231 				LOG_DBG("    AdvA: %s", bt_addr_le_str(&addr));
7232 			}
7233 
7234 			ptr += BDADDR_SIZE;
7235 		}
7236 
7237 		if (h->tgt_addr) {
7238 			/* TargetA is RFU in AUX_CHAIN_IND */
7239 			if (node_rx_curr == node_rx ||
7240 			    node_rx_curr == node_rx->rx_ftr.extra) {
7241 				struct lll_scan *lll;
7242 				bt_addr_le_t addr;
7243 
7244 				lll = node_rx->rx_ftr.param;
7245 
7246 #if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
7247 				direct_addr_type_curr =
7248 					ext_adv_direct_addr_type(lll,
7249 								 direct_resolved_curr,
7250 								 direct_report_curr,
7251 								 adv->rx_addr, ptr);
7252 #else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7253 				direct_addr_type_curr =
7254 					ext_adv_direct_addr_type(lll,
7255 								 direct_resolved_curr,
7256 								 false, adv->rx_addr,
7257 								 ptr);
7258 #endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
7259 
7260 				direct_addr_curr = ptr;
7261 
7262 				addr.type = adv->rx_addr;
7263 				(void)memcpy(addr.a.val, direct_addr_curr,
7264 					     sizeof(bt_addr_t));
7265 
7266 				LOG_DBG("    TgtA: %s", bt_addr_le_str(&addr));
7267 			}
7268 
7269 			ptr += BDADDR_SIZE;
7270 		}
7271 
7272 		if (h->cte_info) {
7273 			/* CTEInfo is RFU */
7274 			ptr += 1;
7275 		}
7276 
7277 		if (h->adi) {
7278 			adi_curr = (void *)ptr;
7279 
7280 			ptr += sizeof(*adi);
7281 
7282 			LOG_DBG("    AdvDataInfo DID = 0x%x, SID = 0x%x",
7283 				PDU_ADV_ADI_DID_GET(adi_curr), PDU_ADV_ADI_SID_GET(adi_curr));
7284 		}
7285 
7286 		if (h->aux_ptr) {
7287 			struct pdu_adv_aux_ptr *aux_ptr;
7288 
7289 			/* AuxPtr is RFU for connectable or scannable AUX_ADV_IND */
7290 			if (node_rx_curr != node_rx->rx_ftr.extra ||
7291 			    evt_type_curr == 0U) {
7292 				uint8_t aux_phy;
7293 
7294 				aux_ptr = (void *)ptr;
7295 
7296 				/* Don't report if invalid phy or AUX_ADV_IND was not received
7297 				 * See BT Core 5.4, Vol 6, Part B, Section 4.4.3.5:
7298 				 * If the Controller does not listen for or does not receive the
7299 				 * AUX_ADV_IND PDU, no report shall be generated
7300 				 */
7301 				if ((node_rx_curr == node_rx && !node_rx_next) ||
7302 				    PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
7303 					struct node_rx_ftr *ftr;
7304 
7305 					ftr = &node_rx->rx_ftr;
7306 					node_rx_extra_list_release(ftr->extra);
7307 					return;
7308 				}
7309 
7310 
7311 				sec_phy_curr = HCI_AUX_PHY_TO_HCI_PHY(
7312 					PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7313 
7314 				aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
7315 
7316 				LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
7317 				       "= %u offs = 0x%x, phy = 0x%x",
7318 				       aux_ptr->chan_idx, aux_ptr->ca,
7319 				       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr),
7320 				       aux_phy);
7321 			}
7322 
7323 			ptr += sizeof(*aux_ptr);
7324 		}
7325 
7326 		if (h->sync_info) {
7327 			struct pdu_adv_sync_info *si;
7328 
7329 			si = (void *)ptr;
7330 			ptr += sizeof(*si);
7331 
7332 			interval_le16 = si->interval;
7333 
7334 			LOG_DBG("    SyncInfo offs = %u, offs_unit = 0x%x, "
7335 			       "interval = 0x%x, sca = 0x%x, "
7336 			       "chan map = 0x%x 0x%x 0x%x 0x%x 0x%x, "
7337 			       "AA = 0x%x%x%x%x, CRC = 0x%x 0x%x 0x%x, "
7338 			       "evt cntr = 0x%x",
7339 			       PDU_ADV_SYNC_INFO_OFFSET_GET(si),
7340 			       PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si),
7341 			       sys_le16_to_cpu(si->interval),
7342 			       ((si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7343 				 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
7344 				PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS),
7345 			       si->sca_chm[0], si->sca_chm[1], si->sca_chm[2],
7346 			       si->sca_chm[3],
7347 			       (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
7348 				~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK),
7349 			       si->aa[3], si->aa[2], si->aa[1], si->aa[0],
7350 			       si->crc_init[0], si->crc_init[1],
7351 			       si->crc_init[2], sys_le16_to_cpu(si->evt_cntr));
7352 		}
7353 
7354 		if (h->tx_pwr) {
7355 			tx_pwr_curr = *(int8_t *)ptr;
7356 			ptr++;
7357 
7358 			LOG_DBG("    Tx pwr= %d dB", tx_pwr_curr);
7359 		}
7360 
7361 		hdr_len = ptr - (uint8_t *)p;
7362 		hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
7363 		if (hdr_len > hdr_buf_len) {
7364 			LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
7365 		} else {
7366 			uint8_t acad_len = hdr_buf_len - hdr_len;
7367 
7368 			if (acad_len) {
7369 				ptr += acad_len;
7370 				hdr_len += acad_len;
7371 			}
7372 		}
7373 
7374 no_ext_hdr:
7375 		if (hdr_len < adv->len) {
7376 			data_len_curr = adv->len - hdr_len;
7377 			data_curr = ptr;
7378 
7379 			LOG_DBG("    AD Data (%u): <todo>", data_len);
7380 		}
7381 
7382 		if (data_len_total + data_len_curr > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
7383 			/* Truncating advertising data
7384 			 * Note that this has to be done at a PDU boundary, so stop
7385 			 * processing nodes from this one forward
7386 			 */
7387 			if (scan_data) {
7388 				scan_data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7389 			} else {
7390 				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7391 			}
7392 			break;
7393 		}
7394 
7395 		if (node_rx_curr == node_rx) {
7396 			evt_type = evt_type_curr;
7397 			adv_addr_type = adv_addr_type_curr;
7398 			adv_addr = adv_addr_curr;
7399 			direct_addr_type = direct_addr_type_curr;
7400 			direct_addr = direct_addr_curr;
7401 			adi = adi_curr;
7402 			sec_phy = sec_phy_curr;
7403 			node_rx_data = node_rx_curr;
7404 			/* Adv data in ADV_EXT_IND is RFU */
7405 			data_len = 0U;
7406 			data_len_total = 0U;
7407 			data = NULL;
7408 			scan_data_len_total = 0U;
7409 			tx_pwr = tx_pwr_curr;
7410 
7411 #if defined(CONFIG_BT_CTLR_PRIVACY)
7412 			rl_idx = rl_idx_curr;
7413 #endif /* CONFIG_BT_CTLR_PRIVACY */
7414 
7415 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7416 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7417 			devmatch = devmatch_curr;
7418 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7419 
7420 		} else {
7421 			/* TODO: Validate current value with previous */
7422 
7423 			/* Detect the scan response in the list of node_rx */
7424 			if (node_rx_curr->rx_ftr.scan_rsp) {
7425 				node_rx_scan_data = node_rx_curr;
7426 				if (sec_phy_curr) {
7427 					sec_phy_scan = sec_phy_curr;
7428 				} else {
7429 					sec_phy_scan = sec_phy;
7430 				}
7431 				scan_data_len = data_len_curr;
7432 				scan_data = data_curr;
7433 				scan_rsp_tx_pwr = tx_pwr_curr;
7434 			}
7435 
7436 			if (!adv_addr) {
7437 				adv_addr_type = adv_addr_type_curr;
7438 				adv_addr = adv_addr_curr;
7439 			}
7440 
7441 			if (!direct_addr) {
7442 				direct_addr_type = direct_addr_type_curr;
7443 				direct_addr = direct_addr_curr;
7444 			}
7445 
7446 			if (scan_data) {
7447 				scan_data_len_total += data_len_curr;
7448 			} else if (!data) {
7449 				node_rx_data = node_rx_curr;
7450 				data_len = data_len_curr;
7451 				data_len_total = data_len;
7452 				data = data_curr;
7453 				tx_pwr = tx_pwr_curr;
7454 			} else {
7455 				data_len_total += data_len_curr;
7456 			}
7457 
7458 #if defined(CONFIG_BT_CTLR_PRIVACY)
7459 			if (rl_idx >= ll_rl_size_get()) {
7460 				rl_idx = rl_idx_curr;
7461 			}
7462 #endif /* CONFIG_BT_CTLR_PRIVACY */
7463 
7464 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC) && \
7465 	defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
7466 			if (!devmatch) {
7467 				devmatch = devmatch_curr;
7468 			}
7469 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC && CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
7470 		}
7471 
7472 		if (!node_rx_next) {
7473 			bool has_aux_ptr = !!sec_phy_curr;
7474 
7475 			if (scan_data) {
7476 				if (has_aux_ptr) {
7477 					scan_data_status =
7478 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7479 				}
7480 			} else if (has_aux_ptr) {
7481 				data_status =
7482 				  BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
7483 			}
7484 
7485 			break;
7486 		}
7487 
7488 		node_rx_curr = node_rx_next;
7489 		node_rx_next = node_rx_curr->rx_ftr.extra;
7490 		adv = (void *)node_rx_curr->pdu;
7491 	} while (1);
7492 
7493 	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
7494 	    IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) &&
7495 	    !devmatch) {
7496 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7497 		return;
7498 	}
7499 
7500 #if CONFIG_BT_CTLR_DUP_FILTER_LEN > 0
7501 	if (adv_addr) {
7502 		if (dup_scan &&
7503 		    dup_found(PDU_ADV_TYPE_EXT_IND, adv_addr_type, adv_addr,
7504 			      (evt_type & BIT_MASK(2)), adi, data_status)) {
7505 			node_rx_extra_list_release(node_rx->rx_ftr.extra);
7506 			return;
7507 		}
7508 	}
7509 #endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 */
7510 
7511 	/* If data incomplete */
7512 	if (data_status) {
7513 		/* Data incomplete and no more to come */
7514 		if (!(adv_addr ||
7515 		      (adi && ((tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) ||
7516 			       data)))) {
7517 			/* No device address and no valid AD data parsed or
7518 			 * Tx Power present for this PDU chain that has ADI,
7519 			 * skip HCI event generation.
7520 			 * In other terms, generate HCI event if device address
7521 			 * is present or if Tx pwr and/or data is present from
7522 			 * anonymous device.
7523 			 */
7524 			node_rx_extra_list_release(node_rx->rx_ftr.extra);
7525 			return;
7526 		}
7527 	}
7528 
7529 	/* Set directed advertising bit */
7530 	if (direct_addr) {
7531 		evt_type |= BT_HCI_LE_ADV_EVT_TYPE_DIRECT;
7532 	}
7533 
7534 	/* HCI fragment */
7535 	evt_buf = buf;
7536 	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
7537 		       sizeof(struct bt_hci_evt_le_meta_event) -
7538 		       sizeof(struct bt_hci_evt_le_ext_advertising_report) -
7539 		       sizeof(struct bt_hci_evt_le_ext_advertising_info);
7540 
7541 	/* If PDU data length less than total data length or PDU data length
7542 	 * greater than maximum HCI event data length, then fragment.
7543 	 */
7544 	if ((data_len < data_len_total) || (data_len > data_len_max)) {
7545 		ext_adv_data_frag(node_rx_data, evt_type, phy, &sec_phy,
7546 				  adv_addr_type, adv_addr, direct_addr_type,
7547 				  direct_addr, rl_idx, &tx_pwr, rssi,
7548 				  interval_le16, adi, data_len_max,
7549 				  data_len_total, &data_len, &data, buf,
7550 				  &evt_buf);
7551 	}
7552 
7553 	/* Set data status bits */
7554 	evt_type |= (data_status << 5);
7555 
7556 	/* Start constructing the adv event for remainder of the PDU data */
7557 	ext_adv_info_fill(evt_type, phy, sec_phy, adv_addr_type, adv_addr,
7558 			  direct_addr_type, direct_addr, rl_idx, tx_pwr, rssi,
7559 			  interval_le16, adi, data_len, data, evt_buf);
7560 
7561 	/* If scan response event to be constructed */
7562 	if (!scan_data) {
7563 		node_rx_extra_list_release(node_rx->rx_ftr.extra);
7564 
7565 		return;
7566 	}
7567 
7568 	/* Set scan response bit */
7569 	evt_type |= BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP;
7570 
7571 	/* Clear the data status bits */
7572 	evt_type &= ~(BIT_MASK(2) << 5);
7573 
7574 	/* Allocate, append as buf fragment and construct the scan response
7575 	 * event.
7576 	 */
7577 	evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
7578 	LL_ASSERT(evt_buf);
7579 
7580 	net_buf_frag_add(buf, evt_buf);
7581 
7582 	/* If PDU data length less than total data length or PDU data length
7583 	 * greater than maximum HCI event data length, then fragment.
7584 	 */
7585 	if ((scan_data_len < scan_data_len_total) ||
7586 	    (scan_data_len > data_len_max)) {
7587 		ext_adv_data_frag(node_rx_scan_data, evt_type, phy,
7588 				  &sec_phy_scan, adv_addr_type, adv_addr,
7589 				  direct_addr_type, direct_addr, rl_idx,
7590 				  &scan_rsp_tx_pwr, rssi, interval_le16, adi,
7591 				  data_len_max, scan_data_len_total,
7592 				  &scan_data_len, &scan_data, buf, &evt_buf);
7593 	}
7594 
7595 	/* set scan data status bits */
7596 	evt_type |= (scan_data_status << 5);
7597 
7598 	/* Start constructing the event for remainder of the PDU data */
7599 	ext_adv_info_fill(evt_type, phy, sec_phy_scan, adv_addr_type, adv_addr,
7600 			  direct_addr_type, direct_addr, rl_idx,
7601 			  scan_rsp_tx_pwr, rssi, interval_le16, adi,
7602 			  scan_data_len, scan_data, evt_buf);
7603 
7604 	node_rx_extra_list_release(node_rx->rx_ftr.extra);
7605 }
7606 
7607 static void le_adv_ext_report(struct pdu_data *pdu_data,
7608 			      struct node_rx_pdu *node_rx,
7609 			      struct net_buf *buf, uint8_t phy)
7610 {
7611 	struct pdu_adv *adv = (void *)pdu_data;
7612 
7613 	if ((adv->type == PDU_ADV_TYPE_EXT_IND) && adv->len) {
7614 		le_ext_adv_report(pdu_data, node_rx, buf, phy);
7615 	} else {
7616 		le_ext_adv_legacy_report(pdu_data, node_rx, buf);
7617 	}
7618 }
7619 
/* Encode an advertising report for a PDU received on the LE 1M PHY. */
static void le_adv_ext_1M_report(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_1M);
}
7626 
/* Encode an advertising report for a PDU received on the LE 2M PHY. */
static void le_adv_ext_2M_report(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_2M);
}
7633 
/* Encode an advertising report for a PDU received on the LE Coded PHY. */
static void le_adv_ext_coded_report(struct pdu_data *pdu_data,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	le_adv_ext_report(pdu_data, node_rx, buf, BT_HCI_LE_EXT_SCAN_PHY_CODED);
}
7640 
7641 static void le_scan_timeout(struct pdu_data *pdu_data,
7642 			    struct node_rx_pdu *node_rx, struct net_buf *buf)
7643 {
7644 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
7645 	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_TIMEOUT)) {
7646 		return;
7647 	}
7648 
7649 	meta_evt(buf, BT_HCI_EVT_LE_SCAN_TIMEOUT, 0U);
7650 }
7651 
7652 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* Encode the HCI LE Periodic Advertising Sync Established meta event.
 *
 * The node_rx pdu field aliases a struct node_rx_sync carrying the sync
 * establishment status, PHY, interval and SCA; node_rx->rx_ftr.param
 * points at the associated ll_sync_set. No event is generated when the
 * LE meta event or this specific event is masked by the Host. For a
 * host-cancelled sync attempt only the status field is filled in.
 */
static void le_per_adv_sync_established(struct pdu_data *pdu_data,
					struct node_rx_pdu *node_rx,
					struct net_buf *buf)
{
	struct bt_hci_evt_le_per_adv_sync_established *sep;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED)) {
		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		       sizeof(*sep));

	/* Check for pdu field being aligned before accessing sync established
	 * event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync));

	se = node;
	sep->status = se->status;

	/* Cancellation by the Host carries no further parameters; the
	 * remaining event fields are left as produced by meta_evt().
	 */
	if (se->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
		return;
	}

	sync = node_rx->rx_ftr.param;

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	/* Restart duplicate filtering for this periodic advertiser now that
	 * a (new) sync is established.
	 */
	dup_periodic_adv_reset(sync->peer_id_addr_type, sync->peer_id_addr, sync->sid);
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);

	/* Resolved address, if private, has been populated in ULL */
	sep->adv_addr.type = sync->peer_id_addr_type;
	if (sync->peer_addr_resolved) {
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(sep->adv_addr.type);
	}
	(void)memcpy(sep->adv_addr.a.val, sync->peer_id_addr, BDADDR_SIZE);

	sep->sid = sync->sid;
	/* Convert the PHY bit (1, 2 or 4) to the HCI PHY enumeration. */
	sep->phy = find_lsb_set(se->phy);
	sep->interval = sys_cpu_to_le16(se->interval);
	sep->clock_accuracy = se->sca;
}
7707 
/* Encode HCI LE Periodic Advertising Report and/or LE BIGInfo Advertising
 * Report meta events from a received AUX_SYNC_IND/AUX_CHAIN_IND PDU.
 *
 * Parses the extended header of the PDU to extract CTE type, ADI, AuxPtr,
 * Tx Power, ACAD and AD data, then:
 *  - emits (possibly fragmented) periodic advertising reports when that
 *    event is unmasked and the report is accepted (rx enabled and, when
 *    duplicate filtering applies, not a duplicate), and
 *  - emits a BIGInfo advertising report when that event is unmasked and
 *    the ACAD carries a BIGInfo AD structure.
 *
 * Additional event buffers are allocated and chained as fragments of the
 * caller supplied buf when more than one HCI event is needed.
 */
static void le_per_adv_sync_report(struct pdu_data *pdu_data,
				   struct node_rx_pdu *node_rx,
				   struct net_buf *buf)
{
	struct node_rx_ftr *ftr = &node_rx->rx_ftr;
	int8_t tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
	struct pdu_adv *adv = (void *)pdu_data;
	struct pdu_adv_aux_ptr *aux_ptr = NULL;
	const struct pdu_adv_adi *adi = NULL;
	uint8_t cte_type = BT_HCI_LE_NO_CTE;
	const struct ll_sync_set *sync;
	struct pdu_adv_com_ext_adv *p;
	struct pdu_adv_ext_hdr *h;
	uint16_t data_len_total;
	struct net_buf *evt_buf;
	uint8_t data_len = 0U;
	uint8_t acad_len = 0U;
	uint8_t *data = NULL;
	uint8_t data_len_max;
	uint8_t *acad = NULL;
	uint8_t hdr_buf_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	int8_t rssi;
	bool accept;

	/* Nothing to do when neither of the two possible events is
	 * unmasked by the Host.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	     !(le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT))) {
		return;
	}

	/* NOTE: The timeout_reload field in the sync context is checked under
	 *       race condition between HCI Tx and Rx thread wherein a sync
	 *       terminate was performed which resets the timeout_reload field
	 *       before releasing the sync context back into its memory pool.
	 *       It is important that timeout_reload field is at safe offset
	 *       inside the sync context such that it is not corrupt while being
	 *       in the memory pool.
	 *
	 *       This check ensures reports are not sent out after sync
	 *       terminate.
	 */
	sync = HDR_LLL2ULL(ftr->param);
	if (unlikely(!sync->timeout_reload)) {
		return;
	}

	data_len_total = ftr->aux_data_len;

	/* Chain reception failed or exceeded the configured scan data
	 * maximum: report a zero-length, data-incomplete event and stop.
	 */
	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) &&
	    (ftr->aux_failed || data_len_total > CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
		struct bt_hci_evt_le_per_advertising_report *sep;

		sep = meta_evt(buf,
			       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
			       sizeof(*sep));

		sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
		sep->tx_power = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		sep->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
		sep->cte_type = BT_HCI_LE_NO_CTE;
		sep->data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
		sep->length = 0;

		return;
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	rssi = -(ftr->rssi);

	LOG_DBG("len = %u, rssi = %d", adv->len, rssi);

	/* Walk the extended header: fields are laid out in the fixed order
	 * defined by the Common Extended Advertising Payload format.
	 */
	p = (void *)&adv->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = (void *)h;

	LOG_DBG("    Per. adv mode= 0x%x, hdr len= %u", p->adv_mode, p->ext_hdr_len);

	if (!p->ext_hdr_len) {
		hdr_len = PDU_AC_EXT_HEADER_SIZE_MIN;

		goto no_ext_hdr;
	}

	ptr = h->data;

	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		struct pdu_cte_info *cte_info;

		cte_info = (void *)ptr;
		cte_type = cte_info->type;
		ptr++;

		LOG_DBG("    CTE type= %d", cte_type);
	}

	if (h->adi) {
		adi = (void *)ptr;

		ptr += sizeof(struct pdu_adv_adi);
	}

	/* AuxPtr */
	if (h->aux_ptr) {
		uint8_t aux_phy;

		aux_ptr = (void *)ptr;
		/* Discard the PDU outright on a reserved/invalid aux PHY. */
		if (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) {
			return;
		}

		ptr += sizeof(*aux_ptr);

		aux_phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));

		LOG_DBG("    AuxPtr chan_idx = %u, ca = %u, offs_units "
		       "= %u offs = 0x%x, phy = 0x%x",
		       aux_ptr->chan_idx, aux_ptr->ca,
		       aux_ptr->offs_units, PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr), aux_phy);
	}

	/* No SyncInfo */
	if (h->sync_info) {
		ptr += sizeof(struct pdu_adv_sync_info);
	}

	/* Tx Power */
	if (h->tx_pwr) {
		tx_pwr = *(int8_t *)ptr;
		ptr++;

		LOG_DBG("    Tx pwr= %d dB", tx_pwr);
	}

	/* Whatever remains of the extended header after the flagged fields
	 * is the ACAD (which may carry a BIGInfo AD structure).
	 */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		LOG_WRN("    Header length %u/%u, INVALID.", hdr_len, p->ext_hdr_len);
	} else {
		acad_len = hdr_buf_len - hdr_len;
		if (acad_len) {
			acad = ptr;

			ptr += acad_len;
			hdr_len += acad_len;
		}
	}

no_ext_hdr:
	/* Bytes past the extended header up to the PDU length are AD data. */
	if (hdr_len < adv->len) {
		data_len = adv->len - hdr_len;
		data = ptr;

		LOG_DBG("    AD Data (%u): <todo>", data_len);
	}

	/* Decide whether the periodic advertising report is accepted:
	 * reports must be enabled on the sync, and with ADI-based duplicate
	 * filtering configured the PDU must not be a known duplicate.
	 */
	if (0) {

#if (CONFIG_BT_CTLR_DUP_FILTER_LEN > 0) && \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
		   adi) {
		uint8_t data_status;

		data_status = (aux_ptr) ?
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL :
			      BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;

		accept = sync->rx_enable && ftr->sync_rx_enabled &&
			 (!sync->nodups ||
			  !dup_found(PDU_ADV_TYPE_EXT_IND,
				     sync->peer_id_addr_type,
				     sync->peer_id_addr,
				     DUP_EXT_ADV_MODE_PERIODIC,
				     adi, data_status));
#endif /* CONFIG_BT_CTLR_DUP_FILTER_LEN > 0 &&
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	} else {
		accept = sync->rx_enable && ftr->sync_rx_enabled;
	}

	/* Maximum AD data payload that fits one HCI event buffer. */
	data_len_max = CONFIG_BT_BUF_EVT_RX_SIZE -
		       sizeof(struct bt_hci_evt_le_meta_event) -
		       sizeof(struct bt_hci_evt_le_per_advertising_report);

	evt_buf = buf;

	if ((le_event_mask & BT_EVT_MASK_LE_PER_ADVERTISING_REPORT) && accept) {

		/* Pass verdict in LL.TS.p19 section 4.2.3.6 Extended Scanning,
		 * Passive, Periodic Advertising Report, RSSI and TX_Power
		 * states:
		 * TX_Power is set to value of the TxPower field for the
		 * AUX_SYNC_IND received, and RSSI set to a valid value.
		 * Subsequent reports with data and the status set to
		 * "Incomplete, more data to come" or "complete" can have the
		 * TX_Power field set to 0x7F.
		 *
		 * In the implementation data_len_total is the running total
		 * AD data length so far, data_len is the current PDU's AD data
		 * length. For AUX_SYNC_IND received, data_len_total ==
		 * data_len.
		 */
		if (data_len_total > data_len) {
			/* Subsequent reports */
			tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
		}

		/* Fragment this PDU's AD data across as many HCI events as
		 * needed, chaining extra buffers onto buf.
		 */
		do {
			struct bt_hci_evt_le_per_advertising_report *sep;
			uint8_t data_len_frag;
			uint8_t data_status;

			data_len_frag = MIN(data_len, data_len_max);

			/* Start constructing periodic advertising report */
			sep = meta_evt(evt_buf,
				       BT_HCI_EVT_LE_PER_ADVERTISING_REPORT,
				       sizeof(*sep) + data_len_frag);

			sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
			sep->tx_power = tx_pwr;
			sep->rssi = rssi;
			sep->cte_type = cte_type;
			sep->length = data_len_frag;
			memcpy(&sep->data[0], data, data_len_frag);

			data += data_len_frag;
			data_len -= data_len_frag;

			if (data_len > 0) {
				/* Some data left in PDU, mark as partial data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;

				evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
				LL_ASSERT(evt_buf);

				net_buf_frag_add(buf, evt_buf);

				tx_pwr = BT_HCI_LE_ADV_TX_POWER_NO_PREF;
			} else if (!aux_ptr) {
				/* No data left, no AuxPtr, mark as complete data. */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
			} else if (ftr->aux_sched &&
				   (data_len_total < CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX)) {
				/* No data left, but have AuxPtr and scheduled aux scan,
				 * mark as partial data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
			} else {
				/* No data left, have AuxPtr but not aux scan scheduled,
				 * mark as incomplete data.
				 */
				data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE;
			}

			sep->data_status = data_status;
		} while (data_len > 0);

		/* Signal below that buf's last fragment has been consumed. */
		evt_buf = NULL;
	}

	if ((le_event_mask & BT_EVT_MASK_LE_BIGINFO_ADV_REPORT) && acad &&
	    (acad_len >= (PDU_BIG_INFO_CLEARTEXT_SIZE +
			  PDU_ADV_DATA_HEADER_SIZE))) {
		struct bt_hci_evt_le_biginfo_adv_report *sep;
		struct pdu_big_info *bi;
		uint8_t bi_size;
		uint8_t phy;

		/* FIXME: Parse and find the BIGInfo */
		if (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] != BT_DATA_BIG_INFO) {
			return;
		}

		bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

		/* Do not report if phy is invalid or unsupported */
		phy = (bi->chm_phy[4] >> 5);
		if ((phy > EXT_ADV_AUX_PHY_LE_CODED) ||
			(!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
			 (phy == EXT_ADV_AUX_PHY_LE_CODED))) {
			return;
		}

		/* Allocate new event buffer if periodic advertising report was
		 * constructed with the caller supplied buffer.
		 */
		if (!evt_buf) {
			evt_buf = bt_buf_get_rx(BT_BUF_EVT, BUF_GET_TIMEOUT);
			LL_ASSERT(evt_buf);

			net_buf_frag_add(buf, evt_buf);
		}

		/* Start constructing BIGInfo  advertising report */
		sep = meta_evt(evt_buf, BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
			       sizeof(*sep));

		sep->sync_handle = sys_cpu_to_le16(node_rx->hdr.handle);

		/* NOTE: both sep and bi struct store little-endian values.
		 *       Multi-byte variables extracted using
		 *       PDU_BIG_INFO_ISO_*_GET macros, which return
		 *       value in host-endianness, require conversion.
		 */
		sep->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
		sep->nse = PDU_BIG_INFO_NSE_GET(bi);
		sep->iso_interval =
			sys_cpu_to_le16(PDU_BIG_INFO_ISO_INTERVAL_GET(bi));
		sep->bn = PDU_BIG_INFO_BN_GET(bi);
		sep->pto = PDU_BIG_INFO_PTO_GET(bi);
		sep->irc = PDU_BIG_INFO_IRC_GET(bi);

		sep->max_pdu = sys_cpu_to_le16(bi->max_pdu);
		sys_put_le24(PDU_BIG_INFO_SDU_INTERVAL_GET(bi),
			sep->sdu_interval);
		sep->max_sdu = sys_cpu_to_le16(PDU_BIG_INFO_MAX_SDU_GET(bi));
		sep->phy = HCI_AUX_PHY_TO_HCI_PHY(bi->chm_phy[4] >> 5);
		sep->framing = (bi->payload_count_framing[4] >> 7) & 0x01;
		/* An extra byte over the cleartext size implies the encrypted
		 * BIGInfo format (GIV/GSKD present).
		 */
		if (bi_size == (PDU_BIG_INFO_ENCRYPTED_SIZE + 1)) {
			sep->encryption = 1U;
		} else {
			sep->encryption = 0U;
		}
	}
}
8047 
8048 static void le_per_adv_sync_lost(struct pdu_data *pdu_data,
8049 				 struct node_rx_pdu *node_rx,
8050 				 struct net_buf *buf)
8051 {
8052 	struct bt_hci_evt_le_per_adv_sync_lost *sep;
8053 
8054 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8055 	    !(le_event_mask & BT_EVT_MASK_LE_PER_ADV_SYNC_LOST)) {
8056 		return;
8057 	}
8058 
8059 	sep = meta_evt(buf, BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, sizeof(*sep));
8060 	sep->handle = sys_cpu_to_le16(node_rx->hdr.handle);
8061 }
8062 
8063 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
/* Encode the HCI LE BIG Sync Established meta event.
 *
 * The pdu field aliases a struct node_rx_sync_iso carrying the sync
 * status; node_rx->rx_ftr.param points at the ll_sync_iso_set. On
 * success the event carries the BIG parameters, the computed transport
 * latency, and one connection handle per synchronized BIS. Nothing is
 * generated when the event is masked by the Host; on failure only the
 * status (and handle) are reported.
 */
static void le_big_sync_established(struct pdu_data *pdu,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	struct bt_hci_evt_le_big_sync_established *sep;
	struct ll_sync_iso_set *sync_iso;
	uint32_t transport_latency_big;
	struct node_rx_sync_iso *se;
	struct lll_sync_iso *lll;
	uint32_t iso_interval_us;
	uint32_t big_sync_delay;
	size_t evt_size;
	void *node;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED)) {
		return;
	}

	sync_iso = node_rx->rx_ftr.param;
	lll = &sync_iso->lll;

	/* Variable-length tail: one le16 connection handle per BIS. */
	evt_size = sizeof(*sep) + (lll->stream_count * sizeof(uint16_t));

	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED, evt_size);
	sep->big_handle = (uint8_t)node_rx->hdr.handle;

	/* Check for pdu field being aligned before accessing ISO sync
	 * established event.
	 */
	node = pdu;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_sync_iso));

	se = node;
	sep->status = se->status;
	if (sep->status) {
		return;
	}

	/* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
	 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
	 *
	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.1: (Framed)
	 * Transport_Latency_BIG = BIG_Sync_Delay + PTO × (NSE / BN – IRC) * ISO_Interval +
	 *                             ISO_Interval + SDU_Interval
	 *
	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.2: (Unframed)
	 * Transport_Latency_BIG = BIG_Sync_Delay + (PTO × (NSE / BN – IRC) + 1) * ISO_Interval -
	 *                             SDU_Interval
	 */
	iso_interval_us = lll->iso_interval * ISO_INT_UNIT_US;
	big_sync_delay = ull_iso_big_sync_delay(lll->num_bis, lll->bis_spacing, lll->nse,
						lll->sub_interval, lll->phy, lll->max_pdu,
						lll->enc);
	if (lll->framing) {
		/* Framed */
		transport_latency_big = big_sync_delay +
					lll->pto * (lll->nse / lll->bn - lll->irc) *
					iso_interval_us + iso_interval_us + lll->sdu_interval;
	} else {
		/* Unframed */
		transport_latency_big = big_sync_delay +
					(lll->pto * (lll->nse / lll->bn - lll->irc) + 1) *
					iso_interval_us - lll->sdu_interval;
	}

	sys_put_le24(transport_latency_big, sep->latency);
	sep->nse = lll->nse;
	sep->bn = lll->bn;
	sep->pto = lll->pto;
	sep->irc = lll->irc;
	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
	sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
	sep->num_bis = lll->stream_count;

	/* Connection handle list of all BISes synchronized in the BIG */
	for (uint8_t i = 0U; i < lll->stream_count; i++) {
		uint16_t handle;

		handle = LL_BIS_SYNC_HANDLE_FROM_IDX(lll->stream_handle[i]);
		sep->handle[i] = sys_cpu_to_le16(handle);
	}
}
8147 
8148 static void le_big_sync_lost(struct pdu_data *pdu,
8149 			     struct node_rx_pdu *node_rx,
8150 			     struct net_buf *buf)
8151 {
8152 	struct bt_hci_evt_le_big_sync_lost *sep;
8153 
8154 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8155 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_SYNC_LOST)) {
8156 		return;
8157 	}
8158 
8159 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_SYNC_LOST, sizeof(*sep));
8160 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8161 	sep->reason = *((uint8_t *)pdu);
8162 }
8163 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
8164 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
8165 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8166 #endif /* CONFIG_BT_OBSERVER */
8167 
8168 #if defined(CONFIG_BT_BROADCASTER)
8169 #if defined(CONFIG_BT_CTLR_ADV_EXT)
8170 static void le_adv_ext_terminate(struct pdu_data *pdu_data,
8171 				    struct node_rx_pdu *node_rx,
8172 				    struct net_buf *buf)
8173 {
8174 	struct bt_hci_evt_le_adv_set_terminated *sep;
8175 
8176 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8177 	    !(le_event_mask & BT_EVT_MASK_LE_ADV_SET_TERMINATED)) {
8178 		return;
8179 	}
8180 
8181 	sep = meta_evt(buf, BT_HCI_EVT_LE_ADV_SET_TERMINATED, sizeof(*sep));
8182 	sep->status = node_rx->rx_ftr.param_adv_term.status;
8183 	sep->adv_handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
8184 	sep->conn_handle =
8185 		sys_cpu_to_le16(node_rx->rx_ftr.param_adv_term.conn_handle);
8186 	sep->num_completed_ext_adv_evts =
8187 		node_rx->rx_ftr.param_adv_term.num_events;
8188 }
8189 
8190 #if defined(CONFIG_BT_CTLR_ADV_ISO)
8191 static void le_big_complete(struct pdu_data *pdu_data,
8192 			    struct node_rx_pdu *node_rx,
8193 			    struct net_buf *buf)
8194 {
8195 	struct bt_hci_evt_le_big_complete *sep;
8196 	uint32_t transport_latency_big;
8197 	struct ll_adv_iso_set *adv_iso;
8198 	uint32_t iso_interval_us;
8199 	struct lll_adv_iso *lll;
8200 	uint32_t big_sync_delay;
8201 	size_t evt_size;
8202 
8203 	adv_iso = node_rx->rx_ftr.param;
8204 	lll = &adv_iso->lll;
8205 
8206 	evt_size = sizeof(*sep) + (lll->num_bis * sizeof(uint16_t));
8207 
8208 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_COMPLETE, evt_size);
8209 
8210 	sep->status = BT_HCI_ERR_SUCCESS;
8211 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8212 
8213 	if (sep->status) {
8214 		return;
8215 	}
8216 
8217 	/* BT Core v5.4 - Vol 6, Part B, Section 4.4.6.4:
8218 	 * BIG_Sync_Delay = (Num_BIS – 1) × BIS_Spacing + (NSE – 1) × Sub_Interval + MPT.
8219 	 *
8220 	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.1: (Framed)
8221 	 * Transport_Latenct_BIG = BIG_Sync_Delay + PTO × (NSE / BN – IRC) * ISO_Interval +
8222 	 *                             ISO_Interval + SDU_Interval
8223 	 *
8224 	 * BT Core v5.4 - Vol 6, Part G, Section 3.2.2: (Unframed)
8225 	 * Transport_Latenct_BIG = BIG_Sync_Delay + (PTO × (NSE / BN – IRC) + 1) * ISO_Interval -
8226 	 *                             SDU_Interval
8227 	 */
8228 	iso_interval_us = lll->iso_interval * ISO_INT_UNIT_US;
8229 	big_sync_delay = ull_iso_big_sync_delay(lll->num_bis, lll->bis_spacing, lll->nse,
8230 						lll->sub_interval, lll->phy, lll->max_pdu,
8231 						lll->enc);
8232 	sys_put_le24(big_sync_delay, sep->sync_delay);
8233 
8234 	if (lll->framing) {
8235 		/* Framed */
8236 		transport_latency_big = big_sync_delay +
8237 					lll->pto * (lll->nse / lll->bn - lll->irc) *
8238 					iso_interval_us + iso_interval_us + lll->sdu_interval;
8239 	} else {
8240 		/* Unframed */
8241 		transport_latency_big = big_sync_delay +
8242 					(lll->pto * (lll->nse / lll->bn - lll->irc) + 1) *
8243 					iso_interval_us - lll->sdu_interval;
8244 	}
8245 
8246 	sys_put_le24(transport_latency_big, sep->latency);
8247 
8248 	sep->phy = find_lsb_set(lll->phy);
8249 	sep->nse = lll->nse;
8250 	sep->bn = lll->bn;
8251 	sep->pto = lll->pto;
8252 	sep->irc = lll->irc;
8253 	sep->max_pdu = sys_cpu_to_le16(lll->max_pdu);
8254 	sep->iso_interval = sys_cpu_to_le16(lll->iso_interval);
8255 	sep->num_bis = lll->num_bis;
8256 
8257 	/* Connection handle list of all BISes in the BIG */
8258 	for (uint8_t i = 0U; i < lll->num_bis; i++) {
8259 		uint16_t handle;
8260 
8261 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(lll->stream_handle[i]);
8262 		sep->handle[i] = sys_cpu_to_le16(handle);
8263 	}
8264 }
8265 
8266 static void le_big_terminate(struct pdu_data *pdu,
8267 			     struct node_rx_pdu *node_rx,
8268 			     struct net_buf *buf)
8269 {
8270 	struct bt_hci_evt_le_big_terminate *sep;
8271 
8272 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8273 	    !(le_event_mask & BT_EVT_MASK_LE_BIG_TERMINATED)) {
8274 		return;
8275 	}
8276 
8277 	sep = meta_evt(buf, BT_HCI_EVT_LE_BIG_TERMINATE, sizeof(*sep));
8278 	sep->big_handle = (uint8_t)node_rx->hdr.handle;
8279 	sep->reason = *((uint8_t *)pdu);
8280 }
8281 #endif /* CONFIG_BT_CTLR_ADV_ISO */
8282 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8283 #endif /* CONFIG_BT_BROADCASTER */
8284 
8285 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
8286 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Encode the HCI LE Scan Request Received meta event.
 *
 * When the event is masked by the Host the scan request is only logged
 * (debug builds) and dropped. With privacy enabled and the scanner's RPA
 * resolved, the identity address (marked as resolved, types 0x02/0x03)
 * is reported instead of the on-air address.
 */
static void le_scan_req_received(struct pdu_data *pdu_data,
				 struct node_rx_pdu *node_rx,
				 struct net_buf *buf)
{
	struct pdu_adv *adv = (void *)pdu_data;
	struct bt_hci_evt_le_scan_req_received *sep;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_SCAN_REQ_RECEIVED)) {
		bt_addr_le_t addr;
		uint8_t handle;
		int8_t rssi;

		handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
		addr.type = adv->tx_addr;
		memcpy(&addr.a.val[0], &adv->scan_req.scan_addr[0],
		       sizeof(bt_addr_t));

		/* The Link Layer currently returns RSSI as an absolute value */
		rssi = -(node_rx->rx_ftr.rssi);

		LOG_DBG("handle: %d, addr: %s, rssi: %d dB.", handle, bt_addr_le_str(&addr), rssi);

		return;
	}

	sep = meta_evt(buf, BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, sizeof(*sep));
	sep->handle = ll_adv_set_hci_handle_get(node_rx->hdr.handle & 0xff);
	sep->addr.type = adv->tx_addr;
	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
	       sizeof(bt_addr_t));

	/* The #if/#else below splices alternative branch heads onto the same
	 * closing brace: with privacy, the else-branch runs only when the
	 * address was not resolved; without privacy, it runs unconditionally.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
				  &sep->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(sep->addr.type);
	} else {
#else
	if (1) {
#endif
		sep->addr.type = adv->tx_addr;
		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}
}
8340 #endif /* CONFIG_BT_CTLR_ADV_EXT */
8341 
8342 #if defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
/* Encode the vendor-specific Scan Request Received event.
 *
 * Mirrors le_scan_req_received() but is gated on the vendor-specific
 * event mask and additionally carries the RSSI. With privacy enabled
 * and the scanner's RPA resolved, the identity address (marked as
 * resolved, types 0x02/0x03) is reported instead of the on-air address.
 */
static void le_vs_scan_req_received(struct pdu_data *pdu,
				    struct node_rx_pdu *node_rx,
				    struct net_buf *buf)
{
	struct pdu_adv *adv = (void *)pdu;
	struct bt_hci_evt_vs_scan_req_rx *sep;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx;
#endif

	if (!(vs_events_mask & BT_EVT_MASK_VS_SCAN_REQ_RX)) {
		return;
	}

	sep = vs_event(buf, BT_HCI_EVT_VS_SCAN_REQ_RX, sizeof(*sep));
	sep->addr.type = adv->tx_addr;
	memcpy(&sep->addr.a.val[0], &adv->scan_req.scan_addr[0],
	       sizeof(bt_addr_t));

	/* The #if/#else below splices alternative branch heads onto the same
	 * closing brace: with privacy, the else-branch runs only when the
	 * address was not resolved; without privacy, it runs unconditionally.
	 */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	rl_idx = node_rx->rx_ftr.rl_idx;
	if (rl_idx < ll_rl_size_get()) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &sep->addr.type,
				  &sep->addr.a.val[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(sep->addr.type);
	} else {
#else
	if (1) {
#endif
		sep->addr.type = adv->tx_addr;
		memcpy(&sep->addr.a.val[0], &adv->adv_ind.addr[0],
		       sizeof(bt_addr_t));
	}

	/* The Link Layer currently returns RSSI as an absolute value */
	sep->rssi = -(node_rx->rx_ftr.rssi);
}
8383 #endif /* CONFIG_BT_CTLR_VS_SCAN_REQ_RX */
8384 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
8385 
8386 #if defined(CONFIG_BT_CONN)
/* Encode either the LE Enhanced Connection Complete or the legacy LE
 * Connection Complete meta event, depending on the Host's event masks.
 *
 * The pdu_data field aliases a struct node_rx_cc carrying the connection
 * parameters. On success the global conn_count is incremented; with
 * privacy enabled the peer's current RPA is also recorded in the resolve
 * list. On failure only a zeroed event with the status is sent.
 */
static void le_conn_complete(struct pdu_data *pdu_data, uint16_t handle,
			     struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_complete *lecc;
	struct node_rx_cc *cc;
	uint8_t status;
	void *node;

	/* Check for pdu field being aligned before accessing connection
	 * complete event.
	 */
	node = pdu_data;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

	cc = node;
	status = cc->status;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!status) {
		/* Update current RPA */
		ll_rl_crpa_set(cc->peer_addr_type,
			       &cc->peer_addr[0], 0xff,
			       &cc->peer_rpa[0]);
	}
#endif

	/* Bail out when the LE meta event is masked, or when neither the
	 * legacy nor (where supported) the enhanced variant is unmasked.
	 * The #if/#else splices the enhanced-event check into the condition
	 * only when a build can generate it.
	 */
	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    (!(le_event_mask & BT_EVT_MASK_LE_CONN_COMPLETE) &&
#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	     !(le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE))) {
#else
	     1)) {
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */
		return;
	}

	if (!status) {
		conn_count++;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Enhanced variant takes precedence when unmasked by the Host. */
	if (le_event_mask & BT_EVT_MASK_LE_ENH_CONN_COMPLETE) {
		struct bt_hci_evt_le_enh_conn_complete *leecc;

		leecc = meta_evt(buf, BT_HCI_EVT_LE_ENH_CONN_COMPLETE,
				 sizeof(*leecc));

		if (status) {
			(void)memset(leecc, 0x00, sizeof(*leecc));
			leecc->status = status;
			return;
		}

		leecc->status = 0x00;
		leecc->handle = sys_cpu_to_le16(handle);
		leecc->role = cc->role;

		leecc->peer_addr.type = cc->peer_addr_type;
		memcpy(&leecc->peer_addr.a.val[0], &cc->peer_addr[0],
		       BDADDR_SIZE);

#if defined(CONFIG_BT_CTLR_PRIVACY)
		memcpy(&leecc->local_rpa.val[0], &cc->local_rpa[0],
		       BDADDR_SIZE);
		memcpy(&leecc->peer_rpa.val[0], &cc->peer_rpa[0],
		       BDADDR_SIZE);
#else /* !CONFIG_BT_CTLR_PRIVACY */
		/* Without privacy support no RPAs exist; report all-zeros. */
		memset(&leecc->local_rpa.val[0], 0, BDADDR_SIZE);
		memset(&leecc->peer_rpa.val[0], 0, BDADDR_SIZE);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		leecc->interval = sys_cpu_to_le16(cc->interval);
		leecc->latency = sys_cpu_to_le16(cc->latency);
		leecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
		leecc->clock_accuracy = cc->sca;
		return;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_ADV_EXT */

	lecc = meta_evt(buf, BT_HCI_EVT_LE_CONN_COMPLETE, sizeof(*lecc));

	if (status) {
		(void)memset(lecc, 0x00, sizeof(*lecc));
		lecc->status = status;
		return;
	}

	lecc->status = 0x00;
	lecc->handle = sys_cpu_to_le16(handle);
	lecc->role = cc->role;
	/* Legacy event only distinguishes public/random address types. */
	lecc->peer_addr.type = cc->peer_addr_type & 0x1;
	memcpy(&lecc->peer_addr.a.val[0], &cc->peer_addr[0], BDADDR_SIZE);
	lecc->interval = sys_cpu_to_le16(cc->interval);
	lecc->latency = sys_cpu_to_le16(cc->latency);
	lecc->supv_timeout = sys_cpu_to_le16(cc->timeout);
	lecc->clock_accuracy = cc->sca;
}
8484 
8485 void hci_disconn_complete_encode(struct pdu_data *pdu_data, uint16_t handle,
8486 				 struct net_buf *buf)
8487 {
8488 	struct bt_hci_evt_disconn_complete *ep;
8489 
8490 	if (!(event_mask & BT_EVT_MASK_DISCONN_COMPLETE)) {
8491 		return;
8492 	}
8493 
8494 	hci_evt_create(buf, BT_HCI_EVT_DISCONN_COMPLETE, sizeof(*ep));
8495 	ep = net_buf_add(buf, sizeof(*ep));
8496 
8497 	ep->status = 0x00;
8498 	ep->handle = sys_cpu_to_le16(handle);
8499 	ep->reason = *((uint8_t *)pdu_data);
8500 }
8501 
8502 void hci_disconn_complete_process(uint16_t handle)
8503 {
8504 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
8505 	/* Clear any pending packets upon disconnection */
8506 	/* Note: This requires linear handle values starting from 0 */
8507 	if (handle >= ARRAY_SIZE(hci_hbuf_pend)) {
8508 		return;
8509 	}
8510 
8511 	hci_hbuf_acked += hci_hbuf_pend[handle];
8512 	hci_hbuf_pend[handle] = 0U;
8513 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
8514 
8515 	conn_count--;
8516 }
8517 
8518 static void le_conn_update_complete(struct pdu_data *pdu_data, uint16_t handle,
8519 				    struct net_buf *buf)
8520 {
8521 	struct bt_hci_evt_le_conn_update_complete *sep;
8522 	struct node_rx_cu *cu;
8523 	void *node;
8524 
8525 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8526 	    !(le_event_mask & BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE)) {
8527 		return;
8528 	}
8529 
8530 	sep = meta_evt(buf, BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE, sizeof(*sep));
8531 
8532 	/* Check for pdu field being aligned before accessing connection
8533 	 * update complete event.
8534 	 */
8535 	node = pdu_data;
8536 	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cu));
8537 
8538 	cu = node;
8539 	sep->status = cu->status;
8540 	sep->handle = sys_cpu_to_le16(handle);
8541 	sep->interval = sys_cpu_to_le16(cu->interval);
8542 	sep->latency = sys_cpu_to_le16(cu->latency);
8543 	sep->supv_timeout = sys_cpu_to_le16(cu->timeout);
8544 }
8545 
#if defined(CONFIG_BT_CTLR_LE_ENC)
/* Encode an Encryption Key Refresh Complete event (always success). */
static void enc_refresh_complete(struct pdu_data *pdu_data, uint16_t handle,
				 struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_key_refresh_complete *evt;

	if (!(event_mask & BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE)) {
		return;
	}

	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
		       sizeof(*evt));
	evt = net_buf_add(buf, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
	evt->status = 0x00;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
8564 
#if defined(CONFIG_BT_CTLR_LE_PING)
/* Encode an Authenticated Payload Timeout Expired event. */
static void auth_payload_timeout_exp(struct pdu_data *pdu_data, uint16_t handle,
				     struct net_buf *buf)
{
	struct bt_hci_evt_auth_payload_timeout_exp *evt;

	/* This event is controlled by page 2 of the event mask */
	if (!(event_mask_page_2 & BT_EVT_MASK_AUTH_PAYLOAD_TIMEOUT_EXP)) {
		return;
	}

	hci_evt_create(buf, BT_HCI_EVT_AUTH_PAYLOAD_TIMEOUT_EXP, sizeof(*evt));
	evt = net_buf_add(buf, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
}
#endif /* CONFIG_BT_CTLR_LE_PING */
8581 
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
/* Encode an LE Channel Selection Algorithm meta event; when masked,
 * only log the algorithm in use.
 */
static void le_chan_sel_algo(struct pdu_data *pdu_data, uint16_t handle,
			     struct net_buf *buf)
{
	struct bt_hci_evt_le_chan_sel_algo *evt;
	struct node_rx_cs *cs = (void *)pdu_data;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CHAN_SEL_ALGO)) {
		LOG_DBG("handle: 0x%04x, CSA: %x.", handle, cs->csa);
		return;
	}

	evt = meta_evt(buf, BT_HCI_EVT_LE_CHAN_SEL_ALGO, sizeof(*evt));

	evt->chan_sel_algo = cs->csa;
	evt->handle = sys_cpu_to_le16(handle);
}
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
8603 
#if defined(CONFIG_BT_CTLR_PHY)
/* Encode an LE PHY Update Complete meta event; when masked, only log
 * the outcome.
 */
static void le_phy_upd_complete(struct pdu_data *pdu_data, uint16_t handle,
				struct net_buf *buf)
{
	struct bt_hci_evt_le_phy_update_complete *evt;
	struct node_rx_pu *pu = (void *)pdu_data;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE)) {
		LOG_WRN("handle: 0x%04x, status: %x, tx: %x, rx: %x.", handle, pu->status,
			find_lsb_set(pu->tx), find_lsb_set(pu->rx));
		return;
	}

	evt = meta_evt(buf, BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
	evt->status = pu->status;
	/* Convert the one-hot PHY bit masks to 1-based PHY indices */
	evt->tx_phy = find_lsb_set(pu->tx);
	evt->rx_phy = find_lsb_set(pu->rx);
}
#endif /* CONFIG_BT_CTLR_PHY */
8628 
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
/* Encode an LE Request Peer SCA Complete meta event; when masked, only
 * log the result.
 */
static void le_req_peer_sca_complete(struct pdu_data *pdu, uint16_t handle,
				struct net_buf *buf)
{
	struct bt_hci_evt_le_req_peer_sca_complete *evt;
	struct node_rx_sca *scau = (void *)pdu;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_REQ_PEER_SCA_COMPLETE)) {
		LOG_WRN("handle: 0x%04x, status: %x, sca: %x.", handle,
			scau->status,
			scau->sca);
		return;
	}

	evt = meta_evt(buf, BT_HCI_EVT_LE_REQ_PEER_SCA_COMPLETE, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
	evt->status = scau->status;
	evt->sca = scau->sca;
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
8653 #endif /* CONFIG_BT_CONN */
8654 
#if defined(CONFIG_BT_HCI_MESH_EXT)
/* Encode a vendor Mesh Advertising Complete event. */
static void mesh_adv_cplt(struct pdu_data *pdu_data,
			  struct node_rx_pdu *node_rx,
			  struct net_buf *buf)
{
	struct bt_hci_evt_mesh_adv_complete *evt;

	evt = mesh_evt(buf, BT_HCI_EVT_MESH_ADV_COMPLETE, sizeof(*evt));
	/* First octet of the PDU identifies the advertising slot */
	evt->adv_slot = ((uint8_t *)pdu_data)[0];
}
#endif /* CONFIG_BT_HCI_MESH_EXT */
8666 
8667 /**
8668  * @brief Encode a control-PDU into an HCI buffer
8669  * @details Execution context: Host thread
8670  *
8671  * @param node_rx_pdu[in] RX node containing header and PDU
8672  * @param pdu_data[in]    PDU. Same as node_rx_pdu->pdu, but more convenient
8673  * @param net_buf[out]    Upwards-going HCI buffer to fill
8674  */
/* Dispatch on the RX node type to the matching HCI event encoder.
 * Nothing follows the switch, so 'break' and 'return' cases are
 * equivalent in effect here.
 */
static void encode_control(struct node_rx_pdu *node_rx,
			   struct pdu_data *pdu_data, struct net_buf *buf)
{
	uint16_t handle;

	handle = node_rx->hdr.handle;

	switch (node_rx->hdr.type) {
#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:
		le_advertising_report(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_1M_REPORT:
		le_adv_ext_1M_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_2M_REPORT:
		le_adv_ext_2M_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_CODED_REPORT:
		le_adv_ext_coded_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
		le_scan_timeout(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC:
		le_per_adv_sync_established(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		le_per_adv_sync_report(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_LOST:
		le_per_adv_sync_lost(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
		le_per_adv_sync_transfer_received(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
		/* Vendor variant reports 16-bit IQ samples when enabled */
#if defined(CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
		vs_le_df_connectionless_iq_report(pdu_data, node_rx, buf);
#else
		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
#endif /* CONFIG_BT_CTLR_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
		break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	case NODE_RX_TYPE_SYNC_ISO:
		le_big_sync_established(pdu_data, node_rx, buf);
		break;

	case NODE_RX_TYPE_SYNC_ISO_LOST:
		le_big_sync_lost(pdu_data, node_rx, buf);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
		le_adv_ext_terminate(pdu_data, node_rx, buf);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case NODE_RX_TYPE_BIG_COMPLETE:
		le_big_complete(pdu_data, node_rx, buf);
		break;
	case NODE_RX_TYPE_BIG_TERMINATE:
		le_big_terminate(pdu_data, node_rx, buf);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		le_scan_req_received(pdu_data, node_rx, buf);
#elif defined(CONFIG_BT_CTLR_VS_SCAN_REQ_RX)
		le_vs_scan_req_received(pdu_data, node_rx, buf);
#else
		LL_ASSERT(0);
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		break;
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
		le_conn_complete(pdu_data, handle, buf);
		break;

	case NODE_RX_TYPE_TERMINATE:
		hci_disconn_complete_encode(pdu_data, handle, buf);
		break;

	case NODE_RX_TYPE_CONN_UPDATE:
		le_conn_update_complete(pdu_data, handle, buf);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case NODE_RX_TYPE_ENC_REFRESH:
		enc_refresh_complete(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_LE_PING)
	case NODE_RX_TYPE_APTO:
		auth_payload_timeout_exp(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
	case NODE_RX_TYPE_CHAN_SEL_ALGO:
		le_chan_sel_algo(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */

#if defined(CONFIG_BT_CTLR_PHY)
	case NODE_RX_TYPE_PHY_UPDATE:
		le_phy_upd_complete(pdu_data, handle, buf);
		return;
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	case NODE_RX_TYPE_RSSI:
		/* RSSI is only logged; no HCI event is defined for it */
		LOG_INF("handle: 0x%04x, rssi: -%d dB.", handle, pdu_data->rssi);
		return;
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case NODE_RX_TYPE_CIS_REQUEST:
		le_cis_request(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	case NODE_RX_TYPE_CIS_ESTABLISHED:
		le_cis_established(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
		le_req_peer_sca_complete(pdu_data, handle, buf);
		return;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#if defined(CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
		vs_le_df_connection_iq_report(node_rx, buf);
#else
		le_df_connection_iq_report(node_rx, buf);
#endif /* CONFIG_BT_CTLR_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
		return;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
		LOG_INF("Advertised.");
		return;
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
		LOG_INF("Scanned.");
		return;
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
		/* ISR latency/time profile is only logged, not encoded */
		LOG_INF("l: %u, %u, %u; t: %u, %u, %u; cpu: %u (%u), %u (%u), %u (%u), %u (%u).",
			pdu_data->profile.lcur, pdu_data->profile.lmin, pdu_data->profile.lmax,
			pdu_data->profile.cur, pdu_data->profile.min, pdu_data->profile.max,
			pdu_data->profile.radio, pdu_data->profile.radio_ticks,
			pdu_data->profile.lll, pdu_data->profile.lll_ticks,
			pdu_data->profile.ull_high, pdu_data->profile.ull_high_ticks,
			pdu_data->profile.ull_low, pdu_data->profile.ull_low_ticks);
		return;
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
		le_df_connectionless_iq_report(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case NODE_RX_TYPE_MESH_ADV_CPLT:
		mesh_adv_cplt(pdu_data, node_rx, buf);
		return;

	case NODE_RX_TYPE_MESH_REPORT:
		le_advertising_report(pdu_data, node_rx, buf);
		return;
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
	/* Vendor/user-defined node types delegate to the user hook */
	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
		hci_user_ext_encode_control(node_rx, pdu_data, buf);
		return;
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

	default:
		LL_ASSERT(0);
		return;
	}
}
8899 
8900 #if defined(CONFIG_BT_CTLR_LE_ENC)
8901 static void le_ltk_request(struct pdu_data *pdu_data, uint16_t handle,
8902 			   struct net_buf *buf)
8903 {
8904 	struct bt_hci_evt_le_ltk_request *sep;
8905 
8906 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8907 	    !(le_event_mask & BT_EVT_MASK_LE_LTK_REQUEST)) {
8908 		return;
8909 	}
8910 
8911 	sep = meta_evt(buf, BT_HCI_EVT_LE_LTK_REQUEST, sizeof(*sep));
8912 
8913 	sep->handle = sys_cpu_to_le16(handle);
8914 	memcpy(&sep->rand, pdu_data->llctrl.enc_req.rand, sizeof(uint64_t));
8915 	memcpy(&sep->ediv, pdu_data->llctrl.enc_req.ediv, sizeof(uint16_t));
8916 }
8917 
8918 static void encrypt_change(uint8_t err, uint16_t handle,
8919 			   struct net_buf *buf, bool encryption_on)
8920 {
8921 	struct bt_hci_evt_encrypt_change *ep;
8922 
8923 	if (!(event_mask & BT_EVT_MASK_ENCRYPT_CHANGE)) {
8924 		return;
8925 	}
8926 
8927 	hci_evt_create(buf, BT_HCI_EVT_ENCRYPT_CHANGE, sizeof(*ep));
8928 	ep = net_buf_add(buf, sizeof(*ep));
8929 
8930 	ep->status = err ? err : (encryption_on ? err : BT_HCI_ERR_UNSPECIFIED);
8931 	ep->handle = sys_cpu_to_le16(handle);
8932 	ep->encrypt = encryption_on ? 1 : 0;
8933 }
8934 #endif /* CONFIG_BT_CTLR_LE_ENC */
8935 
8936 static void le_remote_feat_complete(uint8_t status, struct pdu_data *pdu_data,
8937 				    uint16_t handle, struct net_buf *buf)
8938 {
8939 	struct bt_hci_evt_le_remote_feat_complete *sep;
8940 
8941 	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
8942 	    !(le_event_mask & BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE)) {
8943 		return;
8944 	}
8945 
8946 	sep = meta_evt(buf, BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE, sizeof(*sep));
8947 
8948 	sep->status = status;
8949 	sep->handle = sys_cpu_to_le16(handle);
8950 	if (!status) {
8951 		memcpy(&sep->features[0],
8952 		       &pdu_data->llctrl.feature_rsp.features[0],
8953 		       sizeof(sep->features));
8954 	} else {
8955 		(void)memset(&sep->features[0], 0x00, sizeof(sep->features));
8956 	}
8957 }
8958 
8959 static void le_unknown_rsp(struct pdu_data *pdu_data, uint16_t handle,
8960 			   struct net_buf *buf)
8961 {
8962 
8963 	switch (pdu_data->llctrl.unknown_rsp.type) {
8964 	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
8965 		le_remote_feat_complete(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE,
8966 					    NULL, handle, buf);
8967 		break;
8968 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8969 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8970 		le_df_cte_req_failed(BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, handle, buf);
8971 		break;
8972 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8973 	default:
8974 		LOG_WRN("type: 0x%02x",	pdu_data->llctrl.unknown_rsp.type);
8975 		break;
8976 	}
8977 }
8978 
8979 static void le_reject_ext_ind(struct pdu_data *pdu, uint16_t handle, struct net_buf *buf)
8980 {
8981 	switch (pdu->llctrl.reject_ext_ind.reject_opcode) {
8982 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
8983 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
8984 		le_df_cte_req_failed(pdu->llctrl.reject_ext_ind.error_code, handle, buf);
8985 		break;
8986 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
8987 	default:
8988 		LOG_WRN("reject opcode: 0x%02x", pdu->llctrl.reject_ext_ind.reject_opcode);
8989 		break;
8990 	}
8991 }
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* Encode an LE Remote Connection Parameter Request meta event; if the
 * host has masked it, reject the request at the link layer instead.
 */
static void le_conn_param_req(struct pdu_data *pdu_data, uint16_t handle,
			      struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_param_req *evt;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_CONN_PARAM_REQ)) {
		/* event masked, reject the conn param req */
		ll_conn_update(handle, 2, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE, 0,
			       0, 0, 0, NULL);

		return;
	}

	evt = meta_evt(buf, BT_HCI_EVT_LE_CONN_PARAM_REQ, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
	/* Parameter fields are copied from the PDU without byte-order
	 * conversion, as in the original code - presumably already
	 * little-endian on air.
	 */
	evt->interval_min = pdu_data->llctrl.conn_param_req.interval_min;
	evt->interval_max = pdu_data->llctrl.conn_param_req.interval_max;
	evt->latency = pdu_data->llctrl.conn_param_req.latency;
	evt->timeout = pdu_data->llctrl.conn_param_req.timeout;
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
9016 
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Encode an LE Data Length Change meta event from an LL length PDU. */
static void le_data_len_change(struct pdu_data *pdu_data, uint16_t handle,
			       struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt;

	if (!(event_mask & BT_EVT_MASK_LE_META_EVENT) ||
	    !(le_event_mask & BT_EVT_MASK_LE_DATA_LEN_CHANGE)) {
		return;
	}

	evt = meta_evt(buf, BT_HCI_EVT_LE_DATA_LEN_CHANGE, sizeof(*evt));

	evt->handle = sys_cpu_to_le16(handle);
	/* Octet/time fields are copied from the PDU without byte-order
	 * conversion, matching the other LLCP encoders in this file.
	 */
	evt->max_tx_octets = pdu_data->llctrl.length_rsp.max_tx_octets;
	evt->max_tx_time = pdu_data->llctrl.length_rsp.max_tx_time;
	evt->max_rx_octets = pdu_data->llctrl.length_rsp.max_rx_octets;
	evt->max_rx_time = pdu_data->llctrl.length_rsp.max_rx_time;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
9037 
#if defined(CONFIG_BT_REMOTE_VERSION)
/* Encode a Read Remote Version Information Complete event from an
 * LL_VERSION_IND PDU (always success).
 */
static void remote_version_info_encode(struct pdu_data *pdu_data,
				       uint16_t handle, struct net_buf *buf)
{
	struct bt_hci_evt_remote_version_info *evt;
	struct pdu_data_llctrl_version_ind *ver_ind;

	if (!(event_mask & BT_EVT_MASK_REMOTE_VERSION_INFO)) {
		return;
	}

	hci_evt_create(buf, BT_HCI_EVT_REMOTE_VERSION_INFO, sizeof(*evt));
	evt = net_buf_add(buf, sizeof(*evt));

	ver_ind = &pdu_data->llctrl.version_ind;
	evt->status = 0x00;
	evt->handle = sys_cpu_to_le16(handle);
	evt->version = ver_ind->version_number;
	evt->manufacturer = ver_ind->company_id;
	evt->subversion = ver_ind->sub_version_number;
}
#endif /* CONFIG_BT_REMOTE_VERSION */
9060 
/* Encode an LLCP control PDU received on a connection into the
 * corresponding HCI event, keyed by the LL control opcode.
 */
static void encode_data_ctrl(struct node_rx_pdu *node_rx,
			     struct pdu_data *pdu_data, struct net_buf *buf)
{
	uint16_t handle = node_rx->hdr.handle;

	switch (pdu_data->llctrl.opcode) {

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
		le_ltk_request(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
		/* Encryption procedure completed successfully */
		encrypt_change(0x00, handle, buf, true);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_REMOTE_VERSION)
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		remote_version_info_encode(pdu_data, handle, buf);
		break;
#endif /* defined(CONFIG_BT_REMOTE_VERSION) */

	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
		le_remote_feat_complete(0x00, pdu_data, handle, buf);
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
		/* Peer rejected encryption start; report its error code */
		encrypt_change(pdu_data->llctrl.reject_ind.error_code, handle,
			       buf, false);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
		le_conn_param_req(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
		le_data_len_change(pdu_data, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
		le_df_cte_req_failed(BT_HCI_CTE_REQ_STATUS_RSP_WITHOUT_CTE, handle, buf);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		le_unknown_rsp(pdu_data, handle, buf);
		break;

	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		le_reject_ext_ind(pdu_data, handle, buf);
		break;

	default:
		LL_ASSERT(0);
		return;
	}
}
9127 
#if defined(CONFIG_BT_CONN)
/* Encode a received LL data PDU as an upward HCI ACL packet and, when
 * host flow control is enabled, account for it as pending.
 */
void hci_acl_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
{
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
	struct bt_hci_acl_hdr *acl;
	uint16_t handle_flags;
	uint16_t handle;
	uint8_t *payload;

	handle = node_rx->hdr.handle;

	switch (pdu_data->ll_id) {
	case PDU_DATA_LLID_DATA_CONTINUE:
	case PDU_DATA_LLID_DATA_START:
		/* LL start/continue maps onto ACL start/continue flags */
		handle_flags = bt_acl_handle_pack(handle,
						  (pdu_data->ll_id ==
						   PDU_DATA_LLID_DATA_START) ?
						  BT_ACL_START : BT_ACL_CONT);

		acl = (void *)net_buf_add(buf, sizeof(*acl));
		acl->handle = sys_cpu_to_le16(handle_flags);
		acl->len = sys_cpu_to_le16(pdu_data->len);

		payload = (void *)net_buf_add(buf, pdu_data->len);
		memcpy(payload, pdu_data->lldata, pdu_data->len);
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
		if (hci_hbuf_total > 0) {
			LL_ASSERT((hci_hbuf_sent - hci_hbuf_acked) <
				  hci_hbuf_total);
			hci_hbuf_sent++;
			/* Note: This requires linear handle values starting
			 * from 0
			 */
			LL_ASSERT(handle < ARRAY_SIZE(hci_hbuf_pend));
			hci_hbuf_pend[handle]++;
		}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
		break;

	default:
		LL_ASSERT(0);
		break;
	}
}
#endif /* CONFIG_BT_CONN */
9172 
9173 void hci_evt_encode(struct node_rx_pdu *node_rx, struct net_buf *buf)
9174 {
9175 	struct pdu_data *pdu_data = (void *)node_rx->pdu;
9176 
9177 	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {
9178 		encode_control(node_rx, pdu_data, buf);
9179 	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
9180 		encode_data_ctrl(node_rx, pdu_data, buf);
9181 	}
9182 }
9183 
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)
/* Encode a Number Of Completed Packets event for a single handle. */
void hci_num_cmplt_encode(struct net_buf *buf, uint16_t handle, uint8_t num)
{
	struct bt_hci_evt_num_completed_packets *evt;
	struct bt_hci_handle_count *entry;
	const uint8_t num_handles = 1U;
	uint8_t len;

	len = sizeof(*evt) + (num_handles * sizeof(*entry));
	hci_evt_create(buf, BT_HCI_EVT_NUM_COMPLETED_PACKETS, len);

	evt = net_buf_add(buf, len);
	evt->num_handles = num_handles;

	entry = &evt->h[0];
	entry->handle = sys_cpu_to_le16(handle);
	entry->count = sys_cpu_to_le16(num);
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
9205 
/* Classify an RX node for the HCI thread's prioritization: returns one
 * of the HCI_CLASS_* constants (discardable event, required event,
 * connection event, LLCP event, ACL data, ISO data, or none).
 */
uint8_t hci_get_class(struct node_rx_pdu *node_rx)
{
#if defined(CONFIG_BT_CONN)
	struct pdu_data *pdu_data = (void *)node_rx->pdu;
#endif

	if (node_rx->hdr.type != NODE_RX_TYPE_DC_PDU) {

		switch (node_rx->hdr.type) {
#if defined(CONFIG_BT_OBSERVER) || \
	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
	defined(CONFIG_BT_CTLR_PROFILE_ISR)
		/* Report/indication types may be dropped under pressure */
#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_REPORT:
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
		case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
		case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
		case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
			return HCI_CLASS_EVT_DISCARDABLE;
#endif

#if defined(CONFIG_BT_HCI_MESH_EXT)
		case NODE_RX_TYPE_MESH_ADV_CPLT:
		case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
		case NODE_RX_TYPE_EXT_ADV_TERMINATE:

#if defined(CONFIG_BT_CTLR_ADV_ISO)
		case NODE_RX_TYPE_BIG_COMPLETE:
		case NODE_RX_TYPE_BIG_TERMINATE:
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_EXT_1M_REPORT:
		case NODE_RX_TYPE_EXT_2M_REPORT:
		case NODE_RX_TYPE_EXT_CODED_REPORT:
		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC:
		case NODE_RX_TYPE_SYNC_REPORT:
		case NODE_RX_TYPE_SYNC_LOST:

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
		case NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED:
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		case NODE_RX_TYPE_SYNC_ISO:
		case NODE_RX_TYPE_SYNC_ISO_LOST:
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONNECTION:

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case NODE_RX_TYPE_CIS_REQUEST:
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_CIS_ESTABLISHED:
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */

			return HCI_CLASS_EVT_REQUIRED;

		/* Connection-scoped events */
		case NODE_RX_TYPE_TERMINATE:
		case NODE_RX_TYPE_CONN_UPDATE:

#if defined(CONFIG_BT_CTLR_LE_ENC)
		case NODE_RX_TYPE_ENC_REFRESH:
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
		case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

#if defined(CONFIG_BT_CTLR_LE_PING)
		case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
		case NODE_RX_TYPE_CHAN_SEL_ALGO:
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */

#if defined(CONFIG_BT_CTLR_PHY)
		case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

			return HCI_CLASS_EVT_CONNECTION;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_ISO_PDU:
			return HCI_CLASS_ISO_DATA;
#endif /* CONFIG_BT_CTLR_SYNC_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
			return HCI_CLASS_EVT_REQUIRED;
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
		/* User-defined node types are classified by the user hook */
		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
			return hci_user_ext_get_class(node_rx);
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

		default:
			return HCI_CLASS_NONE;
		}

#if defined(CONFIG_BT_CONN)
	} else if (pdu_data->ll_id == PDU_DATA_LLID_CTRL) {
		return HCI_CLASS_EVT_LLCP;
	} else {
		return HCI_CLASS_ACL_DATA;
	}
#else
	} else {
		return HCI_CLASS_NONE;
	}
#endif
}
9364 
/* One-time HCI layer initialization.
 *
 * signal_host_buf is stored for host-buffer (ACL flow control) signalling
 * when CONFIG_BT_HCI_ACL_FLOW_CONTROL is enabled; otherwise it is unused.
 */
void hci_init(struct k_poll_signal *signal_host_buf)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	hbuf_signal = signal_host_buf;
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	/* Reset internal HCI state; NULL args as there is no command buf */
	reset(NULL, NULL);
}
9373