1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <string.h>
9 
10 #include <zephyr.h>
11 #include <soc.h>
12 #include <bluetooth/hci.h>
13 #include <sys/byteorder.h>
14 
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/radio.h"
18 #include "hal/ticker.h"
19 #include "hal/cntr.h"
20 
21 #include "util/util.h"
22 #include "util/mem.h"
23 #include "util/memq.h"
24 #include "util/mayfly.h"
25 
26 #include "ticker/ticker.h"
27 
28 #include "pdu.h"
29 
30 #include "lll.h"
31 #include "lll_clock.h"
32 #include "lll/lll_vendor.h"
33 #include "lll/lll_adv_types.h"
34 #include "lll_adv.h"
35 #include "lll/lll_adv_pdu.h"
36 #include "lll_scan.h"
37 #include "lll_conn.h"
38 #include "lll_filter.h"
39 #include "lll/lll_df_types.h"
40 
41 #include "ull_adv_types.h"
42 #include "ull_scan_types.h"
43 #include "ull_conn_types.h"
44 #include "ull_filter.h"
45 
46 #include "ull_adv_internal.h"
47 #include "ull_scan_internal.h"
48 #include "ull_conn_internal.h"
49 #include "ull_internal.h"
50 
51 #include "ll.h"
52 #include "ll_feat.h"
53 #include "ll_settings.h"
54 
55 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
56 #define LOG_MODULE_NAME bt_ctlr_ull_adv
57 #include "common/log.h"
58 #include "hal/debug.h"
59 
60 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle);
61 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv);
62 
63 static int init_reset(void);
64 static inline struct ll_adv_set *is_disabled_get(uint8_t handle);
65 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
66 			     uint8_t adv_chn_cnt, uint8_t phy,
67 			     uint8_t phy_flags);
68 
69 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
70 		      uint32_t remainder, uint16_t lazy, uint8_t force,
71 		      void *param);
72 static void ticker_update_op_cb(uint32_t status, void *param);
73 
74 #if defined(CONFIG_BT_PERIPHERAL)
75 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
76 			   uint32_t remainder, uint16_t lazy, uint8_t force,
77 			   void *param);
78 static void ticker_stop_op_cb(uint32_t status, void *param);
79 static void adv_disable(void *param);
80 static void disabled_cb(void *param);
81 static void conn_release(struct ll_adv_set *adv);
82 #endif /* CONFIG_BT_PERIPHERAL */
83 
84 #if defined(CONFIG_BT_CTLR_ADV_EXT)
85 static void adv_max_events_duration_set(struct ll_adv_set *adv,
86 					uint16_t duration,
87 					uint8_t max_ext_adv_evts);
88 static void ticker_stop_aux_op_cb(uint32_t status, void *param);
89 static void aux_disable(void *param);
90 static void aux_disabled_cb(void *param);
91 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
92 static void ext_disable(void *param);
93 static void ext_disabled_cb(void *param);
94 #endif /* CONFIG_BT_CTLR_ADV_EXT */
95 
96 static inline uint8_t disable(uint8_t handle);
97 
98 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
99 					struct pdu_adv *pdu,
100 					struct pdu_adv *pdu_scan);
101 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
102 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
103 
104 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type);
105 static void init_set(struct ll_adv_set *adv);
106 
107 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
108 
109 #if defined(CONFIG_BT_TICKER_EXT)
110 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
111 #endif /* CONFIG_BT_TICKER_EXT */
112 
113 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_CTLR_ADV_EXT)
114 static uint8_t ll_adv_cmds;
115 
116 int ll_adv_cmds_set(uint8_t adv_cmds)
117 {
118 	if (!ll_adv_cmds) {
119 		ll_adv_cmds = adv_cmds;
120 
121 		if (adv_cmds == LL_ADV_CMDS_LEGACY) {
122 			struct ll_adv_set *adv = &ll_adv[0];
123 
124 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
125 			adv->hci_handle = 0;
126 #endif
127 			adv->is_created = 1;
128 		}
129 	}
130 
131 	if (ll_adv_cmds != adv_cmds) {
132 		return -EINVAL;
133 	}
134 
135 	return 0;
136 }
137 
138 int ll_adv_cmds_is_ext(void)
139 {
140 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
141 }
142 #endif
143 
144 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
145 uint8_t ll_adv_set_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
146 {
147 	struct ll_adv_set *adv;
148 	uint8_t idx;
149 
150 	adv = &ll_adv[0];
151 
152 	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv++) {
153 		if (adv->is_created && (adv->hci_handle == hci_handle)) {
154 			*handle = idx;
155 			return 0;
156 		}
157 	}
158 
159 	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
160 }
161 
162 uint8_t ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle, uint8_t *handle)
163 {
164 	struct ll_adv_set *adv, *adv_empty;
165 	uint8_t idx;
166 
167 	adv = &ll_adv[0];
168 	adv_empty = NULL;
169 
170 	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv++) {
171 		if (adv->is_created) {
172 			if (adv->hci_handle == hci_handle) {
173 				*handle = idx;
174 				return 0;
175 			}
176 		} else if (!adv_empty) {
177 			adv_empty = adv;
178 		}
179 	}
180 
181 	if (adv_empty) {
182 		adv_empty->hci_handle = hci_handle;
183 		*handle = ull_adv_handle_get(adv_empty);
184 		return 0;
185 	}
186 
187 	return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
188 }
189 
190 uint8_t ll_adv_set_hci_handle_get(uint8_t handle)
191 {
192 	struct ll_adv_set *adv;
193 
194 	adv = ull_adv_set_get(handle);
195 	LL_ASSERT(adv && adv->is_created);
196 
197 	return adv->hci_handle;
198 }
199 #endif
200 
201 #if defined(CONFIG_BT_CTLR_ADV_EXT)
202 uint8_t ll_adv_params_set(uint8_t handle, uint16_t evt_prop, uint32_t interval,
203 		       uint8_t adv_type, uint8_t own_addr_type,
204 		       uint8_t direct_addr_type, uint8_t const *const direct_addr,
205 		       uint8_t chan_map, uint8_t filter_policy,
206 		       uint8_t *const tx_pwr, uint8_t phy_p, uint8_t skip,
207 		       uint8_t phy_s, uint8_t sid, uint8_t sreq)
208 {
209 	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
210 				     PDU_ADV_TYPE_DIRECT_IND,
211 				     PDU_ADV_TYPE_SCAN_IND,
212 				     PDU_ADV_TYPE_NONCONN_IND,
213 				     PDU_ADV_TYPE_DIRECT_IND,
214 				     PDU_ADV_TYPE_EXT_IND};
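	/* NOTE: The host-provided adv_type is used directly as an index into
	 * pdu_adv_type[] below; the entries follow the HCI legacy advertising
	 * type ordering, with the extended indication appended last.
	 */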
215 	uint8_t is_pdu_type_changed = 0;
216 	uint8_t is_new_set;
217 #else /* !CONFIG_BT_CTLR_ADV_EXT */
218 uint8_t ll_adv_params_set(uint16_t interval, uint8_t adv_type,
219 		       uint8_t own_addr_type, uint8_t direct_addr_type,
220 		       uint8_t const *const direct_addr, uint8_t chan_map,
221 		       uint8_t filter_policy)
222 {
223 	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
224 				     PDU_ADV_TYPE_DIRECT_IND,
225 				     PDU_ADV_TYPE_SCAN_IND,
226 				     PDU_ADV_TYPE_NONCONN_IND,
227 				     PDU_ADV_TYPE_DIRECT_IND};
228 	uint8_t const handle = 0;
229 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
230 
231 	struct ll_adv_set *adv;
232 	uint8_t pdu_type_prev;
233 	struct pdu_adv *pdu;
234 
235 	adv = is_disabled_get(handle);
236 	if (!adv) {
237 		return BT_HCI_ERR_CMD_DISALLOWED;
238 	}
239 
240 #if defined(CONFIG_BT_CTLR_ADV_EXT)
241 	/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
242 	 * evt_prop bits.
243 	 */
244 
245 	/* Extended adv param set command used */
246 	if (adv_type == PDU_ADV_TYPE_EXT_IND) {
247 		/* legacy */
248 		if (evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) {
249 			/* look up evt_prop to PDU type in pdu_adv_type[] */
250 			uint8_t const leg_adv_type[] = {
251 				0x03, /* PDU_ADV_TYPE_NONCONN_IND */
252 				0x04, /* PDU_ADV_TYPE_DIRECT_IND */
253 				0x02, /* PDU_ADV_TYPE_SCAN_IND */
254 				0x00  /* PDU_ADV_TYPE_ADV_IND */
255 			};
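			/* NOTE: The index (evt_prop & 0x03) decodes bit 0 as
			 * connectable and bit 1 as scannable; e.g. both bits
			 * set selects ADV_IND, both clear selects NONCONN_IND.
			 */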
256 
257 			if (evt_prop & BT_HCI_LE_ADV_PROP_ANON) {
258 				return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
259 			}
260 
261 			adv_type = leg_adv_type[evt_prop & 0x03];
262 
263 			/* high duty cycle directed */
264 			if (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN) {
265 				adv_type = 0x01; /* PDU_ADV_TYPE_DIRECT_IND */
266 			}
267 
268 			adv->lll.phy_p = PHY_1M;
269 		} else {
270 			/* - Connectable and scannable not allowed;
271 			 * - High duty cycle directed connectable not allowed
272 			 */
273 			if (((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
274 					 BT_HCI_LE_ADV_PROP_SCAN)) ==
275 			     (BT_HCI_LE_ADV_PROP_CONN |
276 			      BT_HCI_LE_ADV_PROP_SCAN)) ||
277 			    (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) {
278 				return BT_HCI_ERR_INVALID_PARAM;
279 			}
280 
281 #if (CONFIG_BT_CTLR_ADV_AUX_SET == 0)
282 			/* Connectable or scannable requires aux */
283 			if (evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
284 					BT_HCI_LE_ADV_PROP_SCAN)) {
285 				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
286 			}
287 #endif
288 
289 			adv_type = 0x05; /* PDU_ADV_TYPE_EXT_IND in */
290 					 /* pdu_adv_type array. */
291 
292 			adv->lll.phy_p = phy_p;
293 			adv->lll.phy_flags = PHY_FLAGS_S8;
294 		}
295 	} else {
296 		adv->lll.phy_p = PHY_1M;
297 	}
298 
299 	is_new_set = !adv->is_created;
300 	adv->is_created = 1;
301 #endif /* CONFIG_BT_CTLR_ADV_EXT */
302 
303 	/* remember parameters so that the set adv/scan data and adv enable
304 	 * interfaces can correctly update adv/scan data in the
305 	 * double buffer between caller and controller context.
306 	 */
307 	/* Set interval for Undirected or Low Duty Cycle Directed Advertising */
308 	if (adv_type != 0x01) {
309 		adv->interval = interval;
310 	} else {
311 		adv->interval = 0;
312 	}
313 	adv->lll.chan_map = chan_map;
314 	adv->lll.filter_policy = filter_policy;
315 
316 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
317 	adv->lll.scan_req_notify = sreq;
318 #endif
319 
320 	/* update the "current" primary adv PDU */
321 	pdu = lll_adv_data_peek(&adv->lll);
322 	pdu_type_prev = pdu->type;
323 #if defined(CONFIG_BT_CTLR_ADV_EXT)
324 	if (is_new_set) {
325 		is_pdu_type_changed = 1;
326 
327 		pdu->type = pdu_adv_type[adv_type];
328 		if (pdu->type != PDU_ADV_TYPE_EXT_IND) {
329 			pdu->len = 0U;
330 		}
331 	/* check if new PDU type is different from the previous one */
332 	} else if (pdu->type != pdu_adv_type[adv_type]) {
333 		is_pdu_type_changed = 1;
334 
335 		/* If old PDU was extended advertising PDU, release
336 		 * auxiliary and periodic advertising sets.
337 		 */
338 		if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
339 			struct lll_adv_aux *lll_aux = adv->lll.aux;
340 
341 			if (lll_aux) {
342 				struct ll_adv_aux_set *aux;
343 
344 				/* FIXME: copy AD data from auxiliary channel
345 				 * PDU.
346 				 */
347 				pdu->len = 0;
348 
349 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
350 				if (adv->lll.sync) {
351 					struct ll_adv_sync_set *sync;
352 
353 					sync = HDR_LLL2ULL(adv->lll.sync);
354 					adv->lll.sync = NULL;
355 
356 					ull_adv_sync_release(sync);
357 				}
358 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
359 
360 				/* Release auxiliary channel set */
361 				aux = HDR_LLL2ULL(lll_aux);
362 				adv->lll.aux = NULL;
363 
364 				ull_adv_aux_release(aux);
365 			} else {
366 				/* No previous AD data in auxiliary channel
367 				 * PDU.
368 				 */
369 				pdu->len = 0;
370 			}
371 		}
372 
373 		pdu->type = pdu_adv_type[adv_type];
374 	}
375 
376 #else /* !CONFIG_BT_CTLR_ADV_EXT */
377 	pdu->type = pdu_adv_type[adv_type];
378 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
379 
380 	pdu->rfu = 0;
381 
382 	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
383 	    ((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
384 	     (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
385 		pdu->chan_sel = 1;
386 	} else {
387 		pdu->chan_sel = 0;
388 	}
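	/* NOTE: The ChSel bit is only meaningful in connectable legacy PDUs
	 * (ADV_IND and ADV_DIRECT_IND); for all other PDU types it is kept
	 * cleared above.
	 */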
389 
390 #if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
391 	/* Backup the legacy AD Data if switching to legacy directed advertising
392 	 * or to Extended Advertising.
393 	 */
394 	if (((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
395 	     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
396 	      (pdu->type == PDU_ADV_TYPE_EXT_IND))) &&
397 	    (pdu_type_prev != PDU_ADV_TYPE_DIRECT_IND) &&
398 	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
399 	     (pdu_type_prev != PDU_ADV_TYPE_EXT_IND))) {
400 		if (pdu->len == 0U) {
401 			adv->ad_data_backup.len = 0U;
402 		} else {
403 			LL_ASSERT(pdu->len >=
404 				  offsetof(struct pdu_adv_adv_ind, data));
405 
406 			adv->ad_data_backup.len = pdu->len -
407 				offsetof(struct pdu_adv_adv_ind, data);
408 			memcpy(adv->ad_data_backup.data, pdu->adv_ind.data,
409 			       adv->ad_data_backup.len);
410 		}
411 	}
412 #endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */
413 
414 #if defined(CONFIG_BT_CTLR_PRIVACY)
415 	adv->own_addr_type = own_addr_type;
416 	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
417 	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
418 		adv->peer_addr_type = direct_addr_type;
419 		memcpy(&adv->peer_addr, direct_addr, BDADDR_SIZE);
420 	}
421 #endif /* CONFIG_BT_CTLR_PRIVACY */
422 
423 	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
424 		pdu->tx_addr = own_addr_type & 0x1;
425 		pdu->rx_addr = direct_addr_type;
426 		memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE);
427 		pdu->len = sizeof(struct pdu_adv_direct_ind);
428 
429 #if defined(CONFIG_BT_CTLR_ADV_EXT)
430 	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
431 		struct pdu_adv_ext_hdr *pri_hdr, pri_hdr_prev;
432 		struct pdu_adv_com_ext_adv *pri_com_hdr;
433 		uint8_t *pri_dptr_prev, *pri_dptr;
434 		uint8_t len;
435 
436 		pri_com_hdr = (void *)&pdu->adv_ext_ind;
437 		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
438 		pri_dptr = pri_hdr->data;
439 		pri_dptr_prev = pri_dptr;
440 
441 		/* No ACAD and no AdvData */
442 		pri_com_hdr->adv_mode = evt_prop & 0x03;
443 
444 		/* Zero-init header flags */
445 		if (is_pdu_type_changed) {
446 			*(uint8_t *)&pri_hdr_prev = 0U;
447 		} else {
448 			pri_hdr_prev = *pri_hdr;
449 		}
450 		*(uint8_t *)pri_hdr = 0U;
451 
452 		/* AdvA flag */
453 		if (pri_hdr_prev.adv_addr) {
454 			pri_dptr_prev += BDADDR_SIZE;
455 		}
456 		if (!pri_com_hdr->adv_mode &&
457 		    (!pri_hdr_prev.aux_ptr ||
458 		     (!(evt_prop & BT_HCI_LE_ADV_PROP_ANON) &&
459 		      (phy_p != PHY_CODED)))) {
460 			/* TODO: optional on 1M with Aux Ptr */
461 			pri_hdr->adv_addr = 1;
462 
463 			/* NOTE: AdvA is filled at enable */
464 			pdu->tx_addr = own_addr_type & 0x1;
465 			pri_dptr += BDADDR_SIZE;
466 		} else {
467 			pdu->tx_addr = 0;
468 		}
469 
470 		/* TargetA flag */
471 		if (pri_hdr_prev.tgt_addr) {
472 			pri_dptr_prev += BDADDR_SIZE;
473 		}
474 		/* TargetA flag in primary channel PDU only for directed */
475 		if (evt_prop & BT_HCI_LE_ADV_PROP_DIRECT) {
476 			pri_hdr->tgt_addr = 1;
477 			pdu->rx_addr = direct_addr_type;
478 			pri_dptr += BDADDR_SIZE;
479 		} else {
480 			pdu->rx_addr = 0;
481 		}
482 
483 		/* No CTEInfo flag in primary channel PDU */
484 
485 		/* ADI flag */
486 		if (pri_hdr_prev.adi) {
487 			pri_dptr_prev += sizeof(struct pdu_adv_adi);
488 
489 			pri_hdr->adi = 1;
490 			pri_dptr += sizeof(struct pdu_adv_adi);
491 		}
492 
493 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
494 		/* AuxPtr flag */
495 		if (pri_hdr_prev.aux_ptr) {
496 			pri_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
497 		}
498 		/* Need aux for connectable or scannable extended advertising */
499 		if (pri_hdr_prev.aux_ptr ||
500 		    ((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
501 				  BT_HCI_LE_ADV_PROP_SCAN)))) {
502 			pri_hdr->aux_ptr = 1;
503 			pri_dptr += sizeof(struct pdu_adv_aux_ptr);
504 		}
505 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
506 
507 		/* No SyncInfo flag in primary channel PDU */
508 
509 		/* Tx Power flag */
510 		if (pri_hdr_prev.tx_pwr) {
511 			pri_dptr_prev += sizeof(uint8_t);
512 		}
513 		/* C1, Tx Power is optional on the LE 1M PHY, and reserved
514 		 * for future use on the LE Coded PHY.
515 		 */
516 		if ((evt_prop & BT_HCI_LE_ADV_PROP_TX_POWER) &&
517 		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
518 			pri_hdr->tx_pwr = 1;
519 			pri_dptr += sizeof(uint8_t);
520 		}
521 
522 		/* Calc primary PDU len */
523 		len = ull_adv_aux_hdr_len_calc(pri_com_hdr, &pri_dptr);
524 		ull_adv_aux_hdr_len_fill(pri_com_hdr, len);
525 
526 		/* Set PDU length */
527 		pdu->len = len;
528 
529 		/* Start filling primary PDU payload based on flags */
530 
531 		/* No AdvData in primary channel PDU */
532 
533 		/* No ACAD in primary channel PDU */
534 
535 		/* Tx Power */
536 		if (pri_hdr_prev.tx_pwr) {
537 			pri_dptr_prev -= sizeof(uint8_t);
538 		}
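		/* NOTE: When the host passes "no preference" the value 0 dBm
		 * is placed in the PDU and also written back through tx_pwr.
		 */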
539 		if (pri_hdr->tx_pwr) {
540 			uint8_t _tx_pwr;
541 
542 			_tx_pwr = 0;
543 			if (tx_pwr) {
544 				if (*tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) {
545 					_tx_pwr = *tx_pwr;
546 				} else {
547 					*tx_pwr = _tx_pwr;
548 				}
549 			}
550 
551 			pri_dptr -= sizeof(uint8_t);
552 			*pri_dptr = _tx_pwr;
553 		}
554 
555 		/* No SyncInfo in primary channel PDU */
556 
557 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
558 		/* AuxPtr */
559 		if (pri_hdr_prev.aux_ptr) {
560 			pri_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
561 		}
562 		if (pri_hdr->aux_ptr) {
563 			ull_adv_aux_ptr_fill(&pri_dptr, phy_s);
564 		}
565 		adv->lll.phy_s = phy_s;
566 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
567 
568 		/* ADI */
569 		if (pri_hdr_prev.adi) {
570 			pri_dptr_prev -= sizeof(struct pdu_adv_adi);
571 		}
572 		if (pri_hdr->adi) {
573 			struct pdu_adv_adi *adi;
574 
575 			pri_dptr -= sizeof(struct pdu_adv_adi);
576 
577 			/* NOTE: memmove shall handle overlapping buffers */
578 			memmove(pri_dptr, pri_dptr_prev,
579 				sizeof(struct pdu_adv_adi));
580 
581 			adi = (void *)pri_dptr;
582 			adi->sid = sid;
583 		}
584 		adv->sid = sid;
585 
586 		/* No CTEInfo field in primary channel PDU */
587 
588 		/* TargetA */
589 		if (pri_hdr_prev.tgt_addr) {
590 			pri_dptr_prev -= BDADDR_SIZE;
591 		}
592 		if (pri_hdr->tgt_addr) {
593 			pri_dptr -= BDADDR_SIZE;
594 			/* NOTE: RPA will be updated on enable, if needed */
595 			memcpy(pri_dptr, direct_addr, BDADDR_SIZE);
596 		}
597 
598 		/* NOTE: AdvA, filled at enable and RPA timeout */
599 
600 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
601 		/* Make sure aux is created if we have AuxPtr */
602 		if (pri_hdr->aux_ptr) {
603 			uint8_t pri_idx;
604 			uint8_t err;
605 
606 			err = ull_adv_aux_hdr_set_clear(adv,
607 							ULL_ADV_PDU_HDR_FIELD_ADVA,
608 							0, &own_addr_type,
609 							NULL, &pri_idx);
610 			if (err) {
611 				/* TODO: cleanup? */
612 				return err;
613 			}
614 
615 			lll_adv_data_enqueue(&adv->lll, pri_idx);
616 		}
617 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
618 
619 #endif /* CONFIG_BT_CTLR_ADV_EXT */
620 
621 	} else if (pdu->len == 0) {
622 		pdu->tx_addr = own_addr_type & 0x1;
623 		pdu->rx_addr = 0;
624 		pdu->len = BDADDR_SIZE;
625 	} else {
626 
627 #if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
628 		if (((pdu_type_prev == PDU_ADV_TYPE_DIRECT_IND) ||
629 		     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
630 		      (pdu_type_prev == PDU_ADV_TYPE_EXT_IND))) &&
631 		    (pdu->type != PDU_ADV_TYPE_DIRECT_IND) &&
632 		    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
633 		     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
634 			/* Restore the legacy AD Data */
635 			memcpy(pdu->adv_ind.data, adv->ad_data_backup.data,
636 			       adv->ad_data_backup.len);
637 			pdu->len = offsetof(struct pdu_adv_adv_ind, data) +
638 				   adv->ad_data_backup.len;
639 		}
640 #endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */
641 
642 		pdu->tx_addr = own_addr_type & 0x1;
643 		pdu->rx_addr = 0;
644 	}
645 
646 	if (0) {
647 #if defined(CONFIG_BT_CTLR_ADV_EXT)
648 	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
649 		/* Make sure new extended advertising set is initialized with no
650 		 * scan response data. Existing sets keep whatever data was set.
651 		 */
652 		if (is_new_set) {
653 			pdu = lll_adv_scan_rsp_peek(&adv->lll);
654 			pdu->type = PDU_ADV_TYPE_AUX_SCAN_REQ;
655 			pdu->len = 0;
656 		}
657 #endif /* CONFIG_BT_CTLR_ADV_EXT */
658 	} else {
659 		/* Make sure legacy advertising set has scan response data
660 		 * initialized.
661 		 */
662 		pdu = lll_adv_scan_rsp_peek(&adv->lll);
663 		pdu->type = PDU_ADV_TYPE_SCAN_RSP;
664 		pdu->rfu = 0;
665 		pdu->chan_sel = 0;
666 		pdu->tx_addr = own_addr_type & 0x1;
667 		pdu->rx_addr = 0;
668 		if (pdu->len == 0) {
669 			pdu->len = BDADDR_SIZE;
670 		}
671 	}
672 
673 	return 0;
674 }
675 
676 #if defined(CONFIG_BT_CTLR_ADV_EXT)
677 uint8_t ll_adv_data_set(uint8_t handle, uint8_t len, uint8_t const *const data)
678 {
679 #else /* !CONFIG_BT_CTLR_ADV_EXT */
680 uint8_t ll_adv_data_set(uint8_t len, uint8_t const *const data)
681 {
682 	const uint8_t handle = 0;
683 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
684 	struct ll_adv_set *adv;
685 
686 	adv = ull_adv_set_get(handle);
687 	if (!adv) {
688 		return BT_HCI_ERR_CMD_DISALLOWED;
689 	}
690 
691 	return ull_adv_data_set(adv, len, data);
692 }
693 
694 #if defined(CONFIG_BT_CTLR_ADV_EXT)
695 uint8_t ll_adv_scan_rsp_set(uint8_t handle, uint8_t len,
696 			    uint8_t const *const data)
697 {
698 #else /* !CONFIG_BT_CTLR_ADV_EXT */
699 uint8_t ll_adv_scan_rsp_set(uint8_t len, uint8_t const *const data)
700 {
701 	const uint8_t handle = 0;
702 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
703 	struct ll_adv_set *adv;
704 
705 	adv = ull_adv_set_get(handle);
706 	if (!adv) {
707 		return BT_HCI_ERR_CMD_DISALLOWED;
708 	}
709 
710 	return ull_scan_rsp_set(adv, len, data);
711 }
712 
713 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
714 #if defined(CONFIG_BT_HCI_MESH_EXT)
715 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
716 		   uint8_t at_anchor, uint32_t ticks_anchor, uint8_t retry,
717 		   uint8_t scan_window, uint8_t scan_delay)
718 {
719 #else /* !CONFIG_BT_HCI_MESH_EXT */
720 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
721 		   uint16_t duration, uint8_t max_ext_adv_evts)
722 {
723 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
724 	struct ll_adv_sync_set *sync = NULL;
725 	uint8_t sync_is_started = 0U;
726 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
727 	struct ll_adv_aux_set *aux = NULL;
728 	uint8_t aux_is_started = 0U;
729 	uint32_t ticks_anchor;
730 #endif /* !CONFIG_BT_HCI_MESH_EXT */
731 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
732 uint8_t ll_adv_enable(uint8_t enable)
733 {
734 	uint8_t const handle = 0;
735 	uint32_t ticks_anchor;
736 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
737 	uint32_t ticks_slot_overhead;
738 	uint32_t ticks_slot_offset;
739 	uint32_t volatile ret_cb;
740 	struct pdu_adv *pdu_scan;
741 	struct pdu_adv *pdu_adv;
742 	struct ll_adv_set *adv;
743 	struct lll_adv *lll;
744 	uint8_t hci_err;
745 	uint32_t ret;
746 
747 	if (!enable) {
748 		return disable(handle);
749 	}
750 
751 	adv = is_disabled_get(handle);
752 	if (!adv) {
753 		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
754 		 * Enabling advertising when it is already enabled can cause the
755 		 * random address to change. As the current implementation does
756 		 * not update RPAs on every advertising enable, only on
757 		 * `rpa_timeout_ms` timeout, we are not going to implement the
758 		 * "can cause the random address to change" behavior for legacy
759 		 * advertisements.
760 		 */
761 
762 		/* If HCI LE Set Extended Advertising Enable command is sent
763 		 * again for an advertising set while that set is enabled, the
764 		 * timer used for duration and the number of events counter are
765 		 * reset and any change to the random address shall take effect.
766 		 */
767 		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT) ||
768 		    IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
769 #if defined(CONFIG_BT_CTLR_ADV_EXT)
770 			if (ll_adv_cmds_is_ext()) {
771 				enum node_rx_type volatile *type;
772 
773 				adv = ull_adv_is_enabled_get(handle);
774 				if (!adv) {
775 					/* This should not be happening, since
776 					 * is_disabled_get already failed.
777 					 */
778 					return BT_HCI_ERR_CMD_DISALLOWED;
779 				}
780 
781 				/* Change random address in the primary or
782 				 * auxiliary PDU as necessary.
783 				 */
784 				lll = &adv->lll;
785 				pdu_adv = lll_adv_data_peek(lll);
786 				pdu_scan = lll_adv_scan_rsp_peek(lll);
787 				hci_err = adv_scan_pdu_addr_update(adv,
788 								   pdu_adv,
789 								   pdu_scan);
790 				if (hci_err) {
791 					return hci_err;
792 				}
793 
794 				if (!adv->lll.node_rx_adv_term) {
795 					/* This should not be happening;
796 					 * adv->is_enabled would be 0 if
797 					 * node_rx_adv_term had been released
798 					 * back to the pool.
799 					 */
800 					return BT_HCI_ERR_CMD_DISALLOWED;
801 				}
802 
803 				/* Check advertising not terminated */
804 				type = &adv->lll.node_rx_adv_term->type;
805 				if (*type == NODE_RX_TYPE_NONE) {
806 					/* Reset event counter, update duration,
807 					 * and max events
808 					 */
809 					adv_max_events_duration_set(adv,
810 						duration, max_ext_adv_evts);
811 				}
812 
813 				/* Check the counter reset did not race with
814 				 * advertising terminated.
815 				 */
816 				if (*type != NODE_RX_TYPE_NONE) {
817 					/* Race with advertising terminated */
818 					return BT_HCI_ERR_CMD_DISALLOWED;
819 				}
820 			}
821 #endif /* CONFIG_BT_CTLR_ADV_EXT */
822 
823 			return 0;
824 		}
825 
826 		/* Fail on being strict as a legacy controller; this is valid
827 		 * only under Bluetooth Specification v4.x.
828 		 * Bluetooth Specification v5.0 and above shall not fail to
829 		 * enable already enabled advertising.
830 		 */
831 		return BT_HCI_ERR_CMD_DISALLOWED;
832 	}
833 
834 	lll = &adv->lll;
835 
836 #if defined(CONFIG_BT_CTLR_PRIVACY)
837 	lll->rl_idx = FILTER_IDX_NONE;
838 
839 	/* Prepare filter accept list and optionally resolving list */
840 	ull_filter_adv_update(lll->filter_policy);
841 
842 	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
843 	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
844 		/* Look up the resolving list */
845 		lll->rl_idx = ull_filter_rl_find(adv->peer_addr_type,
846 						 adv->peer_addr, NULL);
847 
848 		if (lll->rl_idx != FILTER_IDX_NONE) {
849 			/* Generate RPAs if required */
850 			ull_filter_rpa_update(false);
851 		}
852 	}
853 #endif /* CONFIG_BT_CTLR_PRIVACY */
854 
855 	pdu_adv = lll_adv_data_peek(lll);
856 	pdu_scan = lll_adv_scan_rsp_peek(lll);
857 
858 	/* Update Bluetooth Device address in advertising and scan response
859 	 * PDUs.
860 	 */
861 	hci_err = adv_scan_pdu_addr_update(adv, pdu_adv, pdu_scan);
862 	if (hci_err) {
863 		return hci_err;
864 	}
865 
866 #if defined(CONFIG_BT_HCI_MESH_EXT)
867 	if (scan_delay) {
868 		if (ull_scan_is_enabled(0)) {
869 			return BT_HCI_ERR_CMD_DISALLOWED;
870 		}
871 
872 		lll->is_mesh = 1;
873 	}
874 #endif /* CONFIG_BT_HCI_MESH_EXT */
875 
876 #if defined(CONFIG_BT_PERIPHERAL)
877 	/* prepare connectable advertising */
878 	if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) ||
879 	    (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) ||
880 #if defined(CONFIG_BT_CTLR_ADV_EXT)
881 	    ((pdu_adv->type == PDU_ADV_TYPE_EXT_IND) &&
882 	     (pdu_adv->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN))
883 #else
884 	    0
885 #endif
886 	     ) {
887 		struct node_rx_pdu *node_rx;
888 		struct ll_conn *conn;
889 		struct lll_conn *conn_lll;
890 		void *link;
891 		int err;
892 
893 		if (lll->conn) {
894 			return BT_HCI_ERR_CMD_DISALLOWED;
895 		}
896 
897 		link = ll_rx_link_alloc();
898 		if (!link) {
899 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
900 		}
901 
902 		node_rx = ll_rx_alloc();
903 		if (!node_rx) {
904 			ll_rx_link_release(link);
905 
906 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
907 		}
908 
909 		conn = ll_conn_acquire();
910 		if (!conn) {
911 			ll_rx_release(node_rx);
912 			ll_rx_link_release(link);
913 
914 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
915 		}
916 
917 		conn_lll = &conn->lll;
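		/* NOTE: 0xFFFF marks the connection as not yet established;
		 * a valid handle is assigned when the connection completes.
		 */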
918 		conn_lll->handle = 0xFFFF;
919 
920 		if (!conn_lll->link_tx_free) {
921 			conn_lll->link_tx_free = &conn_lll->link_tx;
922 		}
923 
924 		memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
925 			  &conn_lll->memq_tx.tail);
926 		conn_lll->link_tx_free = NULL;
927 
928 		conn_lll->packet_tx_head_len = 0;
929 		conn_lll->packet_tx_head_offset = 0;
930 
931 		conn_lll->sn = 0;
932 		conn_lll->nesn = 0;
933 		conn_lll->empty = 0;
934 
935 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
936 		conn_lll->max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
937 		conn_lll->max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
938 
939 #if defined(CONFIG_BT_CTLR_PHY)
940 		/* Use the default 1M packet max time */
941 		conn_lll->max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
942 						      PHY_1M);
943 		conn_lll->max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
944 						      PHY_1M);
945 #if defined(CONFIG_BT_CTLR_ADV_EXT)
946 		conn_lll->max_tx_time = MAX(conn_lll->max_tx_time,
947 					    PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
948 							  lll->phy_s));
949 		conn_lll->max_rx_time = MAX(conn_lll->max_rx_time,
950 					    PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
951 							  lll->phy_s));
952 #endif /* CONFIG_BT_CTLR_ADV_EXT */
953 #endif /* CONFIG_BT_CTLR_PHY */
954 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
955 
956 #if defined(CONFIG_BT_CTLR_PHY)
957 		conn_lll->phy_flags = 0;
958 		if (0) {
959 #if defined(CONFIG_BT_CTLR_ADV_EXT)
960 		} else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
961 			conn_lll->phy_tx = lll->phy_s;
962 			conn_lll->phy_tx_time = lll->phy_s;
963 			conn_lll->phy_rx = lll->phy_s;
964 #endif /* CONFIG_BT_CTLR_ADV_EXT */
965 		} else {
966 			conn_lll->phy_tx = PHY_1M;
967 			conn_lll->phy_tx_time = PHY_1M;
968 			conn_lll->phy_rx = PHY_1M;
969 		}
970 #endif /* CONFIG_BT_CTLR_PHY */
971 
972 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
973 		conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
974 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
975 		conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
976 		conn_lll->rssi_sample_count = 0;
977 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
978 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
979 
980 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
981 		conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
982 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
983 
984 		/* FIXME: BEGIN: Move to ULL? */
985 		conn_lll->role = 1;
986 		conn_lll->periph.initiated = 0;
987 		conn_lll->periph.cancelled = 0;
988 		conn_lll->data_chan_sel = 0;
989 		conn_lll->data_chan_use = 0;
990 		conn_lll->event_counter = 0;
991 
992 		conn_lll->latency_prepare = 0;
993 		conn_lll->latency_event = 0;
994 		conn_lll->periph.latency_enabled = 0;
995 		conn_lll->periph.window_widening_prepare_us = 0;
996 		conn_lll->periph.window_widening_event_us = 0;
997 		conn_lll->periph.window_size_prepare_us = 0;
998 		/* FIXME: END: Move to ULL? */
999 #if defined(CONFIG_BT_CTLR_CONN_META)
1000 		memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
1001 #endif /* CONFIG_BT_CTLR_CONN_META */
1002 
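		/* NOTE: Per the specification the peripheral considers
		 * connection establishment failed if no packet is received
		 * within six connection intervals of the CONNECT_IND, hence
		 * the initial expiry of 6 events.
		 */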
1003 		conn->connect_expire = 6;
1004 		conn->supervision_expire = 0;
1005 		conn->procedure_expire = 0;
1006 
1007 #if defined(CONFIG_BT_CTLR_LE_PING)
1008 		conn->apto_expire = 0U;
1009 		conn->appto_expire = 0U;
1010 #endif
1011 
1012 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
1013 		conn->own_id_addr_type = BT_ADDR_LE_NONE->type;
1014 		(void)memcpy(conn->own_id_addr, BT_ADDR_LE_NONE->a.val,
1015 			     sizeof(conn->own_id_addr));
1016 		conn->peer_id_addr_type = BT_ADDR_LE_NONE->type;
1017 		(void)memcpy(conn->peer_id_addr, BT_ADDR_LE_NONE->a.val,
1018 			     sizeof(conn->peer_id_addr));
1019 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1020 
1021 		conn->common.fex_valid = 0;
1022 		conn->common.txn_lock = 0;
1023 		conn->periph.latency_cancel = 0;
1024 
1025 		conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0;
1026 		conn->llcp_rx = NULL;
1027 		conn->llcp_cu.req = conn->llcp_cu.ack = 0;
1028 		conn->llcp_feature.req = conn->llcp_feature.ack = 0;
1029 		conn->llcp_feature.features_conn = ll_feat_get();
1030 		conn->llcp_feature.features_peer = 0;
1031 		conn->llcp_version.req = conn->llcp_version.ack = 0;
1032 		conn->llcp_version.tx = conn->llcp_version.rx = 0;
1033 		conn->llcp_terminate.req = conn->llcp_terminate.ack = 0;
1034 		conn->llcp_terminate.reason_final = 0;
1035 		/* NOTE: use allocated link for generating dedicated
1036 		 * terminate ind rx node
1037 		 */
1038 		conn->llcp_terminate.node_rx.hdr.link = link;
1039 
1040 #if defined(CONFIG_BT_CTLR_LE_ENC)
1041 		conn_lll->enc_rx = conn_lll->enc_tx = 0U;
1042 		conn->llcp_enc.req = conn->llcp_enc.ack = 0U;
1043 		conn->llcp_enc.pause_tx = conn->llcp_enc.pause_rx = 0U;
1044 		conn->llcp_enc.refresh = 0U;
1045 		conn->periph.llcp_type = 0U;
1046 #endif /* CONFIG_BT_CTLR_LE_ENC */
1047 
1048 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1049 		conn->llcp_conn_param.req = 0;
1050 		conn->llcp_conn_param.ack = 0;
1051 		conn->llcp_conn_param.disabled = 0;
1052 		conn->periph.ticks_to_offset = 0;
1053 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1054 
1055 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1056 		conn->llcp_length.req = conn->llcp_length.ack = 0U;
1057 		conn->llcp_length.disabled = 0U;
1058 		conn->llcp_length.cache.tx_octets = 0U;
1059 		conn->default_tx_octets = ull_conn_default_tx_octets_get();
1060 
1061 #if defined(CONFIG_BT_CTLR_PHY)
1062 		conn->default_tx_time = ull_conn_default_tx_time_get();
1063 #endif /* CONFIG_BT_CTLR_PHY */
1064 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1065 
1066 #if defined(CONFIG_BT_CTLR_PHY)
1067 		conn->llcp_phy.req = conn->llcp_phy.ack = 0;
1068 		conn->llcp_phy.disabled = 0U;
1069 		conn->llcp_phy.pause_tx = 0U;
1070 		conn->phy_pref_tx = ull_conn_default_phy_tx_get();
1071 		conn->phy_pref_rx = ull_conn_default_phy_rx_get();
1072 #endif /* CONFIG_BT_CTLR_PHY */
1073 
1074 		conn->tx_head = conn->tx_ctrl = conn->tx_ctrl_last =
1075 		conn->tx_data = conn->tx_data_last = 0;
1076 
1077 		/* NOTE: using same link as supplied for terminate ind */
1078 		adv->link_cc_free = link;
1079 		adv->node_rx_cc_free = node_rx;
1080 		lll->conn = conn_lll;
1081 
1082 		ull_hdr_init(&conn->ull);
1083 		lll_hdr_init(&conn->lll, conn);
1084 
1085 		/* wait for stable clocks */
1086 		err = lll_clock_wait();
1087 		if (err) {
1088 			conn_release(adv);
1089 
1090 			return BT_HCI_ERR_HW_FAILURE;
1091 		}
1092 	}
1093 #endif /* CONFIG_BT_PERIPHERAL */
1094 
1095 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1096 	if (ll_adv_cmds_is_ext()) {
1097 		struct node_rx_pdu *node_rx_adv_term;
1098 		void *link_adv_term;
1099 
1100 		/* The alloc here is used for the ext adv termination event */
1101 		link_adv_term = ll_rx_link_alloc();
1102 		if (!link_adv_term) {
1103 #if defined(CONFIG_BT_PERIPHERAL)
1104 			if (adv->lll.conn) {
1105 				conn_release(adv);
1106 			}
1107 #endif /* CONFIG_BT_PERIPHERAL */
1108 
1109 			/* TODO: figure out right return value */
1110 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1111 		}
1112 
1113 		node_rx_adv_term = ll_rx_alloc();
1114 		if (!node_rx_adv_term) {
1115 #if defined(CONFIG_BT_PERIPHERAL)
1116 			if (adv->lll.conn) {
1117 				conn_release(adv);
1118 			}
1119 #endif /* CONFIG_BT_PERIPHERAL */
1120 
1121 			ll_rx_link_release(link_adv_term);
1122 
1123 			/* TODO: figure out right return value */
1124 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1125 		}
1126 
1127 		node_rx_adv_term->hdr.type = NODE_RX_TYPE_NONE;
1128 
1129 		node_rx_adv_term->hdr.link = (void *)link_adv_term;
1130 		adv->lll.node_rx_adv_term = (void *)node_rx_adv_term;
1131 
1132 		if (0) {
1133 #if defined(CONFIG_BT_PERIPHERAL)
1134 		} else if (lll->is_hdcd) {
1135 			adv_max_events_duration_set(adv, 0U, 0U);
1136 #endif /* CONFIG_BT_PERIPHERAL */
1137 		} else {
1138 			adv_max_events_duration_set(adv, duration,
1139 						    max_ext_adv_evts);
1140 		}
1141 	} else {
1142 		adv->lll.node_rx_adv_term = NULL;
1143 		adv_max_events_duration_set(adv, 0U, 0U);
1144 	}
1145 
1146 	const uint8_t phy = lll->phy_p;
1147 	const uint8_t phy_flags = lll->phy_flags;
1148 
1149 	adv->event_counter = 0U;
1150 #else
1151 	/* Legacy ADV only supports LE_1M PHY */
1152 	const uint8_t phy = PHY_1M;
1153 	const uint8_t phy_flags = 0U;
1154 #endif
1155 
1156 	/* For now we advertise on all channels enabled in the channel map */
1157 	uint8_t ch_map = lll->chan_map;
1158 	const uint8_t adv_chn_cnt = util_ones_count_get(&ch_map, sizeof(ch_map));
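	/* NOTE: chan_map bits 0..2 correspond to the primary advertising
	 * channels 37, 38 and 39, so adv_chn_cnt is the number of enabled
	 * primary channels.
	 */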
1159 
1160 	if (adv_chn_cnt == 0) {
1161 		/* ADV needs at least one channel */
1162 		goto failure_cleanup;
1163 	}
1164 
1165 	/* Calculate the advertising time reservation */
1166 	uint16_t time_us = adv_time_get(pdu_adv, pdu_scan, adv_chn_cnt, phy,
1167 					phy_flags);
1168 
1169 	uint16_t interval = adv->interval;
1170 #if defined(CONFIG_BT_HCI_MESH_EXT)
1171 	if (lll->is_mesh) {
1172 		uint16_t interval_min_us;
1173 
1174 		_radio.advertiser.retry = retry;
1175 		_radio.advertiser.scan_delay_ms = scan_delay;
1176 		_radio.advertiser.scan_window_ms = scan_window;
1177 
1178 		interval_min_us = time_us +
1179 				  (scan_delay + scan_window) * USEC_PER_MSEC;
1180 		if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
1181 			interval = (interval_min_us +
1182 				(SCAN_INT_UNIT_US - 1)) /
1183 				SCAN_INT_UNIT_US;
1184 		}
1185 
1186 		/* passive scanning */
1187 		_radio.scanner.type = 0;
1188 
1189 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1190 		/* TODO: Coded PHY support */
1191 		_radio.scanner.phy = 0;
1192 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1193 
1194 #if defined(CONFIG_BT_CTLR_PRIVACY)
1195 		/* TODO: Privacy support */
1196 		_radio.scanner.rpa_gen = 0;
1197 		_radio.scanner.rl_idx = rl_idx;
1198 #endif /* CONFIG_BT_CTLR_PRIVACY */
1199 
1200 		_radio.scanner.filter_policy = filter_policy;
1201 	}
1202 #endif /* CONFIG_BT_HCI_MESH_EXT */
1203 
1204 	ull_hdr_init(&adv->ull);
1205 	lll_hdr_init(lll, adv);
1206 
1207 	/* TODO: active_to_start feature port */
1208 	adv->ull.ticks_active_to_start = 0;
1209 	adv->ull.ticks_prepare_to_start =
1210 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1211 	adv->ull.ticks_preempt_to_start =
1212 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1213 	adv->ull.ticks_slot = HAL_TICKER_US_TO_TICKS(time_us);
1214 
1215 	ticks_slot_offset = MAX(adv->ull.ticks_active_to_start,
1216 				adv->ull.ticks_prepare_to_start);
1217 
1218 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1219 		ticks_slot_overhead = ticks_slot_offset;
1220 	} else {
1221 		ticks_slot_overhead = 0;
1222 	}
1223 
1224 #if !defined(CONFIG_BT_HCI_MESH_EXT)
1225 	ticks_anchor = ticker_ticks_now_get();
1226 #else /* CONFIG_BT_HCI_MESH_EXT */
1227 	if (!at_anchor) {
1228 		ticks_anchor = ticker_ticks_now_get();
1229 	}
1230 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1231 
1232 	/* High Duty Cycle Directed Advertising if interval is 0. */
1233 #if defined(CONFIG_BT_PERIPHERAL)
1234 	lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND);
1235 	if (lll->is_hdcd) {
1236 		ret_cb = TICKER_STATUS_BUSY;
1237 		ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1238 				   TICKER_USER_ID_THREAD,
1239 				   (TICKER_ID_ADV_BASE + handle),
1240 				   ticks_anchor, 0,
1241 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1242 				   TICKER_NULL_REMAINDER, TICKER_NULL_LAZY,
1243 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1244 				   ticker_cb, adv,
1245 				   ull_ticker_status_give, (void *)&ret_cb);
1246 		ret = ull_ticker_status_take(ret, &ret_cb);
1247 		if (ret != TICKER_STATUS_SUCCESS) {
1248 			goto failure_cleanup;
1249 		}
1250 
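		/* NOTE: A one-shot stop ticker is started roughly 1.28 s
		 * (1280 ms) after the anchor; this matches the specification
		 * limit on high duty cycle directed advertising duration.
		 */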
1251 		ret_cb = TICKER_STATUS_BUSY;
1252 		ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1253 				   TICKER_USER_ID_THREAD,
1254 				   TICKER_ID_ADV_STOP, ticks_anchor,
1255 				   HAL_TICKER_US_TO_TICKS(ticks_slot_offset +
1256 							  (1280 * 1000)),
1257 				   TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER,
1258 				   TICKER_NULL_LAZY, TICKER_NULL_SLOT,
1259 				   ticker_stop_cb, adv,
1260 				   ull_ticker_status_give, (void *)&ret_cb);
1261 	} else
1262 #endif /* CONFIG_BT_PERIPHERAL */
1263 	{
1264 		const uint32_t ticks_slot = adv->ull.ticks_slot +
1265 					 ticks_slot_overhead;
1266 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1267 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1268 		uint8_t pri_idx = 0U;
1269 
1270 		/* Add sync_info into auxiliary PDU */
1271 		if (lll->sync) {
1272 			sync = HDR_LLL2ULL(lll->sync);
1273 			if (sync->is_enabled && !sync->is_started) {
1274 				struct pdu_adv_sync_info *sync_info;
1275 				uint8_t value[1 + sizeof(sync_info)];
1276 				uint8_t err;
1277 
1278 				err = ull_adv_aux_hdr_set_clear(adv,
1279 					ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
1280 					0, value, NULL, &pri_idx);
1281 				if (err) {
1282 					return err;
1283 				}
1284 
1285 				/* First byte in the length-value encoded
1286 				 * parameter is size of sync_info structure,
1287 				 * followed by pointer to sync_info in the
1288 				 * PDU.
1289 				 */
1290 				memcpy(&sync_info, &value[1], sizeof(sync_info));
1291 				ull_adv_sync_info_fill(sync, sync_info);
1292 			} else {
1293 				/* Do not start periodic advertising */
1294 				sync = NULL;
1295 			}
1296 		}
1297 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1298 
1299 		if (lll->aux) {
1300 			struct lll_adv_aux *lll_aux = lll->aux;
1301 			uint32_t ticks_slot_overhead_aux;
1302 			uint32_t ticks_anchor_aux;
1303 
1304 			aux = HDR_LLL2ULL(lll_aux);
1305 
1306 			/* Schedule auxiliary PDU after primary channel
1307 			 * PDUs.
1308 			 * Reduce the MAFS offset by the Event Overhead
1309 			 * so that the actual radio air packet starts as
1310 			 * close as possible after the MAFS gap.
1311 			 * Add 2 ticks offset as compensation towards
1312 			 * the +/- 1 tick ticker scheduling jitter due
1313 			 * to accumulation of remainder to maintain
1314 			 * average ticker interval.
1315 			 */
1316 			ticks_anchor_aux =
1317 				ticks_anchor + ticks_slot +
1318 				HAL_TICKER_US_TO_TICKS(
1319 					MAX(EVENT_MAFS_US,
1320 					    EVENT_OVERHEAD_START_US) -
1321 					EVENT_OVERHEAD_START_US +
1322 					(EVENT_TICKER_RES_MARGIN_US << 1));
1323 
1324 			ticks_slot_overhead_aux = ull_adv_aux_evt_init(aux);
1325 
1326 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1327 			/* Start periodic advertising if enabled and not already
1328 			 * started.
1329 			 */
1330 			if (sync) {
1331 				const uint32_t ticks_slot_aux =
1332 					aux->ull.ticks_slot +
1333 					ticks_slot_overhead_aux;
1334 
1335 				/* Schedule periodic advertising PDU after
1336 				 * auxiliary PDUs.
1337 				 * Reduce the MAFS offset by the Event Overhead
1338 				 * so that the actual radio air packet starts as
1339 				 * close as possible after the MAFS gap.
1340 				 * Add 2 ticks offset as compensation towards
1341 				 * the +/- 1 tick ticker scheduling jitter due
1342 				 * to accumulation of remainder to maintain
1343 				 * average ticker interval.
1344 				 */
1345 				uint32_t ticks_anchor_sync =
1346 					ticks_anchor_aux + ticks_slot_aux +
1347 					HAL_TICKER_US_TO_TICKS(
1348 						MAX(EVENT_MAFS_US,
1349 						    EVENT_OVERHEAD_START_US) -
1350 						EVENT_OVERHEAD_START_US +
1351 						(EVENT_TICKER_RES_MARGIN_US << 1));
1352 
1353 				ret = ull_adv_sync_start(adv, sync,
1354 							 ticks_anchor_sync);
1355 				if (ret) {
1356 					goto failure_cleanup;
1357 				}
1358 
1359 				sync_is_started = 1U;
1360 
1361 				lll_adv_data_enqueue(lll, pri_idx);
1362 			}
1363 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1364 
1365 			/* Keep the aux interval equal to or higher than the
1366 			 * primary PDU interval.
1367 			 */
1368 			aux->interval = adv->interval +
1369 					(HAL_TICKER_TICKS_TO_US(
1370 						ULL_ADV_RANDOM_DELAY) /
1371 						ADV_INT_UNIT_US);
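			/* NOTE: Assuming the default 10 ms maximum random
			 * delay (advDelay), this adds 10000 / 625 = 16
			 * interval units on top of the primary interval.
			 */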
1372 
1373 			ret = ull_adv_aux_start(aux, ticks_anchor_aux,
1374 						ticks_slot_overhead_aux);
1375 			if (ret) {
1376 				goto failure_cleanup;
1377 			}
1378 
1379 			aux_is_started = 1U;
1380 		}
1381 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1382 
1383 		ret_cb = TICKER_STATUS_BUSY;
1384 
1385 #if defined(CONFIG_BT_TICKER_EXT)
1386 		ll_adv_ticker_ext[handle].ticks_slot_window =
1387 			ULL_ADV_RANDOM_DELAY + ticks_slot;
1388 
1389 		ret = ticker_start_ext(
1390 #else
1391 		ret = ticker_start(
1392 #endif /* CONFIG_BT_TICKER_EXT */
1393 				   TICKER_INSTANCE_ID_CTLR,
1394 				   TICKER_USER_ID_THREAD,
1395 				   (TICKER_ID_ADV_BASE + handle),
1396 				   ticks_anchor, 0,
1397 				   HAL_TICKER_US_TO_TICKS((uint64_t)interval *
1398 							  ADV_INT_UNIT_US),
1399 				   TICKER_NULL_REMAINDER,
1400 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1401 	!defined(CONFIG_BT_CTLR_LOW_LAT)
1402 				   /* Force expiry to ensure timing update */
1403 				   TICKER_LAZY_MUST_EXPIRE,
1404 #else
1405 				   TICKER_NULL_LAZY,
1406 #endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
1407 				   ticks_slot,
1408 				   ticker_cb, adv,
1409 				   ull_ticker_status_give,
1410 				   (void *)&ret_cb
1411 #if defined(CONFIG_BT_TICKER_EXT)
1412 				   ,
1413 				   &ll_adv_ticker_ext[handle]
1414 #endif /* CONFIG_BT_TICKER_EXT */
1415 				   );
1416 	}
1417 
1418 	ret = ull_ticker_status_take(ret, &ret_cb);
1419 	if (ret != TICKER_STATUS_SUCCESS) {
1420 		goto failure_cleanup;
1421 	}
1422 
1423 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1424 	if (aux_is_started) {
1425 		aux->is_started = aux_is_started;
1426 
1427 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1428 		if (sync_is_started) {
1429 			sync->is_started = sync_is_started;
1430 		}
1431 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1432 	}
1433 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1434 
1435 	adv->is_enabled = 1;
1436 
1437 #if defined(CONFIG_BT_CTLR_PRIVACY)
1438 #if defined(CONFIG_BT_HCI_MESH_EXT)
1439 	if (_radio.advertiser.is_mesh) {
1440 		_radio.scanner.is_enabled = 1;
1441 
1442 		ull_filter_adv_scan_state_cb(BIT(0) | BIT(1));
1443 	}
1444 #else /* !CONFIG_BT_HCI_MESH_EXT */
1445 	if (IS_ENABLED(CONFIG_BT_OBSERVER) && !ull_scan_is_enabled_get(0)) {
1446 		ull_filter_adv_scan_state_cb(BIT(0));
1447 	}
1448 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1449 #endif /* CONFIG_BT_CTLR_PRIVACY */
1450 
1451 	return 0;
1452 
1453 failure_cleanup:
1454 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1455 	if (aux_is_started) {
1456 		/* TODO: Stop extended advertising and release resources */
1457 	}
1458 
1459 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1460 	if (sync_is_started) {
1461 		/* TODO: Stop periodic advertising and release resources */
1462 	}
1463 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1464 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1465 
1466 #if defined(CONFIG_BT_PERIPHERAL)
1467 	if (adv->lll.conn) {
1468 		conn_release(adv);
1469 	}
1470 #endif /* CONFIG_BT_PERIPHERAL */
1471 
1472 	return BT_HCI_ERR_CMD_DISALLOWED;
1473 }
1474 
1475 int ull_adv_init(void)
1476 {
1477 	int err;
1478 
1479 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1480 #if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
1481 	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
1482 		err = ull_adv_aux_init();
1483 		if (err) {
1484 			return err;
1485 		}
1486 	}
1487 
1488 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1489 	err = ull_adv_sync_init();
1490 	if (err) {
1491 		return err;
1492 	}
1493 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1494 #endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
1495 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1496 
1497 	err = init_reset();
1498 	if (err) {
1499 		return err;
1500 	}
1501 
1502 	return 0;
1503 }
1504 
1505 int ull_adv_reset(void)
1506 {
1507 	uint8_t handle;
1508 
1509 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1510 		(void)disable(handle);
1511 	}
1512 
1513 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1514 #if defined(CONFIG_BT_HCI_RAW)
1515 	ll_adv_cmds = LL_ADV_CMDS_ANY;
1516 #endif
1517 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1518 	{
1519 		int err;
1520 
1521 		err = ull_adv_sync_reset();
1522 		if (err) {
1523 			return err;
1524 		}
1525 	}
1526 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1527 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1528 
1529 	return 0;
1530 }
1531 
1532 int ull_adv_reset_finalize(void)
1533 {
1534 	uint8_t handle;
1535 	int err;
1536 
1537 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1538 #if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
1539 	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
1540 		err = ull_adv_aux_reset_finalize();
1541 		if (err) {
1542 			return err;
1543 		}
1544 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1545 		err = ull_adv_sync_reset_finalize();
1546 		if (err) {
1547 			return err;
1548 		}
1549 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1550 	}
1551 #endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
1552 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1553 
1554 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1555 		struct ll_adv_set *adv = &ll_adv[handle];
1556 		struct lll_adv *lll = &adv->lll;
1557 
1558 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1559 		adv->is_created = 0;
1560 		lll->aux = NULL;
1561 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1562 		lll->sync = NULL;
1563 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1564 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1565 		lll_adv_data_reset(&lll->adv_data);
1566 		lll_adv_data_reset(&lll->scan_rsp);
1567 	}
1568 
1569 	err = init_reset();
1570 	if (err) {
1571 		return err;
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle)
1578 {
1579 	if (handle >= BT_CTLR_ADV_SET) {
1580 		return NULL;
1581 	}
1582 
1583 	return &ll_adv[handle];
1584 }
1585 
1586 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv)
1587 {
1588 	return ((uint8_t *)adv - (uint8_t *)ll_adv) / sizeof(*adv);
1589 }
1590 
1591 uint16_t ull_adv_lll_handle_get(struct lll_adv *lll)
1592 {
1593 	return ull_adv_handle_get(HDR_LLL2ULL(lll));
1594 }
1595 
1596 inline struct ll_adv_set *ull_adv_is_enabled_get(uint8_t handle)
1597 {
1598 	struct ll_adv_set *adv;
1599 
1600 	adv = ull_adv_set_get(handle);
1601 	if (!adv || !adv->is_enabled) {
1602 		return NULL;
1603 	}
1604 
1605 	return adv;
1606 }
1607 
1608 int ull_adv_is_enabled(uint8_t handle)
1609 {
1610 	struct ll_adv_set *adv;
1611 
1612 	adv = ull_adv_is_enabled_get(handle);
1613 
1614 	return adv != NULL;
1615 }
1616 
1617 uint32_t ull_adv_filter_pol_get(uint8_t handle)
1618 {
1619 	struct ll_adv_set *adv;
1620 
1621 	adv = ull_adv_is_enabled_get(handle);
1622 	if (!adv) {
1623 		return 0;
1624 	}
1625 
1626 	return adv->lll.filter_policy;
1627 }
1628 
1629 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1630 struct ll_adv_set *ull_adv_is_created_get(uint8_t handle)
1631 {
1632 	struct ll_adv_set *adv;
1633 
1634 	adv = ull_adv_set_get(handle);
1635 	if (!adv || !adv->is_created) {
1636 		return NULL;
1637 	}
1638 
1639 	return adv;
1640 }
1641 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1642 
1643 uint8_t ull_adv_data_set(struct ll_adv_set *adv, uint8_t len,
1644 			 uint8_t const *const data)
1645 {
1646 	struct pdu_adv *prev;
1647 	struct pdu_adv *pdu;
1648 	uint8_t idx;
1649 
1650 	/* Check invalid AD Data length */
1651 	if (len > PDU_AC_DATA_SIZE_MAX) {
1652 		return BT_HCI_ERR_INVALID_PARAM;
1653 	}
1654 
1655 	prev = lll_adv_data_peek(&adv->lll);
1656 
1657 	/* Don't update data if directed; back it up instead */
1658 	if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
1659 	    (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
1660 	     (prev->type == PDU_ADV_TYPE_EXT_IND))) {
1661 #if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
1662 		/* Update the backup AD Data */
1663 		adv->ad_data_backup.len = len;
1664 		memcpy(adv->ad_data_backup.data, data, adv->ad_data_backup.len);
1665 		return 0;
1666 
1667 #else /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
1668 		return BT_HCI_ERR_CMD_DISALLOWED;
1669 #endif /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
1670 	}
1671 
1672 	/* update adv pdu fields. */
1673 	pdu = lll_adv_data_alloc(&adv->lll, &idx);
1674 
1675 	/* check for race condition with LLL ISR */
1676 	if (IS_ENABLED(CONFIG_ASSERT)) {
1677 		uint8_t idx_test;
1678 
1679 		lll_adv_data_alloc(&adv->lll, &idx_test);
1680 		__ASSERT((idx == idx_test), "Probable AD Data Corruption.\n");
1681 	}
1682 
1683 	pdu->type = prev->type;
1684 	pdu->rfu = 0U;
1685 
1686 	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
1687 		pdu->chan_sel = prev->chan_sel;
1688 	} else {
1689 		pdu->chan_sel = 0U;
1690 	}
1691 
1692 	pdu->tx_addr = prev->tx_addr;
1693 	pdu->rx_addr = prev->rx_addr;
1694 	memcpy(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE);
1695 	memcpy(&pdu->adv_ind.data[0], data, len);
1696 	pdu->len = BDADDR_SIZE + len;
1697 
1698 	/* Update time reservation */
1699 	if (adv->is_enabled) {
1700 		struct pdu_adv *pdu_scan;
1701 		struct lll_adv *lll;
1702 		uint8_t err;
1703 
1704 		lll = &adv->lll;
1705 		pdu_scan = lll_adv_scan_rsp_peek(lll);
1706 
1707 		err = ull_adv_time_update(adv, pdu, pdu_scan);
1708 		if (err) {
1709 			return err;
1710 		}
1711 	}
1712 
1713 	lll_adv_data_enqueue(&adv->lll, idx);
1714 
1715 	return 0;
1716 }
1717 
1718 uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
1719 			 uint8_t const *const data)
1720 {
1721 	struct pdu_adv *prev;
1722 	struct pdu_adv *pdu;
1723 	uint8_t idx;
1724 
1725 	if (len > PDU_AC_DATA_SIZE_MAX) {
1726 		return BT_HCI_ERR_INVALID_PARAM;
1727 	}
1728 
1729 	/* update scan pdu fields. */
1730 	prev = lll_adv_scan_rsp_peek(&adv->lll);
1731 	pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
1732 	pdu->type = PDU_ADV_TYPE_SCAN_RSP;
1733 	pdu->rfu = 0;
1734 	pdu->chan_sel = 0;
1735 	pdu->tx_addr = prev->tx_addr;
1736 	pdu->rx_addr = 0;
1737 	pdu->len = BDADDR_SIZE + len;
1738 	memcpy(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE);
1739 	memcpy(&pdu->scan_rsp.data[0], data, len);
1740 
1741 	/* Update time reservation */
1742 	if (adv->is_enabled) {
1743 		struct pdu_adv *pdu_adv_scan;
1744 		struct lll_adv *lll;
1745 		uint8_t err;
1746 
1747 		lll = &adv->lll;
1748 		pdu_adv_scan = lll_adv_data_peek(lll);
1749 
1750 		if ((pdu_adv_scan->type == PDU_ADV_TYPE_ADV_IND) ||
1751 		    (pdu_adv_scan->type == PDU_ADV_TYPE_SCAN_IND)) {
1752 			err = ull_adv_time_update(adv, pdu_adv_scan, pdu);
1753 			if (err) {
1754 				return err;
1755 			}
1756 		}
1757 	}
1758 
1759 	lll_adv_scan_rsp_enqueue(&adv->lll, idx);
1760 
1761 	return 0;
1762 }
1763 
1764 static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_window,
1765 				   uint32_t ticks_delay_window_offset,
1766 				   uint32_t ticks_adjust_minus)
1767 {
1768 	uint32_t random_delay;
1769 	uint32_t ret;
1770 
1771 	/* Get pseudo-random number in the range [0..ticks_delay_window].
1772 	 * Please note that using modulo of the 2^32 sample space has an uneven
1773 	 * distribution, slightly favoring smaller values.
1774 	 */
1775 	lll_rand_isr_get(&random_delay, sizeof(random_delay));
1776 	random_delay %= ticks_delay_window;
1777 	random_delay += (ticks_delay_window_offset + 1);
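	/* i.e. the resulting delay lies in the range
	 * [ticks_delay_window_offset + 1 .. ticks_delay_window_offset +
	 * ticks_delay_window], always at least one tick past the offset.
	 */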
1778 
1779 	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
1780 			    TICKER_USER_ID_ULL_HIGH,
1781 			    TICKER_ID_ADV_BASE + ull_adv_handle_get(adv),
1782 			    random_delay,
1783 			    ticks_adjust_minus, 0, 0, 0, 0,
1784 			    ticker_update_op_cb, adv);
1785 
1786 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1787 		  (ret == TICKER_STATUS_BUSY));
1788 
1789 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1790 	adv->delay = random_delay;
1791 #endif
1792 	return random_delay;
1793 }
1794 
1795 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
1796 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1797 void ull_adv_done(struct node_rx_event_done *done)
1798 {
1799 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1800 	struct lll_adv_aux *lll_aux;
1801 	struct node_rx_hdr *rx_hdr;
1802 	uint8_t handle;
1803 	uint32_t ret;
1804 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1805 	struct ll_adv_set *adv;
1806 	struct lll_adv *lll;
1807 
1808 	/* Get reference to ULL context */
1809 	adv = CONTAINER_OF(done->param, struct ll_adv_set, ull);
1810 	lll = &adv->lll;
1811 
1812 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1813 	if (done->extra.result == DONE_COMPLETED) {
1814 		/* Event completed successfully */
1815 		adv->delay_remain = ULL_ADV_RANDOM_DELAY;
1816 	} else {
1817 		/* Event aborted or too late - try to re-schedule */
1818 		uint32_t ticks_elapsed;
1819 		uint32_t ticks_now;
1820 
1821 		const uint32_t prepare_overhead =
1822 			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1823 		const uint32_t ticks_adv_airtime = adv->ticks_at_expire +
1824 			prepare_overhead;
1825 
1826 		ticks_elapsed = 0;
1827 
1828 		ticks_now = cntr_cnt_get();
1829 		if ((int32_t)(ticks_now - ticks_adv_airtime) > 0) {
1830 			ticks_elapsed = ticks_now - ticks_adv_airtime;
1831 		}
1832 
1833 		if (adv->delay_remain >= adv->delay + ticks_elapsed) {
1834 			/* The perturbation window is still open */
1835 			adv->delay_remain -= (adv->delay + ticks_elapsed);
1836 		} else {
1837 			adv->delay_remain = 0;
1838 		}
1839 
1840 		/* Check if we have enough time to re-schedule */
1841 		if (adv->delay_remain > prepare_overhead) {
1842 			uint32_t ticks_adjust_minus;
1843 
			/* Get the negative ticker adjustment needed to pull the ADV event
			 * back by one interval plus the randomized delay. This means the
			 * ticker will be updated to expire within now + start overhead,
			 * until the 10 ms window is exhausted.
			 */
1849 			ticks_adjust_minus = HAL_TICKER_US_TO_TICKS(
1850 				(uint64_t)adv->interval * ADV_INT_UNIT_US) + adv->delay;
1851 
1852 			/* Apply random delay in range [prepare_overhead..delay_remain] */
1853 			ticker_update_rand(adv, adv->delay_remain - prepare_overhead,
1854 					   prepare_overhead, ticks_adjust_minus);
1855 
			/* The score of the event was increased due to the result, but
			 * since we are getting another chance we set it back.
			 */
1859 			adv->lll.hdr.score -= 1;
1860 		} else {
1861 			adv->delay_remain = ULL_ADV_RANDOM_DELAY;
1862 		}
1863 	}
1864 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
1865 
1866 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1867 	if (adv->max_events && (adv->event_counter >= adv->max_events)) {
1868 		adv->max_events = 0;
1869 
1870 		rx_hdr = (void *)lll->node_rx_adv_term;
1871 		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_LIMIT_REACHED;
1872 	} else if (adv->ticks_remain_duration &&
1873 		   (adv->ticks_remain_duration <=
1874 		    HAL_TICKER_US_TO_TICKS((uint64_t)adv->interval *
1875 			ADV_INT_UNIT_US))) {
1876 		adv->ticks_remain_duration = 0;
1877 
1878 		rx_hdr = (void *)lll->node_rx_adv_term;
1879 		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
1880 	} else {
1881 		return;
1882 	}
1883 
1884 	handle = ull_adv_handle_get(adv);
1885 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
1886 
1887 	rx_hdr->type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
1888 	rx_hdr->handle = handle;
1889 	rx_hdr->rx_ftr.param_adv_term.conn_handle = 0xffff;
1890 	rx_hdr->rx_ftr.param_adv_term.num_events = adv->event_counter;
1891 
1892 	lll_aux = lll->aux;
1893 	if (lll_aux) {
1894 		struct ll_adv_aux_set *aux;
1895 		uint8_t aux_handle;
1896 
1897 		aux = HDR_LLL2ULL(lll_aux);
1898 		aux_handle = ull_adv_aux_handle_get(aux);
1899 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1900 				  TICKER_USER_ID_ULL_HIGH,
1901 				  (TICKER_ID_ADV_AUX_BASE + aux_handle),
1902 				  ticker_stop_aux_op_cb, adv);
1903 	} else {
1904 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1905 				  TICKER_USER_ID_ULL_HIGH,
1906 				  (TICKER_ID_ADV_BASE + handle),
1907 				  ticker_stop_ext_op_cb, adv);
1908 	}
1909 
1910 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1911 		  (ret == TICKER_STATUS_BUSY));
1912 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1913 }
1914 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
1915 
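/* Update AdvA (and TargetA for directed PDUs) in the supplied PDU with the
 * address to be used by this advertising set, and return a pointer to the
 * AdvA field that was written.
 */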
1916 const uint8_t *ull_adv_pdu_update_addrs(struct ll_adv_set *adv,
1917 					struct pdu_adv *pdu)
1918 {
1919 	const uint8_t *adv_addr;
1920 
1921 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1922 	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
1923 	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
1924 	struct pdu_adv_ext_hdr hdr_flags;
1925 
1926 	if (com_hdr->ext_hdr_len) {
1927 		hdr_flags = *hdr;
1928 	} else {
1929 		*(uint8_t *)&hdr_flags = 0U;
1930 	}
1931 #endif
1932 
1933 	adv_addr = adva_update(adv, pdu);
1934 
	/* Update TargetA only if a directed advertising PDU is supplied. Note
	 * that AUX_SCAN_REQ does not have the TargetA flag set, so it is
	 * ignored here as expected.
	 */
1939 	if ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
1940 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1941 	    ((pdu->type == PDU_ADV_TYPE_EXT_IND) && hdr_flags.tgt_addr) ||
1942 #endif
1943 	    0) {
1944 		tgta_update(adv, pdu);
1945 	}
1946 
1947 	return adv_addr;
1948 }
1949 
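/* Recalculate the advertising event time reservation from the supplied PDUs
 * and, if it changed, adjust the ticker slot of this advertising set
 * accordingly.
 */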
1950 uint8_t ull_adv_time_update(struct ll_adv_set *adv, struct pdu_adv *pdu,
1951 			    struct pdu_adv *pdu_scan)
1952 {
1953 	uint32_t volatile ret_cb;
1954 	uint32_t ticks_minus;
1955 	uint32_t ticks_plus;
1956 	struct lll_adv *lll;
1957 	uint32_t time_ticks;
1958 	uint8_t phy_flags;
1959 	uint16_t time_us;
1960 	uint8_t chan_map;
1961 	uint8_t chan_cnt;
1962 	uint32_t ret;
1963 	uint8_t phy;
1964 
1965 	lll = &adv->lll;
1966 
1967 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1968 	phy = lll->phy_p;
1969 	phy_flags = lll->phy_flags;
1970 #else
1971 	phy = PHY_1M;
1972 	phy_flags = 0U;
1973 #endif
1974 
1975 	chan_map = lll->chan_map;
1976 	chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map));
1977 	time_us = adv_time_get(pdu, pdu_scan, chan_cnt, phy, phy_flags);
1978 	time_ticks = HAL_TICKER_US_TO_TICKS(time_us);
1979 	if (adv->ull.ticks_slot > time_ticks) {
1980 		ticks_minus = adv->ull.ticks_slot - time_ticks;
1981 		ticks_plus = 0U;
1982 	} else if (adv->ull.ticks_slot < time_ticks) {
1983 		ticks_minus = 0U;
1984 		ticks_plus = time_ticks - adv->ull.ticks_slot;
1985 	} else {
1986 		return BT_HCI_ERR_SUCCESS;
1987 	}
1988 
1989 	ret_cb = TICKER_STATUS_BUSY;
1990 	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
1991 			    TICKER_USER_ID_THREAD,
1992 			    (TICKER_ID_ADV_BASE +
1993 			     ull_adv_handle_get(adv)),
1994 			    0, 0, ticks_plus, ticks_minus, 0, 0,
1995 			    ull_ticker_status_give, (void *)&ret_cb);
1996 	ret = ull_ticker_status_take(ret, &ret_cb);
1997 	if (ret != TICKER_STATUS_SUCCESS) {
1998 		return BT_HCI_ERR_CMD_DISALLOWED;
1999 	}
2000 
2001 	adv->ull.ticks_slot = time_ticks;
2002 
2003 	return BT_HCI_ERR_SUCCESS;
2004 }
2005 
2006 static int init_reset(void)
2007 {
2008 	uint8_t handle;
2009 
2010 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
2011 	!defined(CONFIG_BT_CTLR_ADV_EXT)
2012 	ll_adv[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
2013 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */
2014 
2015 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
2016 		lll_adv_data_init(&ll_adv[handle].lll.adv_data);
2017 		lll_adv_data_init(&ll_adv[handle].lll.scan_rsp);
2018 #if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
		/* The pointer to the DF configuration must be cleared on
		 * reset. Otherwise it would point to a memory pool address
		 * that has already been released and may be reused by the
		 * pool itself, which could lead to errors.
		 */
2023 		ll_adv[handle].df_cfg = NULL;
2024 #endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
2025 	}
2026 
	/* Make sure that set #0 is initialized with empty legacy PDUs. This is
	 * especially important if the legacy HCI interface is used for
	 * advertising, because it allows advertising to be enabled without any
	 * prior configuration, so the PDUs need to be initialized in advance.
	 */
2032 	init_set(&ll_adv[0]);
2033 
2034 	return 0;
2035 }
2036 
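/* Return the advertising set for the given handle only if it is allocated and
 * currently disabled, NULL otherwise.
 */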
2037 static inline struct ll_adv_set *is_disabled_get(uint8_t handle)
2038 {
2039 	struct ll_adv_set *adv;
2040 
2041 	adv = ull_adv_set_get(handle);
2042 	if (!adv || adv->is_enabled) {
2043 		return NULL;
2044 	}
2045 
2046 	return adv;
2047 }
2048 
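/* Calculate the total radio event time, in microseconds, of one advertising
 * event across adv_chn_cnt channels, including the event start/end overhead
 * and, for connectable/scannable PDUs, the time needed to receive
 * SCAN_REQ/CONNECT_IND and to transmit SCAN_RSP.
 */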
2049 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
2050 			     uint8_t adv_chn_cnt, uint8_t phy,
2051 			     uint8_t phy_flags)
2052 {
2053 	uint16_t time_us = EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
2054 
2055 	/* Calculate the PDU Tx Time and hence the radio event length */
2056 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2057 	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
2058 		time_us += PDU_AC_US(pdu->len, phy, phy_flags) * adv_chn_cnt +
2059 			   EVENT_RX_TX_TURNAROUND(phy) * (adv_chn_cnt - 1);
2060 	} else
2061 #endif
2062 	{
2063 		uint16_t adv_size =
2064 			PDU_OVERHEAD_SIZE(PHY_1M) + ADVA_SIZE;
2065 		const uint16_t conn_ind_us =
2066 			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
2067 				 INITA_SIZE + ADVA_SIZE + LLDATA_SIZE), PHY_1M);
2068 		const uint8_t scan_req_us  =
2069 			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
2070 				 SCANA_SIZE + ADVA_SIZE), PHY_1M);
2071 		const uint16_t scan_rsp_us =
2072 			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
2073 				 ADVA_SIZE + pdu_scan->len), PHY_1M);
2074 		const uint8_t rx_to_us	= EVENT_RX_TO_US(PHY_1M);
2075 		const uint8_t rxtx_turn_us = EVENT_RX_TX_TURNAROUND(PHY_1M);
2076 
2077 		if (pdu->type == PDU_ADV_TYPE_NONCONN_IND) {
2078 			adv_size += pdu->len;
2079 			time_us += BYTES2US(adv_size, PHY_1M) * adv_chn_cnt +
2080 				   rxtx_turn_us * (adv_chn_cnt - 1);
2081 		} else {
2082 			if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
2083 				adv_size += TARGETA_SIZE;
2084 				time_us += conn_ind_us;
2085 			} else if (pdu->type == PDU_ADV_TYPE_ADV_IND) {
2086 				adv_size += pdu->len;
2087 				time_us += MAX(scan_req_us + EVENT_IFS_MAX_US +
2088 						scan_rsp_us, conn_ind_us);
2089 			} else if (pdu->type == PDU_ADV_TYPE_SCAN_IND) {
2090 				adv_size += pdu->len;
2091 				time_us += scan_req_us + EVENT_IFS_MAX_US +
2092 					   scan_rsp_us;
2093 			}
2094 
2095 			time_us += (BYTES2US(adv_size, PHY_1M) +
2096 				    EVENT_IFS_MAX_US + rx_to_us +
2097 				    rxtx_turn_us) * (adv_chn_cnt - 1) +
2098 				   BYTES2US(adv_size, PHY_1M) + EVENT_IFS_MAX_US;
2099 		}
2100 	}
2101 
2102 	return time_us;
2103 }
2104 
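/* Ticker expiry callback for the advertising event. Enqueues a mayfly to run
 * lll_adv_prepare in the LLL context and, except for high duty cycle directed
 * advertising, applies a new random delay to the next advertising event.
 */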
2105 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2106 		      uint32_t remainder, uint16_t lazy, uint8_t force,
2107 		      void *param)
2108 {
2109 	static memq_link_t link;
2110 	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_prepare};
2111 	static struct lll_prepare_param p;
2112 	struct ll_adv_set *adv = param;
2113 	uint32_t random_delay;
2114 	struct lll_adv *lll;
2115 	uint32_t ret;
2116 	uint8_t ref;
2117 
2118 	DEBUG_RADIO_PREPARE_A(1);
2119 
2120 	lll = &adv->lll;
2121 
2122 	if (IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) ||
2123 	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
2124 		/* Increment prepare reference count */
2125 		ref = ull_ref_inc(&adv->ull);
2126 		LL_ASSERT(ref);
2127 
2128 		/* Append timing parameters */
2129 		p.ticks_at_expire = ticks_at_expire;
2130 		p.remainder = remainder;
2131 		p.lazy = lazy;
2132 		p.force = force;
2133 		p.param = lll;
2134 		mfy.param = &p;
2135 
2136 		/* Kick LLL prepare */
2137 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2138 				     TICKER_USER_ID_LLL, 0, &mfy);
2139 		LL_ASSERT(!ret);
2140 
2141 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
2142 		if (adv->lll.aux) {
2143 			ull_adv_aux_offset_get(adv);
2144 		}
2145 #endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
2146 
2147 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2148 		adv->ticks_at_expire = ticks_at_expire;
2149 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
2150 	}
2151 
2152 	/* Apply adv random delay */
2153 #if defined(CONFIG_BT_PERIPHERAL)
2154 	if (!lll->is_hdcd)
2155 #endif /* CONFIG_BT_PERIPHERAL */
2156 	{
2157 		/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
2158 		random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY, 0, 0);
2159 
2160 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2161 		adv->event_counter += (lazy + 1);
2162 
2163 		if (adv->ticks_remain_duration) {
2164 			uint32_t ticks_interval =
2165 				HAL_TICKER_US_TO_TICKS((uint64_t)adv->interval *
2166 						       ADV_INT_UNIT_US);
2167 			uint32_t ticks_elapsed = ticks_interval * (lazy + 1) +
2168 						 ticks_drift;
2169 
2170 			if (adv->ticks_remain_duration > ticks_elapsed) {
2171 				adv->ticks_remain_duration -= ticks_elapsed;
2172 			} else {
2173 				adv->ticks_remain_duration = ticks_interval;
2174 			}
2175 		}
2176 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2177 	}
2178 
2179 	DEBUG_RADIO_PREPARE_A(1);
2180 }
2181 
2182 static void ticker_update_op_cb(uint32_t status, void *param)
2183 {
2184 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
2185 		  param == ull_disable_mark_get());
2186 }
2187 
2188 #if defined(CONFIG_BT_PERIPHERAL)
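/* Ticker expiry callback that stops this advertising set's ticker; the
 * resulting disable is completed in ticker_stop_op_cb.
 */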
2189 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2190 			   uint32_t remainder, uint16_t lazy, uint8_t force,
2191 			   void *param)
2192 {
2193 	struct ll_adv_set *adv = param;
2194 	uint8_t handle;
2195 	uint32_t ret;
2196 
2197 	handle = ull_adv_handle_get(adv);
2198 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2199 
2200 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2201 			  TICKER_ID_ADV_BASE + handle,
2202 			  ticker_stop_op_cb, adv);
2203 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2204 		  (ret == TICKER_STATUS_BUSY));
2205 }
2206 
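/* Ticker stop completion callback; on success, enqueues adv_disable from the
 * ULL_LOW to the ULL_HIGH context to abort any pending LLL events.
 */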
2207 static void ticker_stop_op_cb(uint32_t status, void *param)
2208 {
2209 	static memq_link_t link;
2210 	static struct mayfly mfy = {0, 0, &link, NULL, adv_disable};
2211 	uint32_t ret;
2212 
2213 	/* Ignore if race between thread and ULL */
2214 	if (status != TICKER_STATUS_SUCCESS) {
2215 		/* TODO: detect race */
2216 
2217 		return;
2218 	}
2219 
2220 #if defined(CONFIG_BT_HCI_MESH_EXT)
2221 	/* FIXME: why is this here for Mesh commands? */
2222 	if (param) {
2223 		return;
2224 	}
2225 #endif /* CONFIG_BT_HCI_MESH_EXT */
2226 
	/* Check for any pending LLL events that need to be aborted */
2228 	mfy.param = param;
2229 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2230 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2231 	LL_ASSERT(!ret);
2232 }
2233 
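/* Disable the advertising set in the LLL pipeline: if LLL events are still
 * pending, request an LLL disable and defer to disabled_cb once the reference
 * count returns to zero, otherwise call disabled_cb directly.
 */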
2234 static void adv_disable(void *param)
2235 {
2236 	struct ll_adv_set *adv;
2237 	struct ull_hdr *hdr;
2238 
	/* Check the ref count to determine if any LLL events are pending in
	 * the pipeline.
	 */
2240 	adv = param;
2241 	hdr = &adv->ull;
2242 	if (ull_ref_get(hdr)) {
2243 		static memq_link_t link;
2244 		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2245 		uint32_t ret;
2246 
2247 		mfy.param = &adv->lll;
2248 
		/* Set up the disabled callback to be called when the ref count
		 * returns to zero.
		 */
2252 		LL_ASSERT(!hdr->disabled_cb);
2253 		hdr->disabled_param = mfy.param;
2254 		hdr->disabled_cb = disabled_cb;
2255 
2256 		/* Trigger LLL disable */
2257 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2258 				     TICKER_USER_ID_LLL, 0, &mfy);
2259 		LL_ASSERT(!ret);
2260 	} else {
2261 		/* No pending LLL events */
2262 		disabled_cb(&adv->lll);
2263 	}
2264 }
2265 
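/* Called once the advertising set has been disabled; generates a connection
 * complete node with status BT_HCI_ERR_ADV_TIMEOUT towards the host and, for
 * extended advertising, an advertising set terminate node as well.
 */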
2266 static void disabled_cb(void *param)
2267 {
2268 	struct ll_adv_set *adv;
2269 	struct node_rx_pdu *rx;
2270 	struct node_rx_cc *cc;
2271 	memq_link_t *link;
2272 
2273 	adv = ((struct lll_hdr *)param)->parent;
2274 
2275 	LL_ASSERT(adv->link_cc_free);
2276 	link = adv->link_cc_free;
2277 	adv->link_cc_free = NULL;
2278 
2279 	LL_ASSERT(adv->node_rx_cc_free);
2280 	rx = adv->node_rx_cc_free;
2281 	adv->node_rx_cc_free = NULL;
2282 
2283 	rx->hdr.type = NODE_RX_TYPE_CONNECTION;
2284 	rx->hdr.handle = 0xffff;
2285 
2286 	cc = (void *)rx->pdu;
2287 	memset(cc, 0x00, sizeof(struct node_rx_cc));
2288 	cc->status = BT_HCI_ERR_ADV_TIMEOUT;
2289 
2290 	rx->hdr.rx_ftr.param = param;
2291 
2292 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2293 	if (adv->lll.node_rx_adv_term) {
2294 		uint8_t handle;
2295 
2296 		ll_rx_put(link, rx);
2297 
2298 		handle = ull_adv_handle_get(adv);
2299 		LL_ASSERT(handle < BT_CTLR_ADV_SET);
2300 
2301 		rx = (void *)adv->lll.node_rx_adv_term;
2302 		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
2303 		rx->hdr.handle = handle;
2304 		rx->hdr.rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
2305 		rx->hdr.rx_ftr.param_adv_term.conn_handle = 0xffff;
2306 		rx->hdr.rx_ftr.param_adv_term.num_events = adv->event_counter;
2307 
2308 		link = rx->hdr.link;
2309 	}
2310 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2311 
2312 	ll_rx_put(link, rx);
2313 	ll_rx_sched();
2314 }
2315 
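/* Release the connection context and the pre-allocated connection complete
 * rx node and link that were reserved for this advertising set.
 */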
2316 static void conn_release(struct ll_adv_set *adv)
2317 {
2318 	struct lll_conn *lll = adv->lll.conn;
2319 	memq_link_t *link;
2320 
2321 	LL_ASSERT(!lll->link_tx_free);
2322 	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
2323 	LL_ASSERT(link);
2324 	lll->link_tx_free = link;
2325 
2326 	ll_conn_release(lll->hdr.parent);
2327 	adv->lll.conn = NULL;
2328 
2329 	ll_rx_release(adv->node_rx_cc_free);
2330 	adv->node_rx_cc_free = NULL;
2331 	ll_rx_link_release(adv->link_cc_free);
2332 	adv->link_cc_free = NULL;
2333 }
2334 #endif /* CONFIG_BT_PERIPHERAL */
2335 
2336 #if defined(CONFIG_BT_CTLR_ADV_EXT)
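/* Reset the event counter and store the maximum number of extended
 * advertising events and the advertising duration (in 10 ms units) converted
 * to ticks.
 */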
2337 static void adv_max_events_duration_set(struct ll_adv_set *adv,
2338 					uint16_t duration,
2339 					uint8_t max_ext_adv_evts)
2340 {
2341 	adv->event_counter = 0;
2342 	adv->max_events = max_ext_adv_evts;
2343 	adv->ticks_remain_duration =
2344 		HAL_TICKER_US_TO_TICKS((uint64_t)duration * 10 * USEC_PER_MSEC);
2345 }
2346 
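/* The callbacks below implement the stop sequence of an extended advertising
 * set that has an auxiliary set: stop the auxiliary ticker, wait for its LLL
 * events to finish, stop the primary advertising ticker and finally enqueue
 * the terminate event towards the host.
 */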
2347 static void ticker_stop_aux_op_cb(uint32_t status, void *param)
2348 {
2349 	static memq_link_t link;
2350 	static struct mayfly mfy = {0, 0, &link, NULL, aux_disable};
2351 	uint32_t ret;
2352 
2353 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
2354 
	/* Check for any pending LLL events that need to be aborted */
2356 	mfy.param = param;
2357 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2358 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2359 	LL_ASSERT(!ret);
2360 }
2361 
2362 static void aux_disable(void *param)
2363 {
2364 	struct lll_adv_aux *lll_aux;
2365 	struct ll_adv_aux_set *aux;
2366 	struct ll_adv_set *adv;
2367 	struct ull_hdr *hdr;
2368 
2369 	adv = param;
2370 	lll_aux = adv->lll.aux;
2371 	aux = HDR_LLL2ULL(lll_aux);
2372 	hdr = &aux->ull;
2373 	if (ull_ref_get(hdr)) {
2374 		LL_ASSERT(!hdr->disabled_cb);
2375 		hdr->disabled_param = adv;
2376 		hdr->disabled_cb = aux_disabled_cb;
2377 	} else {
2378 		aux_disabled_cb(param);
2379 	}
2380 }
2381 
2382 static void aux_disabled_cb(void *param)
2383 {
2384 	uint8_t handle;
2385 	uint32_t ret;
2386 
2387 	handle = ull_adv_handle_get(param);
2388 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2389 			  TICKER_USER_ID_ULL_HIGH,
2390 			  (TICKER_ID_ADV_BASE + handle),
2391 			  ticker_stop_ext_op_cb, param);
2392 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2393 		  (ret == TICKER_STATUS_BUSY));
2394 }
2395 
2396 static void ticker_stop_ext_op_cb(uint32_t status, void *param)
2397 {
2398 	static memq_link_t link;
2399 	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
2400 	uint32_t ret;
2401 
2402 	/* Ignore if race between thread and ULL */
2403 	if (status != TICKER_STATUS_SUCCESS) {
2404 		/* TODO: detect race */
2405 
2406 		return;
2407 	}
2408 
	/* Check for any pending LLL events that need to be aborted */
2410 	mfy.param = param;
2411 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2412 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2413 	LL_ASSERT(!ret);
2414 }
2415 
2416 static void ext_disable(void *param)
2417 {
2418 	struct ll_adv_set *adv;
2419 	struct ull_hdr *hdr;
2420 
	/* Check the ref count to determine if any LLL events are pending in
	 * the pipeline.
	 */
2422 	adv = param;
2423 	hdr = &adv->ull;
2424 	if (ull_ref_get(hdr)) {
2425 		static memq_link_t link;
2426 		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2427 		uint32_t ret;
2428 
2429 		mfy.param = &adv->lll;
2430 
		/* Set up the disabled callback to be called when the ref count
		 * returns to zero.
		 */
2434 		LL_ASSERT(!hdr->disabled_cb);
2435 		hdr->disabled_param = mfy.param;
2436 		hdr->disabled_cb = ext_disabled_cb;
2437 
2438 		/* Trigger LLL disable */
2439 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2440 				     TICKER_USER_ID_LLL, 0, &mfy);
2441 		LL_ASSERT(!ret);
2442 	} else {
2443 		/* No pending LLL events */
2444 		ext_disabled_cb(&adv->lll);
2445 	}
2446 }
2447 
2448 static void ext_disabled_cb(void *param)
2449 {
2450 	struct lll_adv *lll = (void *)param;
2451 	struct node_rx_hdr *rx_hdr = (void *)lll->node_rx_adv_term;
2452 
	/* Under a race condition, if a connection has been established then
	 * node_rx is already used to send the terminate event on the
	 * connection.
	 */
2455 	if (!rx_hdr) {
2456 		return;
2457 	}
2458 
2459 	/* NOTE: parameters are already populated on disable, just enqueue here
2460 	 */
2461 	ll_rx_put(rx_hdr->link, rx_hdr);
2462 	ll_rx_sched();
2463 }
2464 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2465 
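/* Common implementation for disabling an advertising set: stops the
 * advertising (and, if present, auxiliary) tickers, aborts any pending LLL
 * events, releases the reserved connection resources and the terminate rx
 * node, and marks the set as disabled.
 */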
2466 static inline uint8_t disable(uint8_t handle)
2467 {
2468 	uint32_t volatile ret_cb;
2469 	struct ll_adv_set *adv;
2470 	void *mark;
2471 	uint32_t ret;
2472 
2473 	adv = ull_adv_is_enabled_get(handle);
2474 	if (!adv) {
2475 		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
2476 		 * Disabling advertising when it is already disabled has no
2477 		 * effect.
2478 		 */
2479 		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT)) {
2480 			return 0;
2481 		}
2482 
2483 		return BT_HCI_ERR_CMD_DISALLOWED;
2484 	}
2485 
2486 #if defined(CONFIG_BT_PERIPHERAL)
2487 	if (adv->lll.conn) {
2488 		/* Indicate to LLL that a cancellation is requested */
2489 		adv->lll.conn->periph.cancelled = 1U;
2490 		cpu_dmb();
2491 
2492 		/* Check if a connection was initiated (connection
2493 		 * establishment race between LLL and ULL).
2494 		 */
2495 		if (unlikely(adv->lll.conn->periph.initiated)) {
2496 			return BT_HCI_ERR_CMD_DISALLOWED;
2497 		}
2498 	}
2499 #endif /* CONFIG_BT_PERIPHERAL */
2500 
2501 	mark = ull_disable_mark(adv);
2502 	LL_ASSERT(mark == adv);
2503 
2504 #if defined(CONFIG_BT_PERIPHERAL)
2505 	if (adv->lll.is_hdcd) {
2506 		ret_cb = TICKER_STATUS_BUSY;
2507 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2508 				  TICKER_USER_ID_THREAD, TICKER_ID_ADV_STOP,
2509 				  ull_ticker_status_give, (void *)&ret_cb);
2510 		ret = ull_ticker_status_take(ret, &ret_cb);
2511 		if (ret) {
2512 			mark = ull_disable_unmark(adv);
2513 			LL_ASSERT(mark == adv);
2514 
2515 			return BT_HCI_ERR_CMD_DISALLOWED;
2516 		}
2517 	}
2518 #endif /* CONFIG_BT_PERIPHERAL */
2519 
2520 	ret_cb = TICKER_STATUS_BUSY;
2521 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
2522 			  TICKER_ID_ADV_BASE + handle,
2523 			  ull_ticker_status_give, (void *)&ret_cb);
2524 	ret = ull_ticker_status_take(ret, &ret_cb);
2525 	if (ret) {
2526 		mark = ull_disable_unmark(adv);
2527 		LL_ASSERT(mark == adv);
2528 
2529 		return BT_HCI_ERR_CMD_DISALLOWED;
2530 	}
2531 
2532 	ret = ull_disable(&adv->lll);
2533 	LL_ASSERT(!ret);
2534 
2535 	mark = ull_disable_unmark(adv);
2536 	LL_ASSERT(mark == adv);
2537 
2538 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
2539 	struct lll_adv_aux *lll_aux = adv->lll.aux;
2540 
2541 	if (lll_aux) {
2542 		struct ll_adv_aux_set *aux;
2543 		uint8_t err;
2544 
2545 		aux = HDR_LLL2ULL(lll_aux);
2546 
2547 		err = ull_adv_aux_stop(aux);
2548 		if (err) {
2549 			return err;
2550 		}
2551 	}
2552 #endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
2553 
2554 #if defined(CONFIG_BT_PERIPHERAL)
2555 	if (adv->lll.conn) {
2556 		conn_release(adv);
2557 	}
2558 #endif /* CONFIG_BT_PERIPHERAL */
2559 
2560 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2561 	struct lll_adv *lll = &adv->lll;
2562 
2563 	if (lll->node_rx_adv_term) {
2564 		struct node_rx_pdu *node_rx_adv_term =
2565 			(void *)lll->node_rx_adv_term;
2566 
2567 		lll->node_rx_adv_term = NULL;
2568 
2569 		ll_rx_link_release(node_rx_adv_term->hdr.link);
2570 		ll_rx_release(node_rx_adv_term);
2571 	}
2572 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2573 
2574 	adv->is_enabled = 0U;
2575 
2576 #if defined(CONFIG_BT_CTLR_PRIVACY)
2577 	if (IS_ENABLED(CONFIG_BT_OBSERVER) && !ull_scan_is_enabled_get(0)) {
2578 		ull_filter_adv_scan_state_cb(0);
2579 	}
2580 #endif /* CONFIG_BT_CTLR_PRIVACY */
2581 
2582 	return 0;
2583 }
2584 
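/* Select which PDU carries AdvA (primary, or auxiliary for extended
 * advertising) and update its addresses; for scannable advertising the scan
 * response addresses are updated as well. Returns an HCI error code on
 * failure.
 */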
2585 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
2586 					struct pdu_adv *pdu,
2587 					struct pdu_adv *pdu_scan)
2588 {
2589 	struct pdu_adv *pdu_adv_to_update;
2590 	struct lll_adv *lll;
2591 
2592 	pdu_adv_to_update = NULL;
2593 	lll = &adv->lll;
2594 
2595 	if (0) {
2596 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2597 	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
2598 		struct pdu_adv_com_ext_adv *pri_com_hdr;
2599 		struct pdu_adv_ext_hdr pri_hdr_flags;
2600 		struct pdu_adv_ext_hdr *pri_hdr;
2601 
2602 		pri_com_hdr = (void *)&pdu->adv_ext_ind;
2603 		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
2604 		if (pri_com_hdr->ext_hdr_len) {
2605 			pri_hdr_flags = *pri_hdr;
2606 		} else {
2607 			*(uint8_t *)&pri_hdr_flags = 0U;
2608 		}
2609 
2610 		if (pri_com_hdr->adv_mode & BT_HCI_LE_ADV_PROP_SCAN) {
2611 			struct pdu_adv *sr = lll_adv_scan_rsp_peek(lll);
2612 
2613 			if (!sr->len) {
2614 				return BT_HCI_ERR_CMD_DISALLOWED;
2615 			}
2616 		}
2617 
2618 		/* AdvA, fill here at enable */
2619 		if (pri_hdr_flags.adv_addr) {
2620 			pdu_adv_to_update = pdu;
2621 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
2622 		} else if (pri_hdr_flags.aux_ptr) {
2623 			struct pdu_adv_com_ext_adv *sec_com_hdr;
2624 			struct pdu_adv_ext_hdr sec_hdr_flags;
2625 			struct pdu_adv_ext_hdr *sec_hdr;
2626 			struct pdu_adv *sec_pdu;
2627 
2628 			sec_pdu = lll_adv_aux_data_peek(lll->aux);
2629 
2630 			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
2631 			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
2632 			if (sec_com_hdr->ext_hdr_len) {
2633 				sec_hdr_flags = *sec_hdr;
2634 			} else {
2635 				*(uint8_t *)&sec_hdr_flags = 0U;
2636 			}
2637 
2638 			if (sec_hdr_flags.adv_addr) {
2639 				pdu_adv_to_update = sec_pdu;
2640 			}
2641 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
2642 		}
2643 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2644 	} else {
2645 		pdu_adv_to_update = pdu;
2646 	}
2647 
2648 	if (pdu_adv_to_update) {
2649 		const uint8_t *adv_addr;
2650 
2651 		adv_addr = ull_adv_pdu_update_addrs(adv, pdu_adv_to_update);
2652 
		/* If the local IRK was not set, or no match was found, the
		 * fallback address was used instead; check that a valid
		 * address has been set.
		 */
2657 		if (pdu_adv_to_update->tx_addr &&
2658 		    !mem_nz((void *)adv_addr, BDADDR_SIZE)) {
2659 			return BT_HCI_ERR_INVALID_PARAM;
2660 		}
2661 
2662 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2663 		/* Do not update scan response for extended non-scannable since
2664 		 * there may be no scan response set.
2665 		 */
2666 		if ((pdu->type != PDU_ADV_TYPE_EXT_IND) ||
2667 		    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_SCAN)) {
2668 #else
2669 		if (1) {
2670 #endif
2671 			ull_adv_pdu_update_addrs(adv, pdu_scan);
2672 		}
2673 
2674 	}
2675 
2676 	return 0;
2677 }
2678 
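/* Return a pointer to the AdvA field within the given advertising PDU, for
 * both legacy and extended PDU formats.
 */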
2679 static inline uint8_t *adv_pdu_adva_get(struct pdu_adv *pdu)
2680 {
2681 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2682 	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
2683 	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
2684 	struct pdu_adv_ext_hdr hdr_flags;
2685 
2686 	if (com_hdr->ext_hdr_len) {
2687 		hdr_flags = *hdr;
2688 	} else {
2689 		*(uint8_t *)&hdr_flags = 0U;
2690 	}
2691 
2692 	/* All extended PDUs have AdvA at the same offset in common header */
2693 	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
2694 		LL_ASSERT(hdr_flags.adv_addr);
2695 
2696 		return &com_hdr->ext_hdr_adv_data[1];
2697 	}
2698 #endif
2699 
2700 	/* All legacy PDUs have AdvA at the same offset */
2701 	return pdu->adv_ind.addr;
2702 }
2703 
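/* Write the AdvA field of the PDU: use the RPA provided by the filter module
 * when privacy is enabled and one is available, otherwise the own identity
 * address (random or public); returns a pointer to the address written.
 */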
2704 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
2705 {
2706 #if defined(CONFIG_BT_CTLR_PRIVACY)
2707 	const uint8_t *rpa = ull_filter_adva_get(adv);
2708 #else
2709 	const uint8_t *rpa = NULL;
2710 #endif
2711 	const uint8_t *own_id_addr;
2712 	const uint8_t *tx_addr;
2713 	uint8_t *adv_addr;
2714 
2715 	if (!rpa || IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)) {
2716 		if (0) {
2717 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2718 		} else if (ll_adv_cmds_is_ext() && pdu->tx_addr) {
2719 			own_id_addr = adv->rnd_addr;
2720 #endif
2721 		} else {
2722 			own_id_addr = ll_addr_get(pdu->tx_addr);
2723 		}
2724 	}
2725 
2726 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
2727 	(void)memcpy(adv->own_id_addr, own_id_addr, BDADDR_SIZE);
2728 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
2729 
2730 	if (rpa) {
2731 		pdu->tx_addr = 1;
2732 		tx_addr = rpa;
2733 	} else {
2734 		tx_addr = own_id_addr;
2735 	}
2736 
2737 	adv_addr = adv_pdu_adva_get(pdu);
2738 	memcpy(adv_addr, tx_addr, BDADDR_SIZE);
2739 
2740 	return adv_addr;
2741 }
2742 
2743 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
2744 {
2745 #if defined(CONFIG_BT_CTLR_PRIVACY)
2746 	const uint8_t *rx_addr = NULL;
2747 	uint8_t *tgt_addr;
2748 
2749 	rx_addr = ull_filter_tgta_get(adv);
2750 	if (rx_addr) {
2751 		pdu->rx_addr = 1;
2752 
2753 		/* TargetA always follows AdvA in all PDUs */
2754 		tgt_addr = adv_pdu_adva_get(pdu) + BDADDR_SIZE;
2755 		memcpy(tgt_addr, rx_addr, BDADDR_SIZE);
2756 	}
2757 #endif
2758 
	/* NOTE: The identity TargetA is set when configuring the advertising
	 *       set; there is no need to update it if LL Privacy is not
	 *       supported.
	 */
2762 }
2763 
2764 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type)
2765 {
2766 	/* TODO: Add support for extended advertising PDU if needed */
2767 	pdu->type = pdu_type;
2768 	pdu->rfu = 0;
2769 	pdu->chan_sel = 0;
2770 	pdu->tx_addr = 0;
2771 	pdu->rx_addr = 0;
2772 	pdu->len = BDADDR_SIZE;
2773 }
2774 
2775 static void init_set(struct ll_adv_set *adv)
2776 {
2777 	adv->interval = BT_LE_ADV_INTERVAL_DEFAULT;
2778 #if defined(CONFIG_BT_CTLR_PRIVACY)
2779 	adv->own_addr_type = BT_ADDR_LE_PUBLIC;
2780 #endif /* CONFIG_BT_CTLR_PRIVACY */
2781 	adv->lll.chan_map = BT_LE_ADV_CHAN_MAP_ALL;
2782 	adv->lll.filter_policy = BT_LE_ADV_FP_NO_FILTER;
2783 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2784 	adv->delay_remain = ULL_ADV_RANDOM_DELAY;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
2786 
2787 	init_pdu(lll_adv_data_peek(&ll_adv[0].lll), PDU_ADV_TYPE_ADV_IND);
2788 	init_pdu(lll_adv_scan_rsp_peek(&ll_adv[0].lll), PDU_ADV_TYPE_SCAN_RSP);
2789 }
2790