1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <string.h>
9 
10 #include <zephyr/kernel.h>
11 #include <soc.h>
12 #include <zephyr/bluetooth/hci_types.h>
13 #include <zephyr/sys/byteorder.h>
14 
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/radio.h"
18 #include "hal/ticker.h"
19 #include "hal/cntr.h"
20 
21 #include "util/util.h"
22 #include "util/mem.h"
23 #include "util/memq.h"
24 #include "util/mayfly.h"
25 #include "util/dbuf.h"
26 
27 #include "ticker/ticker.h"
28 
29 #include "pdu_df.h"
30 #include "lll/pdu_vendor.h"
31 #include "pdu.h"
32 
33 #include "lll.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll/lll_adv_types.h"
37 #include "lll_adv.h"
38 #include "lll/lll_adv_pdu.h"
39 #include "lll_scan.h"
40 #include "lll/lll_df_types.h"
41 #include "lll_conn.h"
42 #include "lll_filter.h"
43 #include "lll_conn_iso.h"
44 
45 #include "ll_sw/ull_tx_queue.h"
46 
47 #include "ull_adv_types.h"
48 #include "ull_scan_types.h"
49 #include "ull_conn_types.h"
50 #include "ull_filter.h"
51 
52 #include "ull_adv_internal.h"
53 #include "ull_scan_internal.h"
54 #include "ull_conn_internal.h"
55 #include "ull_internal.h"
56 
57 #include "ll.h"
58 #include "ll_feat.h"
59 #include "ll_settings.h"
60 
61 #include "ll_sw/isoal.h"
62 #include "ll_sw/ull_iso_types.h"
63 #include "ll_sw/ull_conn_iso_types.h"
64 
65 #include "ll_sw/ull_llcp.h"
66 
67 
68 #include "hal/debug.h"
69 
70 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle);
71 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv);
72 
73 static int init_reset(void);
74 static inline struct ll_adv_set *is_disabled_get(uint8_t handle);
75 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
76 			     uint8_t adv_chn_cnt, uint8_t phy,
77 			     uint8_t phy_flags);
78 
79 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
80 		      uint32_t remainder, uint16_t lazy, uint8_t force,
81 		      void *param);
82 static void ticker_update_op_cb(uint32_t status, void *param);
83 
84 #if defined(CONFIG_BT_PERIPHERAL)
85 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
86 			   uint32_t remainder, uint16_t lazy, uint8_t force,
87 			   void *param);
88 static void ticker_stop_op_cb(uint32_t status, void *param);
89 static void adv_disable(void *param);
90 static void disabled_cb(void *param);
91 static void conn_release(struct ll_adv_set *adv);
92 #endif /* CONFIG_BT_PERIPHERAL */
93 
94 #if defined(CONFIG_BT_CTLR_ADV_EXT)
95 static uint8_t leg_adv_type_get(uint8_t evt_prop);
96 static void adv_max_events_duration_set(struct ll_adv_set *adv,
97 					uint16_t duration,
98 					uint8_t max_ext_adv_evts);
99 static void ticker_stop_aux_op_cb(uint32_t status, void *param);
100 static void aux_disable(void *param);
101 static void aux_disabled_cb(void *param);
102 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
103 static void ext_disable(void *param);
104 static void ext_disabled_cb(void *param);
105 #endif /* CONFIG_BT_CTLR_ADV_EXT */
106 
107 static inline uint8_t disable(uint8_t handle);
108 
109 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
110 					struct pdu_adv *pdu,
111 					struct pdu_adv *pdu_scan);
112 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
113 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
114 
115 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type);
116 static void init_set(struct ll_adv_set *adv);
117 
118 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
119 
120 static uint8_t ticker_update_req;
121 static uint8_t ticker_update_ack;
122 
123 #if defined(CONFIG_BT_TICKER_EXT)
124 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
125 #endif /* CONFIG_BT_TICKER_EXT */
126 
127 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_CTLR_ADV_EXT)
128 static uint8_t ll_adv_cmds;
129 
ll_adv_cmds_set(uint8_t adv_cmds)130 int ll_adv_cmds_set(uint8_t adv_cmds)
131 {
132 	if (!ll_adv_cmds) {
133 		ll_adv_cmds = adv_cmds;
134 
135 		if (adv_cmds == LL_ADV_CMDS_LEGACY) {
136 			struct ll_adv_set *adv = &ll_adv[0];
137 
138 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
139 			adv->hci_handle = 0;
140 #endif
141 			adv->is_created = 1;
142 		}
143 	}
144 
145 	if (ll_adv_cmds != adv_cmds) {
146 		return -EINVAL;
147 	}
148 
149 	return 0;
150 }
151 
ll_adv_cmds_is_ext(void)152 int ll_adv_cmds_is_ext(void)
153 {
154 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
155 }
156 #endif
157 
158 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
ll_adv_set_by_hci_handle_get(uint8_t hci_handle,uint8_t * handle)159 uint8_t ll_adv_set_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
160 {
161 	struct ll_adv_set *adv;
162 	uint8_t idx;
163 
164 	adv =  &ll_adv[0];
165 
166 	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv++) {
167 		if (adv->is_created && (adv->hci_handle == hci_handle)) {
168 			*handle = idx;
169 			return 0;
170 		}
171 	}
172 
173 	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
174 }
175 
ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle,uint8_t * handle)176 uint8_t ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle, uint8_t *handle)
177 {
178 	struct ll_adv_set *adv, *adv_empty;
179 	uint8_t idx;
180 
181 	adv =  &ll_adv[0];
182 	adv_empty = NULL;
183 
184 	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++, adv++) {
185 		if (adv->is_created) {
186 			if (adv->hci_handle == hci_handle) {
187 				*handle = idx;
188 				return 0;
189 			}
190 		} else if (!adv_empty) {
191 			adv_empty = adv;
192 		}
193 	}
194 
195 	if (adv_empty) {
196 		adv_empty->hci_handle = hci_handle;
197 		*handle = ull_adv_handle_get(adv_empty);
198 		return 0;
199 	}
200 
201 	return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
202 }
203 
ll_adv_set_hci_handle_get(uint8_t handle)204 uint8_t ll_adv_set_hci_handle_get(uint8_t handle)
205 {
206 	struct ll_adv_set *adv;
207 
208 	adv = ull_adv_set_get(handle);
209 	LL_ASSERT(adv && adv->is_created);
210 
211 	return adv->hci_handle;
212 }
213 #endif
214 
/* Set the advertising parameters of an advertising set.
 *
 * Extended variant (CONFIG_BT_CTLR_ADV_EXT): implements the HCI LE Set
 * Extended Advertising Parameters command for the set identified by
 * 'handle', including primary/secondary PHY selection and the (re)build of
 * the primary channel PDU's common extended advertising header.
 * Legacy variant: single advertising set, handle fixed to 0.
 *
 * Returns 0 on success, or a BT_HCI_ERR_* error code.
 *
 * NOTE(review): the 'skip' parameter is accepted but not referenced in this
 * function body - presumably consumed elsewhere; confirm against callers.
 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
uint8_t ll_adv_params_set(uint8_t handle, uint16_t evt_prop, uint32_t interval,
		       uint8_t adv_type, uint8_t own_addr_type,
		       uint8_t direct_addr_type, uint8_t const *const direct_addr,
		       uint8_t chan_map, uint8_t filter_policy,
		       uint8_t *const tx_pwr, uint8_t phy_p, uint8_t skip,
		       uint8_t phy_s, uint8_t sid, uint8_t sreq)
{
	/* HCI advertising type -> on-air PDU type lookup; index 0x05 is the
	 * extended advertising indication.
	 */
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_EXT_IND};
	uint8_t is_pdu_type_changed = 0;
	uint8_t is_new_set;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_adv_params_set(uint16_t interval, uint8_t adv_type,
		       uint8_t own_addr_type, uint8_t direct_addr_type,
		       uint8_t const *const direct_addr, uint8_t chan_map,
		       uint8_t filter_policy)
{
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND};
	uint8_t const handle = 0;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	struct ll_adv_set *adv;
	uint8_t pdu_type_prev;
	struct pdu_adv *pdu;

	/* Parameters may only be changed while the set exists and is in a
	 * disabled state (is_disabled_get returns NULL otherwise).
	 */
	adv = is_disabled_get(handle);
	if (!adv) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
	 * evt_prop bits.
	 */

	/* Extended adv param set command used */
	if (adv_type == PDU_ADV_TYPE_EXT_IND) {
		/* legacy */
		if (evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) {
			/* Anonymous advertising is not a legacy property */
			if (evt_prop & BT_HCI_LE_ADV_PROP_ANON) {
				return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* disallow changing to legacy advertising while
			 * periodic advertising enabled.
			 */
			if (adv->lll.sync) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

			adv_type = leg_adv_type_get(evt_prop);

			adv->lll.phy_p = PHY_1M;
		} else {
			/* - Connectable and scannable not allowed;
			 * - High duty cycle directed connectable not allowed
			 */
			if (((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN)) ==
			     (BT_HCI_LE_ADV_PROP_CONN |
			      BT_HCI_LE_ADV_PROP_SCAN)) ||
			    (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* While periodic advertising is enabled, disallow
			 * switching to anonymous/connectable/scannable.
			 */
			if (adv->lll.sync &&
			    (evt_prop & (BT_HCI_LE_ADV_PROP_ANON |
					 BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN))) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if (CONFIG_BT_CTLR_ADV_AUX_SET == 0)
			/* Connectable or scannable requires aux */
			if (evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					BT_HCI_LE_ADV_PROP_SCAN)) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}
#endif

			adv_type = 0x05; /* index of PDU_ADV_TYPE_EXT_IND in */
					 /* pdu_adv_type[] */

			/* Fallback to 1M if upper layer did not check HCI
			 * parameters for Coded PHY support.
			 * This fallback allows *testing* extended advertising
			 * using 1M using an upper layer that is requesting Coded
			 * PHY on Controllers without Coded PHY support.
			 */
			if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
			    (phy_p == PHY_CODED)) {
				phy_p = PHY_1M;
			}

			adv->lll.phy_p = phy_p;
			adv->lll.phy_flags = PHY_FLAGS_S8;
		}
	} else {
		/* Legacy HCI command: primary channel is always 1M */
		adv->lll.phy_p = PHY_1M;
	}

	is_new_set = !adv->is_created;
	adv->is_created = 1;
	adv->is_ad_data_cmplt = 1U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* remember parameters so that set adv/scan data and adv enable
	 * interface can correctly update adv/scan data in the
	 * double buffer between caller and controller context.
	 */
	/* Set interval for Undirected or Low Duty Cycle Directed Advertising.
	 * adv_type 0x01 indexes DIRECT_IND (high duty cycle directed), which
	 * has no interval.
	 */
	if (adv_type != 0x01) {
		adv->interval = interval;
	} else {
		adv->interval = 0;
	}
	adv->lll.chan_map = chan_map;
	adv->lll.filter_policy = filter_policy;

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) && defined(CONFIG_BT_CTLR_ADV_EXT)
	adv->lll.scan_req_notify = sreq;
#endif

	/* update the "current" primary adv PDU */
	pdu = lll_adv_data_peek(&adv->lll);
	pdu_type_prev = pdu->type;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (is_new_set) {
		is_pdu_type_changed = 1;

		pdu->type = pdu_adv_type[adv_type];
		if (pdu->type != PDU_ADV_TYPE_EXT_IND) {
			pdu->len = 0U;
		}
	/* check if new PDU type is different than the past one */
	} else if (pdu->type != pdu_adv_type[adv_type]) {
		is_pdu_type_changed = 1;

		/* If old PDU was extended advertising PDU, release
		 * auxiliary and periodic advertising sets.
		 */
		if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
			struct lll_adv_aux *lll_aux = adv->lll.aux;

			if (lll_aux) {
				struct ll_adv_aux_set *aux;

				/* FIXME: copy AD data from auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
				if (adv->lll.sync) {
					struct ll_adv_sync_set *sync;

					sync = HDR_LLL2ULL(adv->lll.sync);
					adv->lll.sync = NULL;

					ull_adv_sync_release(sync);
				}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

				/* Release auxiliary channel set */
				aux = HDR_LLL2ULL(lll_aux);
				adv->lll.aux = NULL;

				ull_adv_aux_release(aux);
			} else {
				/* No previous AD data in auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;
			}
		}

		pdu->type = pdu_adv_type[adv_type];
	}

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	pdu->type = pdu_adv_type[adv_type];
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	pdu->rfu = 0;

	/* Channel Selection Algorithm #2 bit, only meaningful on connectable
	 * legacy PDU types.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
	    ((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
	     (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
		pdu->chan_sel = 1;
	} else {
		pdu->chan_sel = 0;
	}

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
	/* Backup the legacy AD Data if switching to legacy directed advertising
	 * or to Extended Advertising.
	 */
	if (((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
	     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	      (pdu->type == PDU_ADV_TYPE_EXT_IND))) &&
	    (pdu_type_prev != PDU_ADV_TYPE_DIRECT_IND) &&
	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
	     (pdu_type_prev != PDU_ADV_TYPE_EXT_IND))) {
		if (pdu->len == 0U) {
			adv->ad_data_backup.len = 0U;
		} else {
			LL_ASSERT(pdu->len >=
				  offsetof(struct pdu_adv_adv_ind, data));

			adv->ad_data_backup.len = pdu->len -
				offsetof(struct pdu_adv_adv_ind, data);
			memcpy(adv->ad_data_backup.data, pdu->adv_ind.data,
			       adv->ad_data_backup.len);
		}
	}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	adv->own_addr_type = own_addr_type;
	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
		adv->peer_addr_type = direct_addr_type;
		memcpy(&adv->peer_addr, direct_addr, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = direct_addr_type;
		memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE);
		pdu->len = sizeof(struct pdu_adv_direct_ind);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Rebuild the common extended advertising header: first walk
		 * the flags forward accumulating the old (pri_dptr_prev) and
		 * new (pri_dptr) field offsets, then walk backwards filling
		 * the fields. Field order on air: AdvA, TargetA, ADI, AuxPtr,
		 * TxPower.
		 */
		struct pdu_adv_ext_hdr *pri_hdr, pri_hdr_prev;
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		uint8_t *pri_dptr_prev, *pri_dptr;
		uint8_t len;

		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		pri_dptr = pri_hdr->data;
		pri_dptr_prev = pri_dptr;

		/* No ACAD and no AdvData */
		pri_com_hdr->adv_mode = evt_prop & 0x03;

		/* Zero-init header flags */
		if (is_pdu_type_changed) {
			*(uint8_t *)&pri_hdr_prev = 0U;
		} else {
			pri_hdr_prev = *pri_hdr;
		}
		*(uint8_t *)pri_hdr = 0U;

		/* AdvA flag */
		if (pri_hdr_prev.adv_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		if (!pri_com_hdr->adv_mode &&
		    !(evt_prop & BT_HCI_LE_ADV_PROP_ANON) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			/* TODO: optional on 1M with Aux Ptr */
			pri_hdr->adv_addr = 1;

			/* NOTE: AdvA is filled at enable */
			pdu->tx_addr = own_addr_type & 0x1;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->tx_addr = 0;
		}

		/* TargetA flag */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		/* TargetA flag in primary channel PDU only for directed */
		if (evt_prop & BT_HCI_LE_ADV_PROP_DIRECT) {
			pri_hdr->tgt_addr = 1;
			pdu->rx_addr = direct_addr_type;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->rx_addr = 0;
		}

		/* No CTEInfo flag in primary channel PDU */

		/* ADI flag */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev += sizeof(struct pdu_adv_adi);

			pri_hdr->adi = 1;
			pri_dptr += sizeof(struct pdu_adv_adi);
		}

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr flag */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
		}
		/* Need aux for connectable or scannable extended advertising */
		if (pri_hdr_prev.aux_ptr ||
		    ((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
				  BT_HCI_LE_ADV_PROP_SCAN)))) {
			pri_hdr->aux_ptr = 1;
			pri_dptr += sizeof(struct pdu_adv_aux_ptr);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* No SyncInfo flag in primary channel PDU */

		/* Tx Power flag */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev += sizeof(uint8_t);
		}
		/* C1, Tx Power is optional on the LE 1M PHY, and reserved
		 * for future use on the LE Coded PHY.
		 */
		if ((evt_prop & BT_HCI_LE_ADV_PROP_TX_POWER) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			pri_hdr->tx_pwr = 1;
			pri_dptr += sizeof(uint8_t);
		}

		/* Calc primary PDU len */
		len = ull_adv_aux_hdr_len_calc(pri_com_hdr, &pri_dptr);
		ull_adv_aux_hdr_len_fill(pri_com_hdr, len);

		/* Set PDU length */
		pdu->len = len;

		/* Start filling primary PDU payload based on flags,
		 * back-to-front (pointers were advanced to the end above).
		 */

		/* No AdvData in primary channel PDU */

		/* No ACAD in primary channel PDU */

		/* Tx Power */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev -= sizeof(uint8_t);
		}
		if (pri_hdr->tx_pwr) {
			uint8_t _tx_pwr;

			_tx_pwr = 0;
			if (tx_pwr) {
				if (*tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) {
					_tx_pwr = *tx_pwr;
				} else {
					/* No preference: report the value
					 * actually selected back to the caller
					 */
					*tx_pwr = _tx_pwr;
				}
			}

			pri_dptr -= sizeof(uint8_t);
			*pri_dptr = _tx_pwr;
		}

		/* No SyncInfo in primary channel PDU */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Fallback to 1M if upper layer did not check HCI
		 * parameters for Coded PHY support.
		 * This fallback allows *testing* extended advertising
		 * using 1M using an upper layer that is requesting Coded
		 * PHY on Controllers without Coded PHY support.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
		    (phy_s == PHY_CODED)) {
			phy_s = PHY_1M;
		}

		adv->lll.phy_s = phy_s;

		/* AuxPtr */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
		}
		if (pri_hdr->aux_ptr) {
			pri_dptr -= sizeof(struct pdu_adv_aux_ptr);
			ull_adv_aux_ptr_fill((void *)pri_dptr, 0U,
					     adv->lll.phy_s);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* ADI */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev -= sizeof(struct pdu_adv_adi);
		}
		if (pri_hdr->adi) {
			struct pdu_adv_adi *adi;

			pri_dptr -= sizeof(struct pdu_adv_adi);

			/* NOTE: memmove shall handle overlapping buffers */
			memmove(pri_dptr, pri_dptr_prev,
				sizeof(struct pdu_adv_adi));

			adi = (void *)pri_dptr;
			PDU_ADV_ADI_SID_SET(adi, sid);
		}
		adv->sid = sid;

		/* No CTEInfo field in primary channel PDU */

		/* TargetA */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev -= BDADDR_SIZE;
		}
		if (pri_hdr->tgt_addr) {
			pri_dptr -= BDADDR_SIZE;
			/* NOTE: RPA will be updated on enable, if needed */
			memcpy(pri_dptr, direct_addr, BDADDR_SIZE);
		}

		/* NOTE: AdvA, filled at enable and RPA timeout */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Make sure aux is created if we have AuxPtr */
		if (pri_hdr->aux_ptr) {
			uint8_t pri_idx, sec_idx;
			uint8_t err;

			err = ull_adv_aux_hdr_set_clear(adv,
						ULL_ADV_PDU_HDR_FIELD_ADVA,
						0U, &own_addr_type,
						&pri_idx, &sec_idx);
			if (err) {
				/* TODO: cleanup? */
				return err;
			}

			lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
			lll_adv_data_enqueue(&adv->lll, pri_idx);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else if (pdu->len == 0) {
		/* Uninitialized legacy PDU: minimal payload holds only AdvA
		 * (the address itself is filled at enable).
		 */
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		pdu->len = BDADDR_SIZE;
	} else {

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		if (((pdu_type_prev == PDU_ADV_TYPE_DIRECT_IND) ||
		     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		      (pdu_type_prev == PDU_ADV_TYPE_EXT_IND))) &&
		    (pdu->type != PDU_ADV_TYPE_DIRECT_IND) &&
		    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
		     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
			/* Restore the legacy AD Data */
			memcpy(pdu->adv_ind.data, adv->ad_data_backup.data,
			       adv->ad_data_backup.len);
			pdu->len = offsetof(struct pdu_adv_adv_ind, data) +
				   adv->ad_data_backup.len;
		}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
	}

	/* Initialize LLL header with parent pointer so that ULL contexts
	 * can be referenced in functions having the LLL context reference.
	 */
	lll_hdr_init(&adv->lll, adv);

	/* 'if (0)' lets the following 'else if' branch be compiled out
	 * cleanly when extended advertising is not configured.
	 */
	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Make sure new extended advertising set is initialized with no
		 * scan response data. Existing sets keep whatever data was set.
		 */
		if (is_pdu_type_changed) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_aux_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
			pdu->type = PDU_ADV_TYPE_AUX_SCAN_RSP;
			pdu->len = 0;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		pdu = lll_adv_scan_rsp_peek(&adv->lll);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (is_pdu_type_changed || !pdu) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Make sure legacy advertising set has scan response data
		 * initialized.
		 */
		pdu->type = PDU_ADV_TYPE_SCAN_RSP;
		pdu->rfu = 0;
		pdu->chan_sel = 0;
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		if (pdu->len == 0) {
			pdu->len = BDADDR_SIZE;
		}
	}

	return 0;
}
762 
763 #if defined(CONFIG_BT_CTLR_ADV_EXT)
764 uint8_t ll_adv_data_set(uint8_t handle, uint8_t len, uint8_t const *const data)
765 {
766 #else /* !CONFIG_BT_CTLR_ADV_EXT */
767 uint8_t ll_adv_data_set(uint8_t len, uint8_t const *const data)
768 {
769 	const uint8_t handle = 0;
770 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
771 	struct ll_adv_set *adv;
772 
773 	adv = ull_adv_set_get(handle);
774 	if (!adv) {
775 		return BT_HCI_ERR_CMD_DISALLOWED;
776 	}
777 
778 	return ull_adv_data_set(adv, len, data);
779 }
780 
781 #if defined(CONFIG_BT_CTLR_ADV_EXT)
782 uint8_t ll_adv_scan_rsp_set(uint8_t handle, uint8_t len,
783 			    uint8_t const *const data)
784 {
785 #else /* !CONFIG_BT_CTLR_ADV_EXT */
786 uint8_t ll_adv_scan_rsp_set(uint8_t len, uint8_t const *const data)
787 {
788 	const uint8_t handle = 0;
789 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
790 	struct ll_adv_set *adv;
791 
792 	adv = ull_adv_set_get(handle);
793 	if (!adv) {
794 		return BT_HCI_ERR_CMD_DISALLOWED;
795 	}
796 
797 	return ull_scan_rsp_set(adv, len, data);
798 }
799 
800 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
801 #if defined(CONFIG_BT_HCI_MESH_EXT)
802 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
803 		   uint8_t at_anchor, uint32_t ticks_anchor, uint8_t retry,
804 		   uint8_t scan_window, uint8_t scan_delay)
805 {
806 #else /* !CONFIG_BT_HCI_MESH_EXT */
807 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
808 		   uint16_t duration, uint8_t max_ext_adv_evts)
809 {
810 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
811 	struct ll_adv_sync_set *sync = NULL;
812 	uint8_t sync_is_started = 0U;
813 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
814 	struct ll_adv_aux_set *aux = NULL;
815 	uint8_t aux_is_started = 0U;
816 	uint32_t ticks_anchor;
817 #endif /* !CONFIG_BT_HCI_MESH_EXT */
818 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
819 uint8_t ll_adv_enable(uint8_t enable)
820 {
821 	uint8_t const handle = 0;
822 	uint32_t ticks_anchor;
823 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
824 	uint32_t ticks_slot_overhead;
825 	uint32_t ticks_slot_offset;
826 	uint32_t volatile ret_cb;
827 	struct pdu_adv *pdu_scan;
828 	struct pdu_adv *pdu_adv;
829 	struct ll_adv_set *adv;
830 	struct lll_adv *lll;
831 	uint8_t hci_err;
832 	uint32_t ret;
833 
834 	if (!enable) {
835 		return disable(handle);
836 	}
837 
838 	adv = is_disabled_get(handle);
839 	if (!adv) {
840 		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
841 		 * Enabling advertising when it is already enabled can cause the
		 * random address to change. As the current implementation
		 * does not update RPAs on every advertising enable, only on
844 		 * `rpa_timeout_ms` timeout, we are not going to implement the
845 		 * "can cause the random address to change" for legacy
846 		 * advertisements.
847 		 */
848 
849 		/* If HCI LE Set Extended Advertising Enable command is sent
850 		 * again for an advertising set while that set is enabled, the
851 		 * timer used for duration and the number of events counter are
852 		 * reset and any change to the random address shall take effect.
853 		 */
854 		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT) ||
855 		    IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
856 #if defined(CONFIG_BT_CTLR_ADV_EXT)
857 			if (ll_adv_cmds_is_ext()) {
858 				enum node_rx_type volatile *type;
859 
860 				adv = ull_adv_is_enabled_get(handle);
861 				if (!adv) {
862 					/* This should not be happening as
863 					 * is_disabled_get failed.
864 					 */
865 					return BT_HCI_ERR_CMD_DISALLOWED;
866 				}
867 
868 				/* Change random address in the primary or
869 				 * auxiliary PDU as necessary.
870 				 */
871 				lll = &adv->lll;
872 				pdu_adv = lll_adv_data_peek(lll);
873 				pdu_scan = lll_adv_scan_rsp_peek(lll);
874 				hci_err = adv_scan_pdu_addr_update(adv,
875 								   pdu_adv,
876 								   pdu_scan);
877 				if (hci_err) {
878 					return hci_err;
879 				}
880 
881 				if (!adv->lll.node_rx_adv_term) {
882 					/* This should not be happening,
883 					 * adv->is_enabled would be 0 if
884 					 * node_rx_adv_term is released back to
885 					 * pool.
886 					 */
887 					return BT_HCI_ERR_CMD_DISALLOWED;
888 				}
889 
890 				/* Check advertising not terminated */
891 				type = &adv->lll.node_rx_adv_term->hdr.type;
892 				if (*type == NODE_RX_TYPE_NONE) {
893 					/* Reset event counter, update duration,
894 					 * and max events
895 					 */
896 					adv_max_events_duration_set(adv,
897 						duration, max_ext_adv_evts);
898 				}
899 
900 				/* Check the counter reset did not race with
901 				 * advertising terminated.
902 				 */
903 				if (*type != NODE_RX_TYPE_NONE) {
904 					/* Race with advertising terminated */
905 					return BT_HCI_ERR_CMD_DISALLOWED;
906 				}
907 			}
908 #endif /* CONFIG_BT_CTLR_ADV_EXT */
909 
910 			return 0;
911 		}
912 
913 		/* Fail on being strict as a legacy controller, valid only under
914 		 * Bluetooth Specification v4.x.
915 		 * Bluetooth Specification v5.0 and above shall not fail to
916 		 * enable already enabled advertising.
917 		 */
918 		return BT_HCI_ERR_CMD_DISALLOWED;
919 	}
920 
921 	lll = &adv->lll;
922 
923 #if defined(CONFIG_BT_CTLR_PRIVACY)
924 	lll->rl_idx = FILTER_IDX_NONE;
925 
926 	/* Prepare filter accept list and optionally resolving list */
927 	ull_filter_adv_update(lll->filter_policy);
928 
929 	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
930 	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
931 		/* Look up the resolving list */
932 		lll->rl_idx = ull_filter_rl_find(adv->peer_addr_type,
933 						 adv->peer_addr, NULL);
934 
935 		if (lll->rl_idx != FILTER_IDX_NONE) {
936 			/* Generate RPAs if required */
937 			ull_filter_rpa_update(false);
938 		}
939 	}
940 #endif /* !CONFIG_BT_CTLR_PRIVACY */
941 
942 	pdu_adv = lll_adv_data_peek(lll);
943 	pdu_scan = lll_adv_scan_rsp_peek(lll);
944 
945 #if defined(CONFIG_BT_CTLR_ADV_EXT)
946 	if (!pdu_scan) {
947 		uint8_t err;
948 
949 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
950 			/* Should never happen */
951 			return BT_HCI_ERR_CMD_DISALLOWED;
952 		}
953 
954 		err = lll_adv_data_init(&adv->lll.scan_rsp);
955 		if (err) {
956 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
957 		}
958 
959 		pdu_scan = lll_adv_scan_rsp_peek(lll);
960 		init_pdu(pdu_scan, PDU_ADV_TYPE_SCAN_RSP);
961 	}
962 #endif /* CONFIG_BT_CTLR_ADV_EXT */
963 
964 	/* Update Bluetooth Device address in advertising and scan response
965 	 * PDUs.
966 	 */
967 	hci_err = adv_scan_pdu_addr_update(adv, pdu_adv, pdu_scan);
968 	if (hci_err) {
969 		return hci_err;
970 	}
971 
972 #if defined(CONFIG_BT_HCI_MESH_EXT)
973 	if (scan_delay) {
974 		if (ull_scan_is_enabled(0)) {
975 			return BT_HCI_ERR_CMD_DISALLOWED;
976 		}
977 
978 		lll->is_mesh = 1;
979 	}
980 #endif /* CONFIG_BT_HCI_MESH_EXT */
981 
982 #if defined(CONFIG_BT_PERIPHERAL)
983 	/* prepare connectable advertising */
984 	if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) ||
985 	    (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) ||
986 #if defined(CONFIG_BT_CTLR_ADV_EXT)
987 	    ((pdu_adv->type == PDU_ADV_TYPE_EXT_IND) &&
988 	     (pdu_adv->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN))
989 #else
990 	    0
991 #endif
992 	     ) {
993 		struct node_rx_pdu *node_rx;
994 		struct ll_conn *conn;
995 		struct lll_conn *conn_lll;
996 		void *link;
997 		int err;
998 
999 		if (lll->conn) {
1000 			return BT_HCI_ERR_CMD_DISALLOWED;
1001 		}
1002 
1003 		link = ll_rx_link_alloc();
1004 		if (!link) {
1005 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1006 		}
1007 
1008 		node_rx = ll_rx_alloc();
1009 		if (!node_rx) {
1010 			ll_rx_link_release(link);
1011 
1012 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1013 		}
1014 
1015 		conn = ll_conn_acquire();
1016 		if (!conn) {
1017 			ll_rx_release(node_rx);
1018 			ll_rx_link_release(link);
1019 
1020 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1021 		}
1022 
1023 		conn_lll = &conn->lll;
1024 		conn_lll->handle = 0xFFFF;
1025 
1026 		if (!conn_lll->link_tx_free) {
1027 			conn_lll->link_tx_free = &conn_lll->link_tx;
1028 		}
1029 
1030 		memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
1031 			  &conn_lll->memq_tx.tail);
1032 		conn_lll->link_tx_free = NULL;
1033 
1034 		conn_lll->packet_tx_head_len = 0;
1035 		conn_lll->packet_tx_head_offset = 0;
1036 
1037 		conn_lll->sn = 0;
1038 		conn_lll->nesn = 0;
1039 		conn_lll->empty = 0;
1040 
1041 #if defined(CONFIG_BT_CTLR_PHY)
1042 		if (0) {
1043 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1044 		} else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1045 			conn_lll->phy_tx = lll->phy_s;
1046 			conn_lll->phy_tx_time = lll->phy_s;
1047 			conn_lll->phy_flags = lll->phy_flags;
1048 			conn_lll->phy_rx = lll->phy_s;
1049 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1050 		} else {
1051 			conn_lll->phy_tx = PHY_1M;
1052 			conn_lll->phy_tx_time = PHY_1M;
1053 			conn_lll->phy_flags = PHY_FLAGS_S8;
1054 			conn_lll->phy_rx = PHY_1M;
1055 		}
1056 #endif /* CONFIG_BT_CTLR_PHY */
1057 
1058 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1059 		conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1060 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1061 		conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1062 		conn_lll->rssi_sample_count = 0;
1063 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1064 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1065 
1066 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
1067 		conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
1068 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
1069 
1070 		/* FIXME: BEGIN: Move to ULL? */
1071 		conn_lll->role = 1;
1072 		conn_lll->periph.initiated = 0;
1073 		conn_lll->periph.cancelled = 0;
1074 		conn_lll->data_chan_sel = 0;
1075 		conn_lll->data_chan_use = 0;
1076 		conn_lll->event_counter = 0;
1077 
1078 		conn_lll->latency_prepare = 0;
1079 		conn_lll->latency_event = 0;
1080 		conn_lll->periph.latency_enabled = 0;
1081 		conn_lll->periph.window_widening_prepare_us = 0;
1082 		conn_lll->periph.window_widening_event_us = 0;
1083 		conn_lll->periph.window_size_prepare_us = 0;
1084 		/* FIXME: END: Move to ULL? */
1085 #if defined(CONFIG_BT_CTLR_CONN_META)
1086 		memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
1087 #endif /* CONFIG_BT_CTLR_CONN_META */
1088 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1089 		conn_lll->df_rx_cfg.is_initialized = 0U;
1090 		conn_lll->df_rx_cfg.hdr.elem_size = sizeof(struct lll_df_conn_rx_params);
1091 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1092 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1093 		conn_lll->df_tx_cfg.is_initialized = 0U;
1094 		conn_lll->df_tx_cfg.cte_rsp_en = 0U;
1095 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1096 		conn->connect_expire = 6;
1097 		conn->supervision_expire = 0;
1098 
1099 #if defined(CONFIG_BT_CTLR_LE_PING)
1100 		conn->apto_expire = 0U;
1101 		conn->appto_expire = 0U;
1102 #endif
1103 
1104 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
1105 		conn->own_id_addr_type = BT_ADDR_LE_NONE->type;
1106 		(void)memcpy(conn->own_id_addr, BT_ADDR_LE_NONE->a.val,
1107 			     sizeof(conn->own_id_addr));
1108 		conn->peer_id_addr_type = BT_ADDR_LE_NONE->type;
1109 		(void)memcpy(conn->peer_id_addr, BT_ADDR_LE_NONE->a.val,
1110 			     sizeof(conn->peer_id_addr));
1111 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1112 
1113 		/* Re-initialize the control procedure data structures */
1114 		ull_llcp_init(conn);
1115 
1116 		conn->llcp_terminate.reason_final = 0;
1117 		/* NOTE: use allocated link for generating dedicated
1118 		 * terminate ind rx node
1119 		 */
1120 		conn->llcp_terminate.node_rx.rx.hdr.link = link;
1121 
1122 #if defined(CONFIG_BT_CTLR_PHY)
1123 		conn->phy_pref_tx = ull_conn_default_phy_tx_get();
1124 		conn->phy_pref_rx = ull_conn_default_phy_rx_get();
1125 #endif /* CONFIG_BT_CTLR_PHY */
1126 
1127 #if defined(CONFIG_BT_CTLR_LE_ENC)
1128 		conn->pause_rx_data = 0U;
1129 #endif /* CONFIG_BT_CTLR_LE_ENC */
1130 
1131 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1132 		uint8_t phy_in_use = PHY_1M;
1133 
1134 
1135 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1136 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1137 			phy_in_use = lll->phy_s;
1138 		}
1139 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1140 
1141 		ull_dle_init(conn, phy_in_use);
1142 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1143 
1144 		/* Re-initialize the Tx Q */
1145 		ull_tx_q_init(&conn->tx_q);
1146 
1147 		/* NOTE: using same link as supplied for terminate ind */
1148 		adv->link_cc_free = link;
1149 		adv->node_rx_cc_free = node_rx;
1150 		lll->conn = conn_lll;
1151 
1152 		ull_hdr_init(&conn->ull);
1153 		lll_hdr_init(&conn->lll, conn);
1154 
1155 		/* wait for stable clocks */
1156 		err = lll_clock_wait();
1157 		if (err) {
1158 			conn_release(adv);
1159 
1160 			return BT_HCI_ERR_HW_FAILURE;
1161 		}
1162 	}
1163 #endif /* CONFIG_BT_PERIPHERAL */
1164 
1165 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1166 	if (ll_adv_cmds_is_ext()) {
1167 		struct node_rx_pdu *node_rx_adv_term;
1168 		void *link_adv_term;
1169 
1170 		/* The alloc here used for ext adv termination event */
1171 		link_adv_term = ll_rx_link_alloc();
1172 		if (!link_adv_term) {
1173 #if defined(CONFIG_BT_PERIPHERAL)
1174 			if (adv->lll.conn) {
1175 				conn_release(adv);
1176 			}
1177 #endif /* CONFIG_BT_PERIPHERAL */
1178 
1179 			/* TODO: figure out right return value */
1180 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1181 		}
1182 
1183 		node_rx_adv_term = ll_rx_alloc();
1184 		if (!node_rx_adv_term) {
1185 #if defined(CONFIG_BT_PERIPHERAL)
1186 			if (adv->lll.conn) {
1187 				conn_release(adv);
1188 			}
1189 #endif /* CONFIG_BT_PERIPHERAL */
1190 
1191 			ll_rx_link_release(link_adv_term);
1192 
1193 			/* TODO: figure out right return value */
1194 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1195 		}
1196 
1197 		node_rx_adv_term->hdr.type = NODE_RX_TYPE_NONE;
1198 
1199 		node_rx_adv_term->hdr.link = (void *)link_adv_term;
1200 		adv->lll.node_rx_adv_term = (void *)node_rx_adv_term;
1201 
1202 		if (0) {
1203 #if defined(CONFIG_BT_PERIPHERAL)
1204 		} else if (lll->is_hdcd) {
1205 			adv_max_events_duration_set(adv, 0U, 0U);
1206 #endif /* CONFIG_BT_PERIPHERAL */
1207 		} else {
1208 			adv_max_events_duration_set(adv, duration,
1209 						    max_ext_adv_evts);
1210 		}
1211 	} else {
1212 		adv->lll.node_rx_adv_term = NULL;
1213 		adv_max_events_duration_set(adv, 0U, 0U);
1214 	}
1215 
1216 	const uint8_t phy = lll->phy_p;
1217 	const uint8_t phy_flags = lll->phy_flags;
1218 
1219 	adv->event_counter = 0U;
1220 #else
1221 	/* Legacy ADV only supports LE_1M PHY */
1222 	const uint8_t phy = PHY_1M;
1223 	const uint8_t phy_flags = 0U;
1224 #endif
1225 
1226 	/* For now we adv on all channels enabled in channel map */
1227 	uint8_t ch_map = lll->chan_map;
1228 	const uint8_t adv_chn_cnt = util_ones_count_get(&ch_map, sizeof(ch_map));
1229 
1230 	if (adv_chn_cnt == 0) {
1231 		/* ADV needs at least one channel */
1232 		goto failure_cleanup;
1233 	}
1234 
1235 	/* Calculate the advertising time reservation */
1236 	uint16_t time_us = adv_time_get(pdu_adv, pdu_scan, adv_chn_cnt, phy,
1237 					phy_flags);
1238 
1239 	uint16_t interval = adv->interval;
1240 #if defined(CONFIG_BT_HCI_MESH_EXT)
1241 	if (lll->is_mesh) {
1242 		uint16_t interval_min_us;
1243 
1244 		_radio.advertiser.retry = retry;
1245 		_radio.advertiser.scan_delay_ms = scan_delay;
1246 		_radio.advertiser.scan_window_ms = scan_window;
1247 
1248 		interval_min_us = time_us +
1249 				  (scan_delay + scan_window) * USEC_PER_MSEC;
1250 		if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
1251 			interval = DIV_ROUND_UP(interval_min_us,
1252 						    SCAN_INT_UNIT_US);
1253 		}
1254 
1255 		/* passive scanning */
1256 		_radio.scanner.type = 0;
1257 
1258 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1259 		/* TODO: Coded PHY support */
1260 		_radio.scanner.phy = 0;
1261 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1262 
1263 #if defined(CONFIG_BT_CTLR_PRIVACY)
1264 		/* TODO: Privacy support */
1265 		_radio.scanner.rpa_gen = 0;
1266 		_radio.scanner.rl_idx = rl_idx;
1267 #endif /* CONFIG_BT_CTLR_PRIVACY */
1268 
1269 		_radio.scanner.filter_policy = filter_policy;
1270 	}
1271 #endif /* CONFIG_BT_HCI_MESH_EXT */
1272 
1273 	/* Initialize ULL context before radio event scheduling is started. */
1274 	ull_hdr_init(&adv->ull);
1275 
1276 	/* TODO: active_to_start feature port */
1277 	adv->ull.ticks_active_to_start = 0;
1278 	adv->ull.ticks_prepare_to_start =
1279 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1280 	adv->ull.ticks_preempt_to_start =
1281 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1282 	adv->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
1283 
1284 	ticks_slot_offset = MAX(adv->ull.ticks_active_to_start,
1285 				adv->ull.ticks_prepare_to_start);
1286 
1287 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1288 		ticks_slot_overhead = ticks_slot_offset;
1289 	} else {
1290 		ticks_slot_overhead = 0;
1291 	}
1292 
1293 #if !defined(CONFIG_BT_HCI_MESH_EXT)
1294 	ticks_anchor = ticker_ticks_now_get();
1295 	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1296 
1297 #else /* CONFIG_BT_HCI_MESH_EXT */
1298 	if (!at_anchor) {
1299 		ticks_anchor = ticker_ticks_now_get();
1300 	}
1301 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1302 
1303 	/* High Duty Cycle Directed Advertising if interval is 0. */
1304 #if defined(CONFIG_BT_PERIPHERAL)
1305 	lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND);
1306 	if (lll->is_hdcd) {
1307 		ret_cb = TICKER_STATUS_BUSY;
1308 
1309 #if defined(CONFIG_BT_TICKER_EXT)
1310 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1311 		ll_adv_ticker_ext[handle].ticks_slot_window = 0;
1312 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
1313 
1314 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1315 		ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1316 		ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1317 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1318 
1319 		ret = ticker_start_ext(
1320 #else /* !CONFIG_BT_TICKER_EXT */
1321 		ret = ticker_start(
1322 #endif /* !CONFIG_BT_TICKER_EXT */
1323 				   TICKER_INSTANCE_ID_CTLR,
1324 				   TICKER_USER_ID_THREAD,
1325 				   (TICKER_ID_ADV_BASE + handle),
1326 				   ticks_anchor, 0,
1327 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1328 				   TICKER_NULL_REMAINDER, TICKER_NULL_LAZY,
1329 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1330 				   ticker_cb, adv,
1331 				   ull_ticker_status_give, (void *)&ret_cb
1332 #if defined(CONFIG_BT_TICKER_EXT)
1333 				   ,
1334 				   &ll_adv_ticker_ext[handle]
1335 #endif /* CONFIG_BT_TICKER_EXT */
1336 				   );
1337 		ret = ull_ticker_status_take(ret, &ret_cb);
1338 		if (ret != TICKER_STATUS_SUCCESS) {
1339 			goto failure_cleanup;
1340 		}
1341 
1342 		ret_cb = TICKER_STATUS_BUSY;
1343 		ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1344 				   TICKER_USER_ID_THREAD,
1345 				   TICKER_ID_ADV_STOP, ticks_anchor,
1346 				   HAL_TICKER_US_TO_TICKS(ticks_slot_offset +
1347 							  (1280 * 1000)),
1348 				   TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER,
1349 				   TICKER_NULL_LAZY, TICKER_NULL_SLOT,
1350 				   ticker_stop_cb, adv,
1351 				   ull_ticker_status_give, (void *)&ret_cb);
1352 	} else
1353 #endif /* CONFIG_BT_PERIPHERAL */
1354 	{
1355 		const uint32_t ticks_slot = adv->ull.ticks_slot +
1356 					 ticks_slot_overhead;
1357 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1358 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1359 		uint8_t pri_idx, sec_idx;
1360 
1361 		/* Add sync_info into auxiliary PDU */
1362 		if (lll->sync) {
1363 			sync = HDR_LLL2ULL(lll->sync);
1364 			if (sync->is_enabled && !sync->is_started) {
1365 				struct pdu_adv_sync_info *sync_info;
1366 				uint8_t value[1 + sizeof(sync_info)];
1367 				uint8_t err;
1368 
1369 				err = ull_adv_aux_hdr_set_clear(adv,
1370 						ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
1371 						0U, value, &pri_idx, &sec_idx);
1372 				if (err) {
1373 					return err;
1374 				}
1375 
1376 				/* First byte in the length-value encoded
1377 				 * parameter is size of sync_info structure,
1378 				 * followed by pointer to sync_info in the
1379 				 * PDU.
1380 				 */
1381 				memcpy(&sync_info, &value[1], sizeof(sync_info));
1382 				ull_adv_sync_info_fill(sync, sync_info);
1383 			} else {
1384 				/* Do not start periodic advertising */
1385 				sync = NULL;
1386 			}
1387 		}
1388 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1389 
1390 		if (lll->aux) {
1391 			struct lll_adv_aux *lll_aux = lll->aux;
1392 			uint32_t ticks_slot_overhead_aux;
1393 			uint32_t ticks_anchor_aux;
1394 
1395 			aux = HDR_LLL2ULL(lll_aux);
1396 
1397 			/* Schedule auxiliary PDU after primary channel
1398 			 * PDUs.
1399 			 * Reduce the MAFS offset by the Event Overhead
1400 			 * so that actual radio air packet start as
1401 			 * close as possible after the MAFS gap.
1402 			 * Add 2 ticks offset as compensation towards
1403 			 * the +/- 1 tick ticker scheduling jitter due
1404 			 * to accumulation of remainder to maintain
1405 			 * average ticker interval.
1406 			 */
1407 			ticks_anchor_aux =
1408 				ticks_anchor + ticks_slot +
1409 				HAL_TICKER_US_TO_TICKS(
1410 					MAX(EVENT_MAFS_US,
1411 					    EVENT_OVERHEAD_START_US) -
1412 					EVENT_OVERHEAD_START_US +
1413 					(EVENT_TICKER_RES_MARGIN_US << 1));
1414 
1415 			ticks_slot_overhead_aux =
1416 				ull_adv_aux_evt_init(aux, &ticks_anchor_aux);
1417 
1418 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1419 			/* Start periodic advertising if enabled and not already
1420 			 * started.
1421 			 */
1422 			if (sync) {
1423 				uint32_t ticks_slot_overhead2;
1424 				uint32_t ticks_slot_aux;
1425 
1426 #if defined(CONFIG_BT_CTLR_ADV_RESERVE_MAX)
1427 				uint32_t us_slot;
1428 
1429 				us_slot = ull_adv_aux_time_get(aux,
1430 						PDU_AC_PAYLOAD_SIZE_MAX,
1431 						PDU_AC_PAYLOAD_SIZE_MAX);
1432 				ticks_slot_aux =
1433 					HAL_TICKER_US_TO_TICKS(us_slot) +
1434 					ticks_slot_overhead_aux;
1435 #else
1436 				ticks_slot_aux = aux->ull.ticks_slot +
1437 						 ticks_slot_overhead_aux;
1438 #endif
1439 
1440 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET) || \
1441 	(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET == 0)
1442 				/* Schedule periodic advertising PDU after
1443 				 * auxiliary PDUs.
1444 				 * Reduce the MAFS offset by the Event Overhead
1445 				 * so that actual radio air packet start as
1446 				 * close as possible after the MAFS gap.
1447 				 * Add 2 ticks offset as compensation towards
1448 				 * the +/- 1 tick ticker scheduling jitter due
1449 				 * to accumulation of remainder to maintain
1450 				 * average ticker interval.
1451 				 */
1452 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1453 					ticks_slot_aux +
1454 					HAL_TICKER_US_TO_TICKS(
1455 						MAX(EVENT_MAFS_US,
1456 						    EVENT_OVERHEAD_START_US) -
1457 						EVENT_OVERHEAD_START_US +
1458 						(EVENT_TICKER_RES_MARGIN_US << 1));
1459 
1460 #else /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1461 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1462 					HAL_TICKER_US_TO_TICKS(
1463 						CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET);
1464 
1465 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1466 
1467 				ticks_slot_overhead2 = ull_adv_sync_evt_init(adv, sync, NULL);
1468 				ret = ull_adv_sync_start(adv, sync,
1469 							 ticks_anchor_sync,
1470 							 ticks_slot_overhead2);
1471 				if (ret) {
1472 					goto failure_cleanup;
1473 				}
1474 
1475 				sync_is_started = 1U;
1476 
1477 				lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
1478 				lll_adv_data_enqueue(lll, pri_idx);
1479 			} else {
1480 				/* TODO: Find the anchor before the group of
1481 				 *       active Periodic Advertising events, so
1482 				 *       that auxiliary sets are grouped such
1483 				 *       that auxiliary sets and Periodic
1484 				 *       Advertising sets are non-overlapping
1485 				 *       for the same event interval.
1486 				 */
1487 			}
1488 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1489 
1490 			/* Keep aux interval equal or higher than primary PDU
1491 			 * interval.
1492 			 * Use periodic interval units to represent the
1493 			 * periodic behavior of scheduling of AUX_ADV_IND PDUs
1494 			 * so that it is grouped with similar interval units
1495 			 * used for ACL Connections, Periodic Advertising and
1496 			 * BIG radio events.
1497 			 */
1498 			aux->interval =
1499 				DIV_ROUND_UP(((uint64_t)adv->interval *
1500 						  ADV_INT_UNIT_US) +
1501 						 HAL_TICKER_TICKS_TO_US(
1502 							ULL_ADV_RANDOM_DELAY),
1503 						 PERIODIC_INT_UNIT_US);
1504 
1505 			ret = ull_adv_aux_start(aux, ticks_anchor_aux,
1506 						ticks_slot_overhead_aux);
1507 			if (ret) {
1508 				goto failure_cleanup;
1509 			}
1510 
1511 			aux_is_started = 1U;
1512 		}
1513 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1514 
1515 		ret_cb = TICKER_STATUS_BUSY;
1516 
1517 #if defined(CONFIG_BT_TICKER_EXT)
1518 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1519 		ll_adv_ticker_ext[handle].ticks_slot_window =
1520 			ULL_ADV_RANDOM_DELAY + ticks_slot;
1521 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
1522 
1523 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1524 		if (lll->aux) {
1525 			uint8_t aux_handle = ull_adv_aux_handle_get(aux);
1526 
1527 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_ID_ADV_AUX_BASE +
1528 								  aux_handle;
1529 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1530 		} else {
1531 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1532 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1533 		}
1534 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1535 
1536 		ret = ticker_start_ext(
1537 #else /* !CONFIG_BT_TICKER_EXT */
1538 		ret = ticker_start(
1539 #endif /* !CONFIG_BT_TICKER_EXT */
1540 				   TICKER_INSTANCE_ID_CTLR,
1541 				   TICKER_USER_ID_THREAD,
1542 				   (TICKER_ID_ADV_BASE + handle),
1543 				   ticks_anchor, 0,
1544 				   HAL_TICKER_US_TO_TICKS((uint64_t)interval *
1545 							  ADV_INT_UNIT_US),
1546 				   TICKER_NULL_REMAINDER,
1547 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1548 	!defined(CONFIG_BT_CTLR_LOW_LAT)
1549 				   /* Force expiry to ensure timing update */
1550 				   TICKER_LAZY_MUST_EXPIRE,
1551 #else
1552 				   TICKER_NULL_LAZY,
1553 #endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
1554 				   ticks_slot,
1555 				   ticker_cb, adv,
1556 				   ull_ticker_status_give, (void *)&ret_cb
1557 #if defined(CONFIG_BT_TICKER_EXT)
1558 				   ,
1559 				   &ll_adv_ticker_ext[handle]
1560 #endif /* CONFIG_BT_TICKER_EXT */
1561 				   );
1562 	}
1563 
1564 	ret = ull_ticker_status_take(ret, &ret_cb);
1565 	if (ret != TICKER_STATUS_SUCCESS) {
1566 		goto failure_cleanup;
1567 	}
1568 
1569 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1570 	if (aux_is_started) {
1571 		aux->is_started = aux_is_started;
1572 
1573 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1574 		if (sync_is_started) {
1575 			sync->is_started = sync_is_started;
1576 		}
1577 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1578 	}
1579 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1580 
1581 	adv->is_enabled = 1;
1582 
1583 #if defined(CONFIG_BT_CTLR_PRIVACY)
1584 #if defined(CONFIG_BT_HCI_MESH_EXT)
1585 	if (_radio.advertiser.is_mesh) {
1586 		_radio.scanner.is_enabled = 1;
1587 
1588 		ull_filter_adv_scan_state_cb(BIT(0) | BIT(1));
1589 	}
1590 #else /* !CONFIG_BT_HCI_MESH_EXT */
1591 	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
1592 		ull_filter_adv_scan_state_cb(BIT(0));
1593 	}
1594 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1595 #endif /* CONFIG_BT_CTLR_PRIVACY */
1596 
1597 	return 0;
1598 
1599 failure_cleanup:
1600 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1601 	if (aux_is_started) {
1602 		/* TODO: Stop extended advertising and release resources */
1603 	}
1604 
1605 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1606 	if (sync_is_started) {
1607 		/* TODO: Stop periodic advertising and release resources */
1608 	}
1609 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1610 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1611 
1612 #if defined(CONFIG_BT_PERIPHERAL)
1613 	if (adv->lll.conn) {
1614 		conn_release(adv);
1615 	}
1616 #endif /* CONFIG_BT_PERIPHERAL */
1617 
1618 	return BT_HCI_ERR_CMD_DISALLOWED;
1619 }
1620 
int ull_adv_init(void)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	/* Bring up the auxiliary (secondary channel) advertising sets
	 * before the common state they may be referenced from.
	 */
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		err = ull_adv_aux_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	/* Periodic advertising builds on top of the auxiliary sets. */
	err = ull_adv_sync_init();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Common advertising module state initialization. */
	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}
1650 
1651 uint8_t ll_adv_disable_all(void)
1652 {
1653 	uint8_t handle;
1654 
1655 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1656 		(void)disable(handle);
1657 	}
1658 
1659 	return 0U;
1660 }
1661 
int ull_adv_reset(void)
{
	/* Stop all advertising sets first; per-set resources are torn
	 * down later in ull_adv_reset_finalize().
	 */
	(void)ll_adv_disable_all();

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_HCI_RAW)
	/* After reset, both legacy and extended HCI advertising command
	 * sets are permitted again until the host picks one.
	 */
	ll_adv_cmds = LL_ADV_CMDS_ANY;
#endif
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	{
		int err = ull_adv_sync_reset();

		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
1684 
1685 int ull_adv_reset_finalize(void)
1686 {
1687 	uint8_t handle;
1688 	int err;
1689 
1690 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1691 #if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
1692 	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
1693 		err = ull_adv_aux_reset_finalize();
1694 		if (err) {
1695 			return err;
1696 		}
1697 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1698 		err = ull_adv_sync_reset_finalize();
1699 		if (err) {
1700 			return err;
1701 		}
1702 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1703 	}
1704 #endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
1705 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1706 
1707 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1708 		struct ll_adv_set *adv = &ll_adv[handle];
1709 		struct lll_adv *lll = &adv->lll;
1710 
1711 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1712 		adv->is_created = 0;
1713 		lll->aux = NULL;
1714 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1715 		lll->sync = NULL;
1716 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1717 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1718 		lll_adv_data_reset(&lll->adv_data);
1719 		lll_adv_data_reset(&lll->scan_rsp);
1720 	}
1721 
1722 	err = init_reset();
1723 	if (err) {
1724 		return err;
1725 	}
1726 
1727 	return 0;
1728 }
1729 
1730 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle)
1731 {
1732 	if (handle >= BT_CTLR_ADV_SET) {
1733 		return NULL;
1734 	}
1735 
1736 	return &ll_adv[handle];
1737 }
1738 
1739 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv)
1740 {
1741 	return ((uint8_t *)adv - (uint8_t *)ll_adv) / sizeof(*adv);
1742 }
1743 
uint16_t ull_adv_lll_handle_get(struct lll_adv *lll)
{
	/* Resolve the ULL advertising set owning this LLL context, then
	 * translate it to its pool handle.
	 */
	struct ll_adv_set *adv = HDR_LLL2ULL(lll);

	return ull_adv_handle_get(adv);
}
1748 
1749 inline struct ll_adv_set *ull_adv_is_enabled_get(uint8_t handle)
1750 {
1751 	struct ll_adv_set *adv;
1752 
1753 	adv = ull_adv_set_get(handle);
1754 	if (!adv || !adv->is_enabled) {
1755 		return NULL;
1756 	}
1757 
1758 	return adv;
1759 }
1760 
int ull_adv_is_enabled(uint8_t handle)
{
	/* Boolean view of ull_adv_is_enabled_get(). */
	return ull_adv_is_enabled_get(handle) != NULL;
}
1769 
1770 uint32_t ull_adv_filter_pol_get(uint8_t handle)
1771 {
1772 	struct ll_adv_set *adv;
1773 
1774 	adv = ull_adv_is_enabled_get(handle);
1775 	if (!adv) {
1776 		return 0;
1777 	}
1778 
1779 	return adv->lll.filter_policy;
1780 }
1781 
1782 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1783 struct ll_adv_set *ull_adv_is_created_get(uint8_t handle)
1784 {
1785 	struct ll_adv_set *adv;
1786 
1787 	adv = ull_adv_set_get(handle);
1788 	if (!adv || !adv->is_created) {
1789 		return NULL;
1790 	}
1791 
1792 	return adv;
1793 }
1794 
1795 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1796 void ull_adv_aux_created(struct ll_adv_set *adv)
1797 {
1798 	if (adv->lll.aux && adv->is_enabled) {
1799 		uint8_t aux_handle = ull_adv_aux_handle_get(HDR_LLL2ULL(adv->lll.aux));
1800 		uint8_t handle = ull_adv_handle_get(adv);
1801 
1802 		ticker_update_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1803 			   (TICKER_ID_ADV_BASE + handle), 0, 0, 0, 0, 0, 0,
1804 			   ticker_update_op_cb, adv, 0,
1805 			   TICKER_ID_ADV_AUX_BASE + aux_handle);
1806 	}
1807 }
1808 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1809 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1810 
/* Set legacy Advertising Data (AD) for an advertising set.
 *
 * Copies @p len bytes of @p data into the next advertising PDU
 * double-buffer entry and, when the set is already enabled, refreshes
 * the event time reservation before enqueueing the new PDU.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM when @p len exceeds
 * the legacy AD data maximum, BT_HCI_ERR_CMD_DISALLOWED when the set
 * is directed/extended and no AD data backup is configured, or an
 * error from ull_adv_time_update().
 */
uint8_t ull_adv_data_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	/* Check invalid AD Data length */
	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	prev = lll_adv_data_peek(&adv->lll);

	/* Dont update data if directed, back it up */
	if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
	    (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	     (prev->type == PDU_ADV_TYPE_EXT_IND))) {
#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		/* Update the backup AD Data */
		adv->ad_data_backup.len = len;
		memcpy(adv->ad_data_backup.data, data, adv->ad_data_backup.len);
		return 0;

#else /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
	}

	/* update adv pdu fields. */
	pdu = lll_adv_data_alloc(&adv->lll, &idx);

	/* check for race condition with LLL ISR */
	if (IS_ENABLED(CONFIG_ASSERT)) {
		uint8_t idx_test;

		/* A second alloc without an intervening enqueue must hand
		 * back the same buffer index; a mismatch indicates the LLL
		 * ISR consumed a buffer concurrently.
		 */
		lll_adv_data_alloc(&adv->lll, &idx_test);
		__ASSERT((idx == idx_test), "Probable AD Data Corruption.\n");
	}

	/* Carry over the PDU header fields from the previous PDU so that
	 * only the AD payload changes.
	 */
	pdu->type = prev->type;
	pdu->rfu = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		pdu->chan_sel = prev->chan_sel;
	} else {
		pdu->chan_sel = 0U;
	}

	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = prev->rx_addr;
	memcpy(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE);
	memcpy(&pdu->adv_ind.data[0], data, len);
	pdu->len = BDADDR_SIZE + len;

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_scan = lll_adv_scan_rsp_peek(lll);

		/* NOTE: on failure the new PDU is not enqueued; the
		 * previous AD data remains in use.
		 */
		err = ull_adv_time_update(adv, pdu, pdu_scan);
		if (err) {
			return err;
		}
	}

	/* Publish the new PDU to the LLL double buffer. */
	lll_adv_data_enqueue(&adv->lll, idx);

	return 0;
}
1885 
/* Set legacy Scan Response data for an advertising set.
 *
 * Lazily initializes the scan response double buffer on first use,
 * copies @p len bytes of @p data into the next SCAN_RSP PDU and, when
 * the set is enabled and scannable, refreshes the event time
 * reservation before enqueueing the new PDU.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM when @p len exceeds
 * the legacy data maximum, BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when the
 * scan response buffer cannot be initialized, or an error from
 * ull_adv_time_update().
 */
uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* update scan pdu fields. */
	prev = lll_adv_scan_rsp_peek(&adv->lll);
	if (!prev) {
		uint8_t err;

		/* First scan response for this set: allocate and
		 * initialize the double buffer with an empty SCAN_RSP.
		 */
		err = lll_adv_data_init(&adv->lll.scan_rsp);
		if (err) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}

		prev = lll_adv_scan_rsp_peek(&adv->lll);
		init_pdu(prev, PDU_ADV_TYPE_SCAN_RSP);
	}

	/* Fill the next buffer; AdvA is carried over from the previous
	 * scan response PDU.
	 */
	pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
	pdu->type = PDU_ADV_TYPE_SCAN_RSP;
	pdu->rfu = 0;
	pdu->chan_sel = 0;
	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = 0;
	pdu->len = BDADDR_SIZE + len;
	memcpy(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE);
	memcpy(&pdu->scan_rsp.data[0], data, len);

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_adv_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_adv_scan = lll_adv_data_peek(lll);

		/* Only scannable PDU types reserve air time for the scan
		 * response; others need no timing update.
		 */
		if ((pdu_adv_scan->type == PDU_ADV_TYPE_ADV_IND) ||
		    (pdu_adv_scan->type == PDU_ADV_TYPE_SCAN_IND)) {
			err = ull_adv_time_update(adv, pdu_adv_scan, pdu);
			if (err) {
				return err;
			}
		}
	}

	/* Publish the new scan response PDU to the LLL double buffer. */
	lll_adv_scan_rsp_enqueue(&adv->lll, idx);

	return 0;
}
1943 
1944 static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_window,
1945 				   uint32_t ticks_delay_window_offset,
1946 				   uint32_t ticks_adjust_minus,
1947 				   ticker_op_func fp_op_func)
1948 {
1949 	uint32_t random_delay;
1950 	uint32_t ret;
1951 
1952 	/* Get pseudo-random number in the range [0..ticks_delay_window].
1953 	 * Please note that using modulo of 2^32 sample space has an uneven
1954 	 * distribution, slightly favoring smaller values.
1955 	 */
1956 	lll_rand_isr_get(&random_delay, sizeof(random_delay));
1957 	random_delay %= ticks_delay_window;
1958 	random_delay += (ticks_delay_window_offset + 1);
1959 
1960 	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
1961 			    TICKER_USER_ID_ULL_HIGH,
1962 			    TICKER_ID_ADV_BASE + ull_adv_handle_get(adv),
1963 			    random_delay,
1964 			    ticks_adjust_minus, 0, 0, 0, 0,
1965 			    fp_op_func, adv);
1966 
1967 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1968 		  (ret == TICKER_STATUS_BUSY) ||
1969 		  (fp_op_func == NULL));
1970 
1971 	return random_delay;
1972 }
1973 
1974 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
1975 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1976 void ull_adv_done(struct node_rx_event_done *done)
1977 {
1978 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1979 	struct lll_adv_aux *lll_aux;
1980 	struct node_rx_pdu *rx;
1981 	uint8_t handle;
1982 	uint32_t ret;
1983 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1984 	struct ll_adv_set *adv;
1985 	struct lll_adv *lll;
1986 
1987 	/* Get reference to ULL context */
1988 	adv = CONTAINER_OF(done->param, struct ll_adv_set, ull);
1989 	lll = &adv->lll;
1990 
1991 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1992 	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && done->extra.result != DONE_COMPLETED) {
1993 		/* Event aborted or too late - try to re-schedule */
1994 		uint32_t ticks_elapsed;
1995 		uint32_t ticks_now;
1996 		uint32_t delay_remain;
1997 
1998 		const uint32_t prepare_overhead =
1999 			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
2000 		const uint32_t ticks_adv_airtime = adv->ticks_at_expire +
2001 			prepare_overhead;
2002 
2003 		ticks_elapsed = 0U;
2004 
2005 		ticks_now = cntr_cnt_get();
2006 		if ((int32_t)(ticks_now - ticks_adv_airtime) > 0) {
2007 			ticks_elapsed = ticks_now - ticks_adv_airtime;
2008 		}
2009 
2010 		if (adv->delay_at_expire + ticks_elapsed <= ULL_ADV_RANDOM_DELAY) {
2011 			/* The perturbation window is still open */
2012 			delay_remain = ULL_ADV_RANDOM_DELAY - (adv->delay_at_expire +
2013 							       ticks_elapsed);
2014 		} else {
2015 			delay_remain = 0U;
2016 		}
2017 
2018 		/* Check if we have enough time to re-schedule */
2019 		if (delay_remain > prepare_overhead) {
2020 			uint32_t interval_us = adv->interval * ADV_INT_UNIT_US;
2021 			uint32_t ticks_adjust_minus;
2022 			uint32_t random_delay;
2023 
2024 			/* Get negative ticker adjustment needed to pull back ADV one
2025 			 * interval plus the randomized delay. This means that the ticker
2026 			 * will be updated to expire in time frame of now + start
2027 			 * overhead, until 10 ms window is exhausted.
2028 			 */
2029 			ticks_adjust_minus = HAL_TICKER_US_TO_TICKS(interval_us) + adv->delay;
2030 
2031 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2032 			if (adv->remain_duration_us > interval_us) {
2033 				/* Reset remain_duration_us to value before last ticker expire
2034 				 * to correct for the re-scheduling
2035 				 */
2036 				adv->remain_duration_us += interval_us +
2037 							   HAL_TICKER_TICKS_TO_US(
2038 								adv->delay_at_expire);
2039 			}
2040 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2041 
2042 			/* Apply random delay in range [prepare_overhead..delay_remain].
2043 			 * NOTE: This ticker_update may fail if update races with
2044 			 * ticker_stop, e.g. from ull_periph_setup. This is not a problem
2045 			 * and we can safely ignore the operation result.
2046 			 */
2047 			random_delay = ticker_update_rand(adv, delay_remain - prepare_overhead,
2048 							  prepare_overhead, ticks_adjust_minus,
2049 							  NULL);
2050 
2051 			/* Delay from ticker_update_rand is in addition to the last random delay */
2052 			adv->delay = random_delay;
2053 			adv->delay += adv->delay_at_expire;
2054 
2055 			/* Score of the event was increased due to the result, but since
2056 			 * we're getting a another chance we'll set it back.
2057 			 */
2058 			adv->lll.hdr.score -= 1;
2059 		}
2060 	}
2061 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2062 	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && adv->lll.aux) {
2063 		/* Primary event of extended advertising done - wait for aux done */
2064 		return;
2065 	}
2066 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2067 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
2068 
2069 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2070 	if (adv->max_events && (adv->event_counter >= adv->max_events)) {
2071 		adv->max_events = 0U;
2072 
2073 		rx = (void *)lll->node_rx_adv_term;
2074 		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_LIMIT_REACHED;
2075 	} else if (adv->remain_duration_us &&
2076 		   (adv->remain_duration_us <=
2077 		    ((uint64_t)adv->interval * ADV_INT_UNIT_US))) {
2078 		adv->remain_duration_us = 0U;
2079 
2080 		rx = (void *)lll->node_rx_adv_term;
2081 		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
2082 	} else {
2083 		return;
2084 	}
2085 
2086 	handle = ull_adv_handle_get(adv);
2087 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2088 
2089 	rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
2090 	rx->hdr.handle = handle;
2091 	rx->rx_ftr.param_adv_term.conn_handle = 0xffff;
2092 	rx->rx_ftr.param_adv_term.num_events = adv->event_counter;
2093 
2094 	lll_aux = lll->aux;
2095 	if (lll_aux) {
2096 		struct ll_adv_aux_set *aux;
2097 		uint8_t aux_handle;
2098 
2099 		aux = HDR_LLL2ULL(lll_aux);
2100 		aux_handle = ull_adv_aux_handle_get(aux);
2101 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2102 				  TICKER_USER_ID_ULL_HIGH,
2103 				  (TICKER_ID_ADV_AUX_BASE + aux_handle),
2104 				  ticker_stop_aux_op_cb, adv);
2105 	} else {
2106 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2107 				  TICKER_USER_ID_ULL_HIGH,
2108 				  (TICKER_ID_ADV_BASE + handle),
2109 				  ticker_stop_ext_op_cb, adv);
2110 	}
2111 
2112 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2113 		  (ret == TICKER_STATUS_BUSY));
2114 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2115 }
2116 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2117 
/* Update AdvA (and TargetA, when present) in the supplied advertising PDU.
 *
 * Returns a pointer to the updated AdvA field inside the PDU; the caller may
 * use it to validate the address that was actually placed in the PDU.
 */
const uint8_t *ull_adv_pdu_update_addrs(struct ll_adv_set *adv,
					struct pdu_adv *pdu)
{
	const uint8_t *adv_addr;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* A zero ext_hdr_len means the flags octet is absent; treat all
	 * extended header flags as cleared in that case.
	 */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}
#endif

	adv_addr = adva_update(adv, pdu);

	/* Update TargetA only if directed advertising PDU is supplied. Note
	 * that AUX_SCAN_REQ does not have TargetA flag set so it will be
	 * ignored here as expected.
	 */
	if ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	    ((pdu->type == PDU_ADV_TYPE_EXT_IND) && hdr_flags.tgt_addr) ||
#endif
	    0) {
		tgta_update(adv, pdu);
	}

	return adv_addr;
}
2151 
/* Recalculate the advertising event time reservation from the current PDUs
 * and, when not using JIT scheduling, resize the ticker slot accordingly.
 *
 * @param adv      Advertising set whose slot is updated.
 * @param pdu      Current advertising PDU.
 * @param pdu_scan Current scan response PDU (used for airtime estimation).
 *
 * @return BT_HCI_ERR_SUCCESS on success (or when no resize was needed),
 *         BT_HCI_ERR_CMD_DISALLOWED if the ticker update failed.
 */
uint8_t ull_adv_time_update(struct ll_adv_set *adv, struct pdu_adv *pdu,
			    struct pdu_adv *pdu_scan)
{
	struct lll_adv *lll;
	uint32_t time_ticks;
	uint8_t phy_flags;
	uint16_t time_us;
	uint8_t chan_map;
	uint8_t chan_cnt;
	uint8_t phy;

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	phy = lll->phy_p;
	phy_flags = lll->phy_flags;
#else
	/* Legacy advertising is always on 1M PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif

	/* Event time depends on how many primary channels are in use */
	chan_map = lll->chan_map;
	chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map));
	time_us = adv_time_get(pdu, pdu_scan, chan_cnt, phy, phy_flags);
	time_ticks = HAL_TICKER_US_TO_TICKS_CEIL(time_us);

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint32_t volatile ret_cb;
	uint32_t ticks_minus;
	uint32_t ticks_plus;
	uint32_t ret;

	/* Compute the signed slot delta as a plus/minus pair for the ticker;
	 * nothing to do when the slot size is unchanged.
	 */
	if (adv->ull.ticks_slot > time_ticks) {
		ticks_minus = adv->ull.ticks_slot - time_ticks;
		ticks_plus = 0U;
	} else if (adv->ull.ticks_slot < time_ticks) {
		ticks_minus = 0U;
		ticks_plus = time_ticks - adv->ull.ticks_slot;
	} else {
		return BT_HCI_ERR_SUCCESS;
	}

	/* Synchronously resize the ticker slot; ret_cb is completed by
	 * ull_ticker_status_give from ticker context.
	 */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_ADV_BASE +
			     ull_adv_handle_get(adv)),
			    0, 0, ticks_plus, ticks_minus, 0, 0,
			    ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	adv->ull.ticks_slot = time_ticks;

	return BT_HCI_ERR_SUCCESS;
}
2211 
/* (Re)initialize all advertising set instances to their power-on defaults.
 *
 * Called from the controller init/reset path. Always returns 0.
 */
static int init_reset(void)
{
	uint8_t handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Only set #0 exists for legacy-only builds; give it the default
	 * Tx power level.
	 */
	ll_adv[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		lll_adv_data_init(&ll_adv[handle].lll.adv_data);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* scan_rsp is not init'ed until we know if it is a legacy or extended scan rsp */
		memset(&ll_adv[handle].lll.scan_rsp, 0, sizeof(ll_adv[handle].lll.scan_rsp));
#else
		lll_adv_data_init(&ll_adv[handle].lll.scan_rsp);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
		/* Pointer to DF configuration must be cleared on reset. In other case it will point
		 * to a memory pool address that should be released. It may be used by the pool
		 * itself. In such situation it may cause error.
		 */
		ll_adv[handle].df_cfg = NULL;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
	}

	/* Make sure that set #0 is initialized with empty legacy PDUs. This is
	 * especially important if legacy HCI interface is used for advertising
	 * because it allows to enable advertising without any configuration,
	 * thus we need to have PDUs already initialized.
	 */
	init_set(&ll_adv[0]);

	return 0;
}
2249 
2250 static inline struct ll_adv_set *is_disabled_get(uint8_t handle)
2251 {
2252 	struct ll_adv_set *adv;
2253 
2254 	adv = ull_adv_set_get(handle);
2255 	if (!adv || adv->is_enabled) {
2256 		return NULL;
2257 	}
2258 
2259 	return adv;
2260 }
2261 
/* Estimate the worst-case radio event time, in microseconds, for one
 * advertising event using the given PDUs across @p adv_chn_cnt primary
 * channels on @p phy.
 *
 * The estimate includes prepare/cleanup overhead and, for legacy PDU types,
 * the inter-frame spacing plus the largest possible peer response
 * (SCAN_REQ + SCAN_RSP or CONNECT_IND, depending on the PDU type).
 */
static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
			     uint8_t adv_chn_cnt, uint8_t phy,
			     uint8_t phy_flags)
{
	uint16_t time_us = EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	/* NOTE: 16-bit value is sufficient to calculate the maximum radio
	 *       event time reservation for PDUs on primary advertising
	 *       channels (37, 38, and 39 channel indices of 1M and Coded PHY).
	 */

	/* Calculate the PDU Tx Time and hence the radio event length */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* ADV_EXT_IND is Tx-only on primary channels; one turnaround
		 * between each channel transmission.
		 */
		time_us += PDU_AC_US(pdu->len, phy, phy_flags) * adv_chn_cnt +
			   EVENT_RX_TX_TURNAROUND(phy) * (adv_chn_cnt - 1);
	} else
#endif
	{
		/* Legacy PDUs are always on 1M PHY */
		uint16_t adv_size =
			PDU_OVERHEAD_SIZE(PHY_1M) + ADVA_SIZE;
		const uint16_t conn_ind_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 INITA_SIZE + ADVA_SIZE + LLDATA_SIZE), PHY_1M);
		const uint8_t scan_req_us  =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 SCANA_SIZE + ADVA_SIZE), PHY_1M);
		const uint16_t scan_rsp_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 ADVA_SIZE + pdu_scan->len), PHY_1M);
		const uint8_t rx_to_us	= EVENT_RX_TO_US(PHY_1M);
		const uint8_t rxtx_turn_us = EVENT_RX_TX_TURNAROUND(PHY_1M);

		if (pdu->type == PDU_ADV_TYPE_NONCONN_IND) {
			/* Non-connectable, non-scannable: Tx only, no Rx
			 * window needed after each channel.
			 */
			adv_size += pdu->len;
			time_us += BYTES2US(adv_size, PHY_1M) * adv_chn_cnt +
				   rxtx_turn_us * (adv_chn_cnt - 1);
		} else {
			/* Account for the largest possible peer response per
			 * PDU type.
			 */
			if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
				adv_size += TARGETA_SIZE;
				time_us += conn_ind_us;
			} else if (pdu->type == PDU_ADV_TYPE_ADV_IND) {
				adv_size += pdu->len;
				time_us += MAX(scan_req_us + EVENT_IFS_MAX_US +
						scan_rsp_us, conn_ind_us);
			} else if (pdu->type == PDU_ADV_TYPE_SCAN_IND) {
				adv_size += pdu->len;
				time_us += scan_req_us + EVENT_IFS_MAX_US +
					   scan_rsp_us;
			}

			/* Per-channel Tx plus IFS and Rx timeout window; the
			 * response time itself is added only once above.
			 */
			time_us += (BYTES2US(adv_size, PHY_1M) +
				    EVENT_IFS_MAX_US + rx_to_us +
				    rxtx_turn_us) * (adv_chn_cnt - 1) +
				   BYTES2US(adv_size, PHY_1M) + EVENT_IFS_MAX_US;
		}
	}

	return time_us;
}
2322 
/* Advertising ticker expiry callback.
 *
 * Runs in ticker (ULL_HIGH) context on every advertising interval: enqueues
 * the LLL prepare via mayfly, applies the per-event random delay required by
 * the spec, and maintains the extended-advertising duration/event counters.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	/* Static mayfly/prepare state: safe because only one advertising
	 * prepare is in flight per ticker expiry in this context.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_prepare};
	static struct lll_prepare_param p;
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_ext_context *context = param;
	struct ll_adv_set *adv = context->context;
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	struct ll_adv_set *adv = param;
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	uint32_t random_delay;
	struct lll_adv *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->aux) {
		/* Check if we are about to exceed the duration or max events limit
		 * Usually this will be handled in ull_adv_done(), but in cases where
		 * the extended advertising events overlap (ie. several primary advertisings
		 * point to the same AUX_ADV_IND packet) the ticker will not be stopped
		 * in time. To handle this, we simply ignore the extra ticker callback and
		 * wait for the usual ull_adv_done() handling to run
		 */
		if ((adv->max_events && adv->event_counter >= adv->max_events) ||
		    (adv->remain_duration_us &&
		     adv->remain_duration_us <= (uint64_t)adv->interval * ADV_INT_UNIT_US)) {
			return;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Skip the prepare when this is a forced "must expire" with lazy
	 * (unless low latency build, which always prepares).
	 */
	if (IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		/* Increment prepare reference count */
		ref = ull_ref_inc(&adv->ull);
		LL_ASSERT(ref);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			uint32_t ticks_to_expire;
			uint32_t other_remainder;

			LL_ASSERT(context->other_expire_info);

			/* Adjust ticks to expire based on remainder value */
			ticks_to_expire = context->other_expire_info->ticks_to_expire;
			other_remainder = context->other_expire_info->remainder;
			hal_ticker_remove_jitter(&ticks_to_expire, &other_remainder);

			/* Store the ticks and remainder offset for aux ptr population in LLL */
			adv->lll.aux->ticks_pri_pdu_offset = ticks_to_expire;
			adv->lll.aux->us_pri_pdu_offset = other_remainder;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	* CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

		/* Append timing parameters */
		p.ticks_at_expire = ticks_at_expire;
		p.remainder = remainder;
		p.lazy = lazy;
		p.force = force;
		p.param = lll;
		mfy.param = &p;

		/* Kick LLL prepare */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING) || \
	(defined(CONFIG_BT_CTLR_ADV_EXT) && \
	 (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	 !defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO))
		/* Remember the ticks_at_expire, will be used by JIT scheduling
		 * and for checking latency calculating the aux offset for
		 * extended advertising.
		 */
		adv->ticks_at_expire = ticks_at_expire;

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Snapshot the delay used for this event; consumed by the
		 * JIT re-scheduling logic in the done handler.
		 */
		adv->delay_at_expire = adv->delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING ||
	* (CONFIG_BT_CTLR_ADV_EXT &&
	*  (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	*  !CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	*/

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	!defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			ull_adv_aux_offset_get(adv);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/
	}

	/* Apply adv random delay */
#if defined(CONFIG_BT_PERIPHERAL)
	/* High duty cycle directed advertising does not use random delay */
	if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
	{
		if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING) ||
		    (ticker_update_req == ticker_update_ack)) {
			/* Ticker update requested */
			ticker_update_req++;

			/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
			random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY, 0U, 0U,
							  ticker_update_op_cb);
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			adv->delay = random_delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
		} else {
			/* Previous update still pending; no new delay added */
			random_delay = 0U;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		uint16_t event_counter_inc;

		if (lazy == TICKER_LAZY_MUST_EXPIRE) {
			lazy = 0U;
			event_counter_inc = 0U;
		} else {
			event_counter_inc = (lazy + 1U);
		}

		if (adv->remain_duration_us && adv->event_counter > 0U) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			/* ticks_drift is always 0 with JIT scheduling, populate manually */
			ticks_drift = adv->delay_at_expire;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
			uint32_t interval_us = (uint64_t)adv->interval * ADV_INT_UNIT_US;
			uint32_t elapsed_us = interval_us * (lazy + 1U) +
						 HAL_TICKER_TICKS_TO_US(ticks_drift);

			/* End advertising if the added random delay pushes us beyond the limit */
			if (adv->remain_duration_us > elapsed_us + interval_us +
						      HAL_TICKER_TICKS_TO_US(random_delay)) {
				adv->remain_duration_us -= elapsed_us;
			} else {
				adv->remain_duration_us = interval_us;
			}
		}

		adv->event_counter += event_counter_inc;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	}

	DEBUG_RADIO_PREPARE_A(1);
}
2485 
/* Completion callback for the random-delay ticker_update in ticker_cb.
 *
 * Acknowledges the pending update request and asserts that a failed update
 * only happens in the known benign races (set being disabled, or a
 * connection establishment on a connectable set).
 */
static void ticker_update_op_cb(uint32_t status, void *param)
{
	/* Reset update requested */
	ticker_update_ack = ticker_update_req;

#if defined(CONFIG_BT_PERIPHERAL) && (defined(CONFIG_BT_ASSERT) || defined(CONFIG_ASSERT))
	/* These locals are only needed for the assert expression below */
	struct ll_adv_set *adv = param;
	struct pdu_adv *pdu = lll_adv_data_peek(&adv->lll);
	bool connectable = (pdu->type == PDU_ADV_TYPE_ADV_IND) ||
			   (pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
			   ((pdu->type == PDU_ADV_TYPE_EXT_IND) &&
			    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN)) ||
#endif /* CONFIG_BT_CTLR_ADV_EXT */
			   0;
#endif /* CONFIG_BT_PERIPHERAL && (CONFIG_BT_ASSERT || CONFIG_ASSERT) */

	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get() ||
#if defined(CONFIG_BT_PERIPHERAL)
		   /* if using connectable adv and lll.conn is 0 -> a connection is underway */
		  (connectable && !adv->lll.conn) ||
#endif /* CONFIG_BT_PERIPHERAL */
		  0);
}
2511 
2512 #if defined(CONFIG_BT_PERIPHERAL)
2513 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2514 			   uint32_t remainder, uint16_t lazy, uint8_t force,
2515 			   void *param)
2516 {
2517 	struct ll_adv_set *adv = param;
2518 	uint8_t handle;
2519 	uint32_t ret;
2520 
2521 	handle = ull_adv_handle_get(adv);
2522 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2523 
2524 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2525 			  TICKER_ID_ADV_BASE + handle,
2526 			  ticker_stop_op_cb, adv);
2527 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2528 		  (ret == TICKER_STATUS_BUSY));
2529 }
2530 
/* Completion callback for the advertising ticker stop issued by
 * ticker_stop_cb; defers the actual disable work to ULL_HIGH via mayfly.
 */
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, adv_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

#if defined(CONFIG_BT_HCI_MESH_EXT)
	/* FIXME: why is this here for Mesh commands? */
	if (param) {
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2557 
/* Disable the advertising set after its ticker stopped.
 *
 * If LLL still holds a reference (an event is in the pipeline), request an
 * LLL disable and register disabled_cb to run once the reference count
 * reaches zero; otherwise invoke disabled_cb immediately.
 */
static void adv_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&adv->lll);
	}
}
2589 
/* Final step of high duty cycle directed advertising timeout: generate a
 * connection-complete event with status BT_HCI_ERR_ADV_TIMEOUT and, for
 * extended advertising, an advertising-set-terminated event as well.
 *
 * @param param Pointer to the set's lll_adv (lll_hdr), from which the
 *              parent ll_adv_set is recovered.
 */
static void disabled_cb(void *param)
{
	struct ll_adv_set *adv;
	struct node_rx_pdu *rx;
	struct node_rx_cc *cc;
	memq_link_t *link;

	adv = ((struct lll_hdr *)param)->parent;

	/* Consume the pre-allocated connection-complete link and rx node;
	 * clearing the pointers marks them as used.
	 */
	LL_ASSERT(adv->link_cc_free);
	link = adv->link_cc_free;
	adv->link_cc_free = NULL;

	LL_ASSERT(adv->node_rx_cc_free);
	rx = adv->node_rx_cc_free;
	adv->node_rx_cc_free = NULL;

	rx->hdr.type = NODE_RX_TYPE_CONNECTION;
	rx->hdr.handle = 0xffff;

	cc = (void *)rx->pdu;
	memset(cc, 0x00, sizeof(struct node_rx_cc));
	cc->status = BT_HCI_ERR_ADV_TIMEOUT;

	rx->rx_ftr.param = param;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (adv->lll.node_rx_adv_term) {
		uint8_t handle;

		/* Enqueue the connection-complete event first, then reuse
		 * rx/link for the set-terminated event enqueued below.
		 */
		ll_rx_put(link, rx);

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx = (void *)adv->lll.node_rx_adv_term;
		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
		rx->rx_ftr.param_adv_term.conn_handle = 0xffff;
		rx->rx_ftr.param_adv_term.num_events = adv->event_counter;

		link = rx->hdr.link;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Enqueue (last) event and schedule host notification */
	ll_rx_put_sched(link, rx);
}
2638 
/* Release the connection context pre-allocated for a connectable advertising
 * set, along with the reserved connection-complete rx node and link.
 *
 * Called when advertising is disabled before a connection was established.
 */
static void conn_release(struct ll_adv_set *adv)
{
	struct lll_conn *lll = adv->lll.conn;
	memq_link_t *link;

	/* Tear down the Tx memq and keep its link for reuse by the freed
	 * connection context.
	 */
	LL_ASSERT(!lll->link_tx_free);
	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
	LL_ASSERT(link);
	lll->link_tx_free = link;

	ll_conn_release(lll->hdr.parent);
	adv->lll.conn = NULL;

	/* Return the reserved connection-complete node/link to their pools */
	ll_rx_release(adv->node_rx_cc_free);
	adv->node_rx_cc_free = NULL;
	ll_rx_link_release(adv->link_cc_free);
	adv->link_cc_free = NULL;
}
2657 #endif /* CONFIG_BT_PERIPHERAL */
2658 
2659 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2660 static uint8_t leg_adv_type_get(uint8_t evt_prop)
2661 {
2662 	/* We take advantage of the fact that 2 LS bits
2663 	 * of evt_prop can be used in a lookup to return
2664 	 * PDU type value in the pdu_adv_type[] lookup.
2665 	 */
2666 	uint8_t const leg_adv_type[] = {
2667 		0x03, /* index of PDU_ADV_TYPE_NONCONN_IND in pdu_adv_type[] */
2668 		0x04, /* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2669 		0x02, /* index of PDU_ADV_TYPE_SCAN_IND in pdu_adv_type[] */
2670 		0x00  /* index of PDU_ADV_TYPE_ADV_IND in pdu_adv_type[] */
2671 	};
2672 
2673 	/* if high duty cycle directed */
2674 	if (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN) {
2675 		/* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2676 		return 0x01;
2677 	}
2678 
2679 	return leg_adv_type[evt_prop & 0x03];
2680 }
2681 
2682 static void adv_max_events_duration_set(struct ll_adv_set *adv,
2683 					uint16_t duration,
2684 					uint8_t max_ext_adv_evts)
2685 {
2686 	adv->event_counter = 0;
2687 	adv->max_events = max_ext_adv_evts;
2688 	adv->remain_duration_us = (uint32_t)duration * 10U * USEC_PER_MSEC;
2689 }
2690 
2691 static void ticker_stop_aux_op_cb(uint32_t status, void *param)
2692 {
2693 	static memq_link_t link;
2694 	static struct mayfly mfy = {0, 0, &link, NULL, aux_disable};
2695 	uint32_t ret;
2696 
2697 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
2698 
2699 	/* Check if any pending LLL events that need to be aborted */
2700 	mfy.param = param;
2701 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2702 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2703 	LL_ASSERT(!ret);
2704 }
2705 
2706 static void aux_disable(void *param)
2707 {
2708 	struct lll_adv_aux *lll_aux;
2709 	struct ll_adv_aux_set *aux;
2710 	struct ll_adv_set *adv;
2711 	struct ull_hdr *hdr;
2712 
2713 	adv = param;
2714 	lll_aux = adv->lll.aux;
2715 	aux = HDR_LLL2ULL(lll_aux);
2716 	hdr = &aux->ull;
2717 	if (ull_ref_get(hdr)) {
2718 		LL_ASSERT(!hdr->disabled_cb);
2719 		hdr->disabled_param = adv;
2720 		hdr->disabled_cb = aux_disabled_cb;
2721 	} else {
2722 		aux_disabled_cb(param);
2723 	}
2724 }
2725 
2726 static void aux_disabled_cb(void *param)
2727 {
2728 	uint8_t handle;
2729 	uint32_t ret;
2730 
2731 	handle = ull_adv_handle_get(param);
2732 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2733 			  TICKER_USER_ID_ULL_HIGH,
2734 			  (TICKER_ID_ADV_BASE + handle),
2735 			  ticker_stop_ext_op_cb, param);
2736 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2737 		  (ret == TICKER_STATUS_BUSY));
2738 }
2739 
/* Completion callback for stopping an extended advertising set's primary
 * ticker; defers the set disable to ULL_HIGH context via mayfly.
 */
static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2759 
/* Disable an extended advertising set after its ticker stopped.
 *
 * Mirrors adv_disable: if LLL still holds a reference, request an LLL
 * disable and register ext_disabled_cb for when the count reaches zero;
 * otherwise invoke ext_disabled_cb immediately.
 */
static void ext_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&adv->lll);
	}
}
2791 
2792 static void ext_disabled_cb(void *param)
2793 {
2794 	struct lll_adv *lll = (void *)param;
2795 	struct node_rx_pdu *rx = lll->node_rx_adv_term;
2796 
2797 	/* Under race condition, if a connection has been established then
2798 	 * node_rx is already utilized to send terminate event on connection
2799 	 */
2800 	if (!rx) {
2801 		return;
2802 	}
2803 
2804 	/* NOTE: parameters are already populated on disable, just enqueue here
2805 	 */
2806 	ll_rx_put_sched(rx->hdr.link, rx);
2807 }
2808 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2809 
/* Disable the advertising set identified by @p handle.
 *
 * Stops the associated tickers and the LLL event, releases any pending
 * connection context and extended-advertising terminate node, and clears
 * the enabled flag.
 *
 * @return 0 on success (or when already disabled and strict mode is off),
 *         BT_HCI_ERR_CMD_DISALLOWED otherwise.
 */
static inline uint8_t disable(uint8_t handle)
{
	uint32_t volatile ret_cb;
	struct ll_adv_set *adv;
	uint32_t ret;
	void *mark;
	int err;

	adv = ull_adv_is_enabled_get(handle);
	if (!adv) {
		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
		 * Disabling advertising when it is already disabled has no
		 * effect.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT)) {
			return 0;
		}

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* Indicate to LLL that a cancellation is requested */
		adv->lll.conn->periph.cancelled = 1U;
		/* Barrier so the cancelled flag is visible before we read
		 * the initiated flag below.
		 */
		cpu_dmb();

		/* Check if a connection was initiated (connection
		 * establishment race between LLL and ULL).
		 */
		if (unlikely(adv->lll.conn->periph.initiated)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Mark the set as being disabled so concurrent ticker callbacks can
	 * recognize the benign race (see ticker_update_op_cb).
	 */
	mark = ull_disable_mark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.is_hdcd) {
		/* Also stop the one-shot high duty cycle directed adv
		 * timeout ticker.
		 */
		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_THREAD, TICKER_ID_ADV_STOP,
				  ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret) {
			mark = ull_disable_unmark(adv);
			LL_ASSERT(mark == adv);

			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Synchronously stop the advertising ticker */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_ADV_BASE + handle,
			  ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		mark = ull_disable_unmark(adv);
		LL_ASSERT(mark == adv);

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Abort/flush any in-flight LLL event for this set */
	err = ull_disable(&adv->lll);
	LL_ASSERT(!err || (err == -EALREADY));

	mark = ull_disable_unmark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		err = ull_adv_aux_stop(aux);
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* No connection was established; release the reserved
		 * connection context and rx resources.
		 */
		conn_release(adv);
	}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv *lll = &adv->lll;

	if (lll->node_rx_adv_term) {
		/* Release the unused set-terminated event node and link */
		struct node_rx_pdu *node_rx_adv_term =
			(void *)lll->node_rx_adv_term;

		lll->node_rx_adv_term = NULL;

		ll_rx_link_release(node_rx_adv_term->hdr.link);
		ll_rx_release(node_rx_adv_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	adv->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
		/* No advertising or scanning active: let the filter/RPA
		 * machinery know.
		 */
		ull_filter_adv_scan_state_cb(0);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}
2928 
/* At advertising enable, determine which PDU carries AdvA (primary or, for
 * extended advertising, the auxiliary PDU) and update its addresses, along
 * with the scan response addresses where applicable.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if a scannable extended
 *         set has no scan response data, BT_HCI_ERR_INVALID_PARAM if no
 *         valid own address could be placed in the PDU.
 */
static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
					struct pdu_adv *pdu,
					struct pdu_adv *pdu_scan)
{
	struct pdu_adv *pdu_adv_to_update;
	struct lll_adv *lll;

	pdu_adv_to_update = NULL;
	lll = &adv->lll;

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Extract primary PDU extended header flags; absent header
		 * means all flags cleared.
		 */
		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		if (pri_com_hdr->adv_mode & BT_HCI_LE_ADV_PROP_SCAN) {
			struct pdu_adv *sr = lll_adv_scan_rsp_peek(lll);

			/* Scannable extended advertising requires scan
			 * response data to be set before enable.
			 */
			if (!sr->len) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}
		}

		/* AdvA, fill here at enable */
		if (pri_hdr_flags.adv_addr) {
			pdu_adv_to_update = pdu;
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			/* AdvA may instead live in the auxiliary PDU; check
			 * its extended header flags the same way.
			 */
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			sec_pdu = lll_adv_aux_data_peek(lll->aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu_adv_to_update = sec_pdu;
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		/* Legacy PDUs always carry AdvA in the primary PDU */
		pdu_adv_to_update = pdu;
	}

	if (pdu_adv_to_update) {
		const uint8_t *adv_addr;

		adv_addr = ull_adv_pdu_update_addrs(adv, pdu_adv_to_update);

		/* In case the local IRK was not set or no match was
		 * found the fallback address was used instead, check
		 * that a valid address has been set.
		 */
		if (pdu_adv_to_update->tx_addr &&
		    !mem_nz((void *)adv_addr, BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Do not update scan response for extended non-scannable since
		 * there may be no scan response set.
		 */
		if ((pdu->type != PDU_ADV_TYPE_EXT_IND) ||
		    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_SCAN)) {
#else
		if (1) {
#endif
			ull_adv_pdu_update_addrs(adv, pdu_scan);
		}

	}

	return 0;
}
3022 
/* Return a pointer to the AdvA field within the given advertising PDU,
 * for both legacy and extended PDU layouts.
 *
 * For extended PDUs the caller must ensure the adv_addr flag is set
 * (asserted here); AdvA then immediately follows the flags octet.
 */
static inline uint8_t *adv_pdu_adva_get(struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* Absent extended header means all flags cleared */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}

	/* All extended PDUs have AdvA at the same offset in common header */
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		LL_ASSERT(hdr_flags.adv_addr);

		return &com_hdr->ext_hdr_adv_data[1];
	}
#endif

	/* All legacy PDUs have AdvA at the same offset */
	return pdu->adv_ind.addr;
}
3047 
/* Fill the AdvA field of @p pdu, preferring a resolvable private address
 * when LL Privacy resolves one for this advertising set, and falling back
 * to the own identity address otherwise.
 *
 * Returns a pointer to the AdvA bytes written into the PDU.
 */
static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rpa = ull_filter_adva_get(adv->lll.rl_idx);
#else
	const uint8_t *rpa = NULL;
#endif
	const uint8_t *own_id_addr;
	const uint8_t *tx_addr;
	uint8_t *adv_addr;

	/* own_id_addr is resolved only when it will actually be consumed:
	 * as the fallback TX address (no RPA available) and/or to record
	 * the identity address for same-peer connection checks. When an
	 * RPA is in use and CHECK_SAME_PEER_CONN is disabled, own_id_addr
	 * is deliberately left uninitialized and is never read.
	 */
	if (!rpa || IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)) {
		if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		} else if (ll_adv_cmds_is_ext() && pdu->tx_addr) {
			/* Extended advertising commands with TxAdd set use
			 * the per-set random address.
			 */
			own_id_addr = adv->rnd_addr;
#endif
		} else {
			own_id_addr = ll_addr_get(pdu->tx_addr);
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	/* Remember the identity address in use for later same-peer
	 * connection checks.
	 */
	(void)memcpy(adv->own_id_addr, own_id_addr, BDADDR_SIZE);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

	if (rpa) {
		/* An RPA is a random device address, so force TxAdd */
		pdu->tx_addr = 1;
		tx_addr = rpa;
	} else {
		tx_addr = own_id_addr;
	}

	adv_addr = adv_pdu_adva_get(pdu);
	memcpy(adv_addr, tx_addr, BDADDR_SIZE);

	return adv_addr;
}
3086 
/* Overwrite the TargetA field of @p pdu with a resolvable private address
 * when LL Privacy resolves one for this advertising set.
 *
 * Without LL Privacy (or when no RPA is resolved) the identity TargetA
 * written at advertising-set configuration time is left untouched.
 */
static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rx_addr = ull_filter_tgta_get(adv->lll.rl_idx);

	if (rx_addr) {
		uint8_t *tgt_addr;

		/* An RPA is a random device address, so set RxAdd */
		pdu->rx_addr = 1;

		/* TargetA always follows AdvA in all PDUs */
		tgt_addr = adv_pdu_adva_get(pdu) + BDADDR_SIZE;
		(void)memcpy(tgt_addr, rx_addr, BDADDR_SIZE);
	}
#endif
}
3107 
3108 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type)
3109 {
3110 	/* TODO: Add support for extended advertising PDU if needed */
3111 	pdu->type = pdu_type;
3112 	pdu->rfu = 0;
3113 	pdu->chan_sel = 0;
3114 	pdu->tx_addr = 0;
3115 	pdu->rx_addr = 0;
3116 	pdu->len = BDADDR_SIZE;
3117 }
3118 
3119 static void init_set(struct ll_adv_set *adv)
3120 {
3121 	adv->interval = BT_LE_ADV_INTERVAL_DEFAULT;
3122 #if defined(CONFIG_BT_CTLR_PRIVACY)
3123 	adv->own_addr_type = BT_ADDR_LE_PUBLIC;
3124 #endif /* CONFIG_BT_CTLR_PRIVACY */
3125 	adv->lll.chan_map = BT_LE_ADV_CHAN_MAP_ALL;
3126 	adv->lll.filter_policy = BT_LE_ADV_FP_NO_FILTER;
3127 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3128 	adv->delay = 0U;
3129 #endif /* ONFIG_BT_CTLR_JIT_SCHEDULING */
3130 
3131 	init_pdu(lll_adv_data_peek(&ll_adv[0].lll), PDU_ADV_TYPE_ADV_IND);
3132 
3133 #if !defined(CONFIG_BT_CTLR_ADV_EXT)
3134 	init_pdu(lll_adv_scan_rsp_peek(&ll_adv[0].lll), PDU_ADV_TYPE_SCAN_RSP);
3135 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
3136 }
3137