1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <string.h>
9 
10 #include <zephyr/kernel.h>
11 #include <soc.h>
12 #include <zephyr/bluetooth/hci_types.h>
13 #include <zephyr/sys/byteorder.h>
14 
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/radio.h"
18 #include "hal/ticker.h"
19 #include "hal/cntr.h"
20 
21 #include "util/util.h"
22 #include "util/mem.h"
23 #include "util/memq.h"
24 #include "util/mayfly.h"
25 #include "util/dbuf.h"
26 
27 #include "ticker/ticker.h"
28 
29 #include "pdu_df.h"
30 #include "lll/pdu_vendor.h"
31 #include "pdu.h"
32 
33 #include "lll.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll/lll_adv_types.h"
37 #include "lll_adv.h"
38 #include "lll/lll_adv_pdu.h"
39 #include "lll_scan.h"
40 #include "lll/lll_df_types.h"
41 #include "lll_conn.h"
42 #include "lll_filter.h"
43 #include "lll_conn_iso.h"
44 
45 #include "ll_sw/ull_tx_queue.h"
46 
47 #include "ull_adv_types.h"
48 #include "ull_scan_types.h"
49 #include "ull_conn_types.h"
50 #include "ull_filter.h"
51 
52 #include "ull_adv_internal.h"
53 #include "ull_scan_internal.h"
54 #include "ull_conn_internal.h"
55 #include "ull_internal.h"
56 
57 #include "ll.h"
58 #include "ll_feat.h"
59 #include "ll_settings.h"
60 
61 #include "ll_sw/isoal.h"
62 #include "ll_sw/ull_iso_types.h"
63 #include "ll_sw/ull_conn_iso_types.h"
64 
65 #include "ll_sw/ull_llcp.h"
66 
67 
68 #include "hal/debug.h"
69 
70 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle);
71 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv);
72 
73 static int init_reset(void);
74 static inline struct ll_adv_set *is_disabled_get(uint8_t handle);
75 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
76 			     uint8_t adv_chn_cnt, uint8_t phy,
77 			     uint8_t phy_flags);
78 
79 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
80 		      uint32_t remainder, uint16_t lazy, uint8_t force,
81 		      void *param);
82 static void ticker_update_op_cb(uint32_t status, void *param);
83 
84 #if defined(CONFIG_BT_PERIPHERAL)
85 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
86 			   uint32_t remainder, uint16_t lazy, uint8_t force,
87 			   void *param);
88 static void ticker_stop_op_cb(uint32_t status, void *param);
89 static void adv_disable(void *param);
90 static void disabled_cb(void *param);
91 static void conn_release(struct ll_adv_set *adv);
92 #endif /* CONFIG_BT_PERIPHERAL */
93 
94 #if defined(CONFIG_BT_CTLR_ADV_EXT)
95 static uint8_t leg_adv_type_get(uint8_t evt_prop);
96 static void adv_max_events_duration_set(struct ll_adv_set *adv,
97 					uint16_t duration,
98 					uint8_t max_ext_adv_evts);
99 static void ticker_stop_aux_op_cb(uint32_t status, void *param);
100 static void aux_disable(void *param);
101 static void aux_disabled_cb(void *param);
102 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
103 static void ext_disable(void *param);
104 static void ext_disabled_cb(void *param);
105 #endif /* CONFIG_BT_CTLR_ADV_EXT */
106 
107 static inline uint8_t disable(uint8_t handle);
108 
109 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
110 					struct pdu_adv *pdu,
111 					struct pdu_adv *pdu_scan);
112 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
113 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
114 
115 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type);
116 static void init_set(struct ll_adv_set *adv);
117 
118 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
119 
120 #if defined(CONFIG_BT_TICKER_EXT)
121 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
122 #endif /* CONFIG_BT_TICKER_EXT */
123 
124 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_CTLR_ADV_EXT)
125 static uint8_t ll_adv_cmds;
126 
int ll_adv_cmds_set(uint8_t adv_cmds)
{
	/* Once latched, the command set (legacy vs. extended) may not be
	 * switched; only re-confirmation of the same value succeeds.
	 */
	if (ll_adv_cmds) {
		return (ll_adv_cmds == adv_cmds) ? 0 : -EINVAL;
	}

	ll_adv_cmds = adv_cmds;

	if (adv_cmds == LL_ADV_CMDS_LEGACY) {
		struct ll_adv_set *adv = &ll_adv[0];

		/* Legacy commands implicitly operate on set 0; mark it as
		 * created so subsequent legacy HCI commands find it.
		 */
#if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
		adv->hci_handle = 0;
#endif
		adv->is_created = 1;
	}

	return 0;
}
148 
ll_adv_cmds_is_ext(void)149 int ll_adv_cmds_is_ext(void)
150 {
151 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
152 }
153 #endif
154 
155 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
uint8_t ll_adv_set_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
{
	uint8_t idx;

	/* Linear search over the advertising set pool for a created set
	 * mapped to the requested HCI advertising handle.
	 */
	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		const struct ll_adv_set *adv = &ll_adv[idx];

		if (adv->is_created && (adv->hci_handle == hci_handle)) {
			*handle = idx;
			return 0;
		}
	}

	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
}
172 
uint8_t ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle, uint8_t *handle)
{
	struct ll_adv_set *adv_free = NULL;
	uint8_t idx;

	/* Look up an existing mapping; remember the first unused set so a
	 * new mapping can be allocated if none is found.
	 */
	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		struct ll_adv_set *adv = &ll_adv[idx];

		if (!adv->is_created) {
			if (!adv_free) {
				adv_free = adv;
			}
			continue;
		}

		if (adv->hci_handle == hci_handle) {
			*handle = idx;
			return 0;
		}
	}

	if (!adv_free) {
		/* All advertising sets are in use */
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Bind the free set to this HCI handle; it is marked created by the
	 * parameter set command, not here.
	 */
	adv_free->hci_handle = hci_handle;
	*handle = ull_adv_handle_get(adv_free);

	return 0;
}
200 
uint8_t ll_adv_set_hci_handle_get(uint8_t handle)
{
	struct ll_adv_set *adv = ull_adv_set_get(handle);

	/* Caller must pass a valid, created set; anything else is a
	 * controller-internal inconsistency.
	 */
	LL_ASSERT(adv && adv->is_created);

	return adv->hci_handle;
}
210 #endif
211 
/* Set advertising parameters for an advertising set.
 *
 * With CONFIG_BT_CTLR_ADV_EXT this services both the legacy and the extended
 * HCI parameter set commands: a legacy command is indicated by adv_type !=
 * PDU_ADV_TYPE_EXT_IND, an extended command maps legacy-style event
 * properties (BT_HCI_LE_ADV_PROP_LEGACY) back to a legacy PDU type. Without
 * CONFIG_BT_CTLR_ADV_EXT only the legacy single-set form is compiled.
 *
 * Updates the "current" primary channel PDU in the double buffer, releasing
 * auxiliary/periodic resources when the PDU type changes away from extended
 * advertising, and (re)initializes the scan response PDU pool as needed.
 *
 * Returns 0 on success, otherwise a BT_HCI_ERR_* code.
 *
 * NOTE(review): the 'skip' parameter is not referenced anywhere in this
 * function body — confirm whether it is consumed elsewhere or reserved.
 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
uint8_t ll_adv_params_set(uint8_t handle, uint16_t evt_prop, uint32_t interval,
		       uint8_t adv_type, uint8_t own_addr_type,
		       uint8_t direct_addr_type, uint8_t const *const direct_addr,
		       uint8_t chan_map, uint8_t filter_policy,
		       uint8_t *const tx_pwr, uint8_t phy_p, uint8_t skip,
		       uint8_t phy_s, uint8_t sid, uint8_t sreq)
{
	/* Map HCI adv_type index to on-air PDU type; index 0x05 is the
	 * extended advertising indication.
	 */
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_EXT_IND};
	uint8_t is_pdu_type_changed = 0;
	uint8_t is_new_set;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_adv_params_set(uint16_t interval, uint8_t adv_type,
		       uint8_t own_addr_type, uint8_t direct_addr_type,
		       uint8_t const *const direct_addr, uint8_t chan_map,
		       uint8_t filter_policy)
{
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND};
	/* Legacy command set operates on the single advertising set 0 */
	uint8_t const handle = 0;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	struct ll_adv_set *adv;
	uint8_t pdu_type_prev;
	struct pdu_adv *pdu;

	/* Parameters may only change while the set is disabled */
	adv = is_disabled_get(handle);
	if (!adv) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
	 * evt_prop bits.
	 */

	/* Extended adv param set command used */
	if (adv_type == PDU_ADV_TYPE_EXT_IND) {
		/* legacy */
		if (evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) {
			/* Anonymous advertising is an extended-only feature */
			if (evt_prop & BT_HCI_LE_ADV_PROP_ANON) {
				return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* disallow changing to legacy advertising while
			 * periodic advertising enabled.
			 */
			if (adv->lll.sync) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

			/* Translate legacy event properties to a legacy
			 * pdu_adv_type[] index.
			 */
			adv_type = leg_adv_type_get(evt_prop);

			adv->lll.phy_p = PHY_1M;
		} else {
			/* - Connectable and scannable not allowed;
			 * - High duty cycle directed connectable not allowed
			 */
			if (((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN)) ==
			     (BT_HCI_LE_ADV_PROP_CONN |
			      BT_HCI_LE_ADV_PROP_SCAN)) ||
			    (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* While periodic advertising is enabled, properties
			 * incompatible with it may not be set.
			 */
			if (adv->lll.sync &&
			    (evt_prop & (BT_HCI_LE_ADV_PROP_ANON |
					 BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN))) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if (CONFIG_BT_CTLR_ADV_AUX_SET == 0)
			/* Connectable or scannable requires aux */
			if (evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					BT_HCI_LE_ADV_PROP_SCAN)) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}
#endif

			adv_type = 0x05; /* index of PDU_ADV_TYPE_EXT_IND in */
					 /* pdu_adv_type[] */

			adv->lll.phy_p = phy_p;
			adv->lll.phy_flags = PHY_FLAGS_S8;
		}
	} else {
		/* Legacy HCI command form always advertises on 1M PHY */
		adv->lll.phy_p = PHY_1M;
	}

	/* A set that was never created before is initialized from scratch
	 * below (is_new_set forces the PDU type to be (re)written).
	 */
	is_new_set = !adv->is_created;
	adv->is_created = 1;
	adv->is_ad_data_cmplt = 1U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* remember parameters so that set adv/scan data and adv enable
	 * interface can correctly update adv/scan data in the
	 * double buffer between caller and controller context.
	 */
	/* Set interval for Undirected or Low Duty Cycle Directed Advertising */
	if (adv_type != 0x01) {
		adv->interval = interval;
	} else {
		/* index 0x01 is High Duty Cycle Directed: no interval */
		adv->interval = 0;
	}
	adv->lll.chan_map = chan_map;
	adv->lll.filter_policy = filter_policy;

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	adv->lll.scan_req_notify = sreq;
#endif

	/* update the "current" primary adv PDU */
	pdu = lll_adv_data_peek(&adv->lll);
	pdu_type_prev = pdu->type;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (is_new_set) {
		is_pdu_type_changed = 1;

		pdu->type = pdu_adv_type[adv_type];
		if (pdu->type != PDU_ADV_TYPE_EXT_IND) {
			pdu->len = 0U;
		}
	/* check if new PDU type is different that past one */
	} else if (pdu->type != pdu_adv_type[adv_type]) {
		is_pdu_type_changed = 1;

		/* If old PDU was extended advertising PDU, release
		 * auxiliary and periodic advertising sets.
		 */
		if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
			struct lll_adv_aux *lll_aux = adv->lll.aux;

			if (lll_aux) {
				struct ll_adv_aux_set *aux;

				/* FIXME: copy AD data from auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
				/* Periodic advertising rides on the aux set;
				 * release it first.
				 */
				if (adv->lll.sync) {
					struct ll_adv_sync_set *sync;

					sync = HDR_LLL2ULL(adv->lll.sync);
					adv->lll.sync = NULL;

					ull_adv_sync_release(sync);
				}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

				/* Release auxiliary channel set */
				aux = HDR_LLL2ULL(lll_aux);
				adv->lll.aux = NULL;

				ull_adv_aux_release(aux);
			} else {
				/* No previous AD data in auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;
			}
		}

		pdu->type = pdu_adv_type[adv_type];
	}

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	pdu->type = pdu_adv_type[adv_type];
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	pdu->rfu = 0;

	/* Channel Selection Algorithm #2 is advertised only in connectable
	 * legacy PDUs, when the controller supports it.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
	    ((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
	     (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
		pdu->chan_sel = 1;
	} else {
		pdu->chan_sel = 0;
	}

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
	/* Backup the legacy AD Data if switching to legacy directed advertising
	 * or to Extended Advertising.
	 */
	if (((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
	     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	      (pdu->type == PDU_ADV_TYPE_EXT_IND))) &&
	    (pdu_type_prev != PDU_ADV_TYPE_DIRECT_IND) &&
	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
	     (pdu_type_prev != PDU_ADV_TYPE_EXT_IND))) {
		if (pdu->len == 0U) {
			adv->ad_data_backup.len = 0U;
		} else {
			LL_ASSERT(pdu->len >=
				  offsetof(struct pdu_adv_adv_ind, data));

			adv->ad_data_backup.len = pdu->len -
				offsetof(struct pdu_adv_adv_ind, data);
			memcpy(adv->ad_data_backup.data, pdu->adv_ind.data,
			       adv->ad_data_backup.len);
		}
	}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Remember identity addressing info for resolving list lookup at
	 * enable time.
	 */
	adv->own_addr_type = own_addr_type;
	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
		adv->peer_addr_type = direct_addr_type;
		memcpy(&adv->peer_addr, direct_addr, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = direct_addr_type;
		memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE);
		pdu->len = sizeof(struct pdu_adv_direct_ind);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Rebuild the extended header in place: first walk forward
		 * computing old (pri_dptr_prev) and new (pri_dptr) field end
		 * offsets from the header flags, then walk backward moving/
		 * filling each field. Order of the two passes must mirror
		 * each other exactly.
		 */
		struct pdu_adv_ext_hdr *pri_hdr, pri_hdr_prev;
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		uint8_t *pri_dptr_prev, *pri_dptr;
		uint8_t len;

		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		pri_dptr = pri_hdr->data;
		pri_dptr_prev = pri_dptr;

		/* No ACAD and no AdvData */
		pri_com_hdr->adv_mode = evt_prop & 0x03;

		/* Zero-init header flags */
		if (is_pdu_type_changed) {
			*(uint8_t *)&pri_hdr_prev = 0U;
		} else {
			pri_hdr_prev = *pri_hdr;
		}
		*(uint8_t *)pri_hdr = 0U;

		/* AdvA flag */
		if (pri_hdr_prev.adv_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		if (!pri_com_hdr->adv_mode &&
		    !(evt_prop & BT_HCI_LE_ADV_PROP_ANON) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			/* TODO: optional on 1M with Aux Ptr */
			pri_hdr->adv_addr = 1;

			/* NOTE: AdvA is filled at enable */
			pdu->tx_addr = own_addr_type & 0x1;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->tx_addr = 0;
		}

		/* TargetA flag */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		/* TargetA flag in primary channel PDU only for directed */
		if (evt_prop & BT_HCI_LE_ADV_PROP_DIRECT) {
			pri_hdr->tgt_addr = 1;
			pdu->rx_addr = direct_addr_type;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->rx_addr = 0;
		}

		/* No CTEInfo flag in primary channel PDU */

		/* ADI flag */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev += sizeof(struct pdu_adv_adi);

			pri_hdr->adi = 1;
			pri_dptr += sizeof(struct pdu_adv_adi);
		}

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr flag */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
		}
		/* Need aux for connectable or scannable extended advertising */
		if (pri_hdr_prev.aux_ptr ||
		    ((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
				  BT_HCI_LE_ADV_PROP_SCAN)))) {
			pri_hdr->aux_ptr = 1;
			pri_dptr += sizeof(struct pdu_adv_aux_ptr);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* No SyncInfo flag in primary channel PDU */

		/* Tx Power flag */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev += sizeof(uint8_t);
		}
		/* C1, Tx Power is optional on the LE 1M PHY, and reserved for
		 * for future use on the LE Coded PHY.
		 */
		if ((evt_prop & BT_HCI_LE_ADV_PROP_TX_POWER) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			pri_hdr->tx_pwr = 1;
			pri_dptr += sizeof(uint8_t);
		}

		/* Calc primary PDU len */
		len = ull_adv_aux_hdr_len_calc(pri_com_hdr, &pri_dptr);
		ull_adv_aux_hdr_len_fill(pri_com_hdr, len);

		/* Set PDU length */
		pdu->len = len;

		/* Start filling primary PDU payload based on flags */

		/* No AdvData in primary channel PDU */

		/* No ACAD in primary channel PDU */

		/* Tx Power */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev -= sizeof(uint8_t);
		}
		if (pri_hdr->tx_pwr) {
			uint8_t _tx_pwr;

			_tx_pwr = 0;
			if (tx_pwr) {
				if (*tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) {
					_tx_pwr = *tx_pwr;
				} else {
					/* No preference: report the value
					 * actually used back to the caller.
					 */
					*tx_pwr = _tx_pwr;
				}
			}

			pri_dptr -= sizeof(uint8_t);
			*pri_dptr = _tx_pwr;
		}

		/* No SyncInfo in primary channel PDU */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
		}
		if (pri_hdr->aux_ptr) {
			pri_dptr -= sizeof(struct pdu_adv_aux_ptr);
			ull_adv_aux_ptr_fill((void *)pri_dptr, 0U, phy_s);
		}
		adv->lll.phy_s = phy_s;
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* ADI */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev -= sizeof(struct pdu_adv_adi);
		}
		if (pri_hdr->adi) {
			struct pdu_adv_adi *adi;

			pri_dptr -= sizeof(struct pdu_adv_adi);

			/* NOTE: memmove shall handle overlapping buffers */
			memmove(pri_dptr, pri_dptr_prev,
				sizeof(struct pdu_adv_adi));

			adi = (void *)pri_dptr;
			PDU_ADV_ADI_SID_SET(adi, sid);
		}
		adv->sid = sid;

		/* No CTEInfo field in primary channel PDU */

		/* TargetA */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev -= BDADDR_SIZE;
		}
		if (pri_hdr->tgt_addr) {
			pri_dptr -= BDADDR_SIZE;
			/* NOTE: RPA will be updated on enable, if needed */
			memcpy(pri_dptr, direct_addr, BDADDR_SIZE);
		}

		/* NOTE: AdvA, filled at enable and RPA timeout */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Make sure aux is created if we have AuxPtr */
		if (pri_hdr->aux_ptr) {
			uint8_t pri_idx, sec_idx;
			uint8_t err;

			err = ull_adv_aux_hdr_set_clear(adv,
						ULL_ADV_PDU_HDR_FIELD_ADVA,
						0U, &own_addr_type,
						&pri_idx, &sec_idx);
			if (err) {
				/* TODO: cleanup? */
				return err;
			}

			lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
			lll_adv_data_enqueue(&adv->lll, pri_idx);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else if (pdu->len == 0) {
		/* Empty legacy PDU: payload is just AdvA */
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		pdu->len = BDADDR_SIZE;
	} else {

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		/* Switching back from directed/extended to a legacy type that
		 * carries AD data: restore the backup taken above.
		 */
		if (((pdu_type_prev == PDU_ADV_TYPE_DIRECT_IND) ||
		     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		      (pdu_type_prev == PDU_ADV_TYPE_EXT_IND))) &&
		    (pdu->type != PDU_ADV_TYPE_DIRECT_IND) &&
		    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
		     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
			/* Restore the legacy AD Data */
			memcpy(pdu->adv_ind.data, adv->ad_data_backup.data,
			       adv->ad_data_backup.len);
			pdu->len = offsetof(struct pdu_adv_adv_ind, data) +
				   adv->ad_data_backup.len;
		}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
	}

	/* Initialize LLL header with parent pointer so that ULL contexts
	 * can be referenced in functions having the LLL context reference.
	 */
	lll_hdr_init(&adv->lll, adv);

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Make sure new extended advertising set is initialized with no
		 * scan response data. Existing sets keep whatever data was set.
		 */
		if (is_pdu_type_changed) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_aux_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
			pdu->type = PDU_ADV_TYPE_AUX_SCAN_RSP;
			pdu->len = 0;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		pdu = lll_adv_scan_rsp_peek(&adv->lll);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (is_pdu_type_changed || !pdu) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Make sure legacy advertising set has scan response data
		 * initialized.
		 */
		pdu->type = PDU_ADV_TYPE_SCAN_RSP;
		pdu->rfu = 0;
		pdu->chan_sel = 0;
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		if (pdu->len == 0) {
			pdu->len = BDADDR_SIZE;
		}
	}

	return 0;
}
735 
736 #if defined(CONFIG_BT_CTLR_ADV_EXT)
737 uint8_t ll_adv_data_set(uint8_t handle, uint8_t len, uint8_t const *const data)
738 {
739 #else /* !CONFIG_BT_CTLR_ADV_EXT */
740 uint8_t ll_adv_data_set(uint8_t len, uint8_t const *const data)
741 {
742 	const uint8_t handle = 0;
743 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
744 	struct ll_adv_set *adv;
745 
746 	adv = ull_adv_set_get(handle);
747 	if (!adv) {
748 		return BT_HCI_ERR_CMD_DISALLOWED;
749 	}
750 
751 	return ull_adv_data_set(adv, len, data);
752 }
753 
754 #if defined(CONFIG_BT_CTLR_ADV_EXT)
755 uint8_t ll_adv_scan_rsp_set(uint8_t handle, uint8_t len,
756 			    uint8_t const *const data)
757 {
758 #else /* !CONFIG_BT_CTLR_ADV_EXT */
759 uint8_t ll_adv_scan_rsp_set(uint8_t len, uint8_t const *const data)
760 {
761 	const uint8_t handle = 0;
762 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
763 	struct ll_adv_set *adv;
764 
765 	adv = ull_adv_set_get(handle);
766 	if (!adv) {
767 		return BT_HCI_ERR_CMD_DISALLOWED;
768 	}
769 
770 	return ull_scan_rsp_set(adv, len, data);
771 }
772 
773 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
774 #if defined(CONFIG_BT_HCI_MESH_EXT)
775 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
776 		   uint8_t at_anchor, uint32_t ticks_anchor, uint8_t retry,
777 		   uint8_t scan_window, uint8_t scan_delay)
778 {
779 #else /* !CONFIG_BT_HCI_MESH_EXT */
780 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
781 		   uint16_t duration, uint8_t max_ext_adv_evts)
782 {
783 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
784 	struct ll_adv_sync_set *sync = NULL;
785 	uint8_t sync_is_started = 0U;
786 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
787 	struct ll_adv_aux_set *aux = NULL;
788 	uint8_t aux_is_started = 0U;
789 	uint32_t ticks_anchor;
790 #endif /* !CONFIG_BT_HCI_MESH_EXT */
791 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
792 uint8_t ll_adv_enable(uint8_t enable)
793 {
794 	uint8_t const handle = 0;
795 	uint32_t ticks_anchor;
796 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
797 	uint32_t ticks_slot_overhead;
798 	uint32_t ticks_slot_offset;
799 	uint32_t volatile ret_cb;
800 	struct pdu_adv *pdu_scan;
801 	struct pdu_adv *pdu_adv;
802 	struct ll_adv_set *adv;
803 	struct lll_adv *lll;
804 	uint8_t hci_err;
805 	uint32_t ret;
806 
807 	if (!enable) {
808 		return disable(handle);
809 	}
810 
811 	adv = is_disabled_get(handle);
812 	if (!adv) {
813 		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
814 		 * Enabling advertising when it is already enabled can cause the
815 		 * random address to change. As the current implementation does
816 		 * does not update RPAs on every advertising enable, only on
817 		 * `rpa_timeout_ms` timeout, we are not going to implement the
818 		 * "can cause the random address to change" for legacy
819 		 * advertisements.
820 		 */
821 
822 		/* If HCI LE Set Extended Advertising Enable command is sent
823 		 * again for an advertising set while that set is enabled, the
824 		 * timer used for duration and the number of events counter are
825 		 * reset and any change to the random address shall take effect.
826 		 */
827 		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT) ||
828 		    IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
829 #if defined(CONFIG_BT_CTLR_ADV_EXT)
830 			if (ll_adv_cmds_is_ext()) {
831 				enum node_rx_type volatile *type;
832 
833 				adv = ull_adv_is_enabled_get(handle);
834 				if (!adv) {
835 					/* This should not be happening as
836 					 * is_disabled_get failed.
837 					 */
838 					return BT_HCI_ERR_CMD_DISALLOWED;
839 				}
840 
841 				/* Change random address in the primary or
842 				 * auxiliary PDU as necessary.
843 				 */
844 				lll = &adv->lll;
845 				pdu_adv = lll_adv_data_peek(lll);
846 				pdu_scan = lll_adv_scan_rsp_peek(lll);
847 				hci_err = adv_scan_pdu_addr_update(adv,
848 								   pdu_adv,
849 								   pdu_scan);
850 				if (hci_err) {
851 					return hci_err;
852 				}
853 
854 				if (!adv->lll.node_rx_adv_term) {
855 					/* This should not be happening,
856 					 * adv->is_enabled would be 0 if
857 					 * node_rx_adv_term is released back to
858 					 * pool.
859 					 */
860 					return BT_HCI_ERR_CMD_DISALLOWED;
861 				}
862 
863 				/* Check advertising not terminated */
864 				type = &adv->lll.node_rx_adv_term->type;
865 				if (*type == NODE_RX_TYPE_NONE) {
866 					/* Reset event counter, update duration,
867 					 * and max events
868 					 */
869 					adv_max_events_duration_set(adv,
870 						duration, max_ext_adv_evts);
871 				}
872 
873 				/* Check the counter reset did not race with
874 				 * advertising terminated.
875 				 */
876 				if (*type != NODE_RX_TYPE_NONE) {
877 					/* Race with advertising terminated */
878 					return BT_HCI_ERR_CMD_DISALLOWED;
879 				}
880 			}
881 #endif /* CONFIG_BT_CTLR_ADV_EXT */
882 
883 			return 0;
884 		}
885 
886 		/* Fail on being strict as a legacy controller, valid only under
887 		 * Bluetooth Specification v4.x.
888 		 * Bluetooth Specification v5.0 and above shall not fail to
889 		 * enable already enabled advertising.
890 		 */
891 		return BT_HCI_ERR_CMD_DISALLOWED;
892 	}
893 
894 	lll = &adv->lll;
895 
896 #if defined(CONFIG_BT_CTLR_PRIVACY)
897 	lll->rl_idx = FILTER_IDX_NONE;
898 
899 	/* Prepare filter accept list and optionally resolving list */
900 	ull_filter_adv_update(lll->filter_policy);
901 
902 	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
903 	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
904 		/* Look up the resolving list */
905 		lll->rl_idx = ull_filter_rl_find(adv->peer_addr_type,
906 						 adv->peer_addr, NULL);
907 
908 		if (lll->rl_idx != FILTER_IDX_NONE) {
909 			/* Generate RPAs if required */
910 			ull_filter_rpa_update(false);
911 		}
912 	}
913 #endif /* !CONFIG_BT_CTLR_PRIVACY */
914 
915 	pdu_adv = lll_adv_data_peek(lll);
916 	pdu_scan = lll_adv_scan_rsp_peek(lll);
917 
918 #if defined(CONFIG_BT_CTLR_ADV_EXT)
919 	if (!pdu_scan) {
920 		uint8_t err;
921 
922 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
923 			/* Should never happen */
924 			return BT_HCI_ERR_CMD_DISALLOWED;
925 		}
926 
927 		err = lll_adv_data_init(&adv->lll.scan_rsp);
928 		if (err) {
929 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
930 		}
931 
932 		pdu_scan = lll_adv_scan_rsp_peek(lll);
933 		init_pdu(pdu_scan, PDU_ADV_TYPE_SCAN_RSP);
934 	}
935 #endif /* CONFIG_BT_CTLR_ADV_EXT */
936 
937 	/* Update Bluetooth Device address in advertising and scan response
938 	 * PDUs.
939 	 */
940 	hci_err = adv_scan_pdu_addr_update(adv, pdu_adv, pdu_scan);
941 	if (hci_err) {
942 		return hci_err;
943 	}
944 
945 #if defined(CONFIG_BT_HCI_MESH_EXT)
946 	if (scan_delay) {
947 		if (ull_scan_is_enabled(0)) {
948 			return BT_HCI_ERR_CMD_DISALLOWED;
949 		}
950 
951 		lll->is_mesh = 1;
952 	}
953 #endif /* CONFIG_BT_HCI_MESH_EXT */
954 
955 #if defined(CONFIG_BT_PERIPHERAL)
956 	/* prepare connectable advertising */
957 	if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) ||
958 	    (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) ||
959 #if defined(CONFIG_BT_CTLR_ADV_EXT)
960 	    ((pdu_adv->type == PDU_ADV_TYPE_EXT_IND) &&
961 	     (pdu_adv->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN))
962 #else
963 	    0
964 #endif
965 	     ) {
966 		struct node_rx_pdu *node_rx;
967 		struct ll_conn *conn;
968 		struct lll_conn *conn_lll;
969 		void *link;
970 		int err;
971 
972 		if (lll->conn) {
973 			return BT_HCI_ERR_CMD_DISALLOWED;
974 		}
975 
976 		link = ll_rx_link_alloc();
977 		if (!link) {
978 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
979 		}
980 
981 		node_rx = ll_rx_alloc();
982 		if (!node_rx) {
983 			ll_rx_link_release(link);
984 
985 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
986 		}
987 
988 		conn = ll_conn_acquire();
989 		if (!conn) {
990 			ll_rx_release(node_rx);
991 			ll_rx_link_release(link);
992 
993 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
994 		}
995 
996 		conn_lll = &conn->lll;
997 		conn_lll->handle = 0xFFFF;
998 
999 		if (!conn_lll->link_tx_free) {
1000 			conn_lll->link_tx_free = &conn_lll->link_tx;
1001 		}
1002 
1003 		memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
1004 			  &conn_lll->memq_tx.tail);
1005 		conn_lll->link_tx_free = NULL;
1006 
1007 		conn_lll->packet_tx_head_len = 0;
1008 		conn_lll->packet_tx_head_offset = 0;
1009 
1010 		conn_lll->sn = 0;
1011 		conn_lll->nesn = 0;
1012 		conn_lll->empty = 0;
1013 
1014 #if defined(CONFIG_BT_CTLR_PHY)
1015 		conn_lll->phy_flags = 0;
1016 		if (0) {
1017 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1018 		} else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1019 			conn_lll->phy_tx = lll->phy_s;
1020 			conn_lll->phy_tx_time = lll->phy_s;
1021 			conn_lll->phy_rx = lll->phy_s;
1022 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1023 		} else {
1024 			conn_lll->phy_tx = PHY_1M;
1025 			conn_lll->phy_tx_time = PHY_1M;
1026 			conn_lll->phy_rx = PHY_1M;
1027 		}
1028 #endif /* CONFIG_BT_CTLR_PHY */
1029 
1030 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1031 		conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1032 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1033 		conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1034 		conn_lll->rssi_sample_count = 0;
1035 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1036 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1037 
1038 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
1039 		conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
1040 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
1041 
1042 		/* FIXME: BEGIN: Move to ULL? */
1043 		conn_lll->role = 1;
1044 		conn_lll->periph.initiated = 0;
1045 		conn_lll->periph.cancelled = 0;
1046 		conn_lll->data_chan_sel = 0;
1047 		conn_lll->data_chan_use = 0;
1048 		conn_lll->event_counter = 0;
1049 
1050 		conn_lll->latency_prepare = 0;
1051 		conn_lll->latency_event = 0;
1052 		conn_lll->periph.latency_enabled = 0;
1053 		conn_lll->periph.window_widening_prepare_us = 0;
1054 		conn_lll->periph.window_widening_event_us = 0;
1055 		conn_lll->periph.window_size_prepare_us = 0;
1056 		/* FIXME: END: Move to ULL? */
1057 #if defined(CONFIG_BT_CTLR_CONN_META)
1058 		memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
1059 #endif /* CONFIG_BT_CTLR_CONN_META */
1060 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1061 		conn_lll->df_rx_cfg.is_initialized = 0U;
1062 		conn_lll->df_rx_cfg.hdr.elem_size = sizeof(struct lll_df_conn_rx_params);
1063 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1064 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1065 		conn_lll->df_tx_cfg.is_initialized = 0U;
1066 		conn_lll->df_tx_cfg.cte_rsp_en = 0U;
1067 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1068 		conn->connect_expire = 6;
1069 		conn->supervision_expire = 0;
1070 
1071 #if defined(CONFIG_BT_CTLR_LE_PING)
1072 		conn->apto_expire = 0U;
1073 		conn->appto_expire = 0U;
1074 #endif
1075 
1076 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
1077 		conn->own_id_addr_type = BT_ADDR_LE_NONE->type;
1078 		(void)memcpy(conn->own_id_addr, BT_ADDR_LE_NONE->a.val,
1079 			     sizeof(conn->own_id_addr));
1080 		conn->peer_id_addr_type = BT_ADDR_LE_NONE->type;
1081 		(void)memcpy(conn->peer_id_addr, BT_ADDR_LE_NONE->a.val,
1082 			     sizeof(conn->peer_id_addr));
1083 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1084 
1085 		/* Re-initialize the control procedure data structures */
1086 		ull_llcp_init(conn);
1087 
1088 		conn->llcp_terminate.reason_final = 0;
1089 		/* NOTE: use allocated link for generating dedicated
1090 		 * terminate ind rx node
1091 		 */
1092 		conn->llcp_terminate.node_rx.hdr.link = link;
1093 
1094 #if defined(CONFIG_BT_CTLR_PHY)
1095 		conn->phy_pref_tx = ull_conn_default_phy_tx_get();
1096 		conn->phy_pref_rx = ull_conn_default_phy_rx_get();
1097 #endif /* CONFIG_BT_CTLR_PHY */
1098 
1099 #if defined(CONFIG_BT_CTLR_LE_ENC)
1100 		conn->pause_rx_data = 0U;
1101 #endif /* CONFIG_BT_CTLR_LE_ENC */
1102 
1103 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1104 		uint8_t phy_in_use = PHY_1M;
1105 
1106 
1107 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1108 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1109 			phy_in_use = lll->phy_s;
1110 		}
1111 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1112 
1113 		ull_dle_init(conn, phy_in_use);
1114 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1115 
1116 		/* Re-initialize the Tx Q */
1117 		ull_tx_q_init(&conn->tx_q);
1118 
1119 		/* NOTE: using same link as supplied for terminate ind */
1120 		adv->link_cc_free = link;
1121 		adv->node_rx_cc_free = node_rx;
1122 		lll->conn = conn_lll;
1123 
1124 		ull_hdr_init(&conn->ull);
1125 		lll_hdr_init(&conn->lll, conn);
1126 
1127 		/* wait for stable clocks */
1128 		err = lll_clock_wait();
1129 		if (err) {
1130 			conn_release(adv);
1131 
1132 			return BT_HCI_ERR_HW_FAILURE;
1133 		}
1134 	}
1135 #endif /* CONFIG_BT_PERIPHERAL */
1136 
1137 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1138 	if (ll_adv_cmds_is_ext()) {
1139 		struct node_rx_pdu *node_rx_adv_term;
1140 		void *link_adv_term;
1141 
1142 		/* The alloc here used for ext adv termination event */
1143 		link_adv_term = ll_rx_link_alloc();
1144 		if (!link_adv_term) {
1145 #if defined(CONFIG_BT_PERIPHERAL)
1146 			if (adv->lll.conn) {
1147 				conn_release(adv);
1148 			}
1149 #endif /* CONFIG_BT_PERIPHERAL */
1150 
1151 			/* TODO: figure out right return value */
1152 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1153 		}
1154 
1155 		node_rx_adv_term = ll_rx_alloc();
1156 		if (!node_rx_adv_term) {
1157 #if defined(CONFIG_BT_PERIPHERAL)
1158 			if (adv->lll.conn) {
1159 				conn_release(adv);
1160 			}
1161 #endif /* CONFIG_BT_PERIPHERAL */
1162 
1163 			ll_rx_link_release(link_adv_term);
1164 
1165 			/* TODO: figure out right return value */
1166 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1167 		}
1168 
1169 		node_rx_adv_term->hdr.type = NODE_RX_TYPE_NONE;
1170 
1171 		node_rx_adv_term->hdr.link = (void *)link_adv_term;
1172 		adv->lll.node_rx_adv_term = (void *)node_rx_adv_term;
1173 
1174 		if (0) {
1175 #if defined(CONFIG_BT_PERIPHERAL)
1176 		} else if (lll->is_hdcd) {
1177 			adv_max_events_duration_set(adv, 0U, 0U);
1178 #endif /* CONFIG_BT_PERIPHERAL */
1179 		} else {
1180 			adv_max_events_duration_set(adv, duration,
1181 						    max_ext_adv_evts);
1182 		}
1183 	} else {
1184 		adv->lll.node_rx_adv_term = NULL;
1185 		adv_max_events_duration_set(adv, 0U, 0U);
1186 	}
1187 
1188 	const uint8_t phy = lll->phy_p;
1189 	const uint8_t phy_flags = lll->phy_flags;
1190 
1191 	adv->event_counter = 0U;
1192 #else
1193 	/* Legacy ADV only supports LE_1M PHY */
1194 	const uint8_t phy = PHY_1M;
1195 	const uint8_t phy_flags = 0U;
1196 #endif
1197 
1198 	/* For now we adv on all channels enabled in channel map */
1199 	uint8_t ch_map = lll->chan_map;
1200 	const uint8_t adv_chn_cnt = util_ones_count_get(&ch_map, sizeof(ch_map));
1201 
1202 	if (adv_chn_cnt == 0) {
1203 		/* ADV needs at least one channel */
1204 		goto failure_cleanup;
1205 	}
1206 
1207 	/* Calculate the advertising time reservation */
1208 	uint16_t time_us = adv_time_get(pdu_adv, pdu_scan, adv_chn_cnt, phy,
1209 					phy_flags);
1210 
1211 	uint16_t interval = adv->interval;
1212 #if defined(CONFIG_BT_HCI_MESH_EXT)
1213 	if (lll->is_mesh) {
1214 		uint16_t interval_min_us;
1215 
1216 		_radio.advertiser.retry = retry;
1217 		_radio.advertiser.scan_delay_ms = scan_delay;
1218 		_radio.advertiser.scan_window_ms = scan_window;
1219 
1220 		interval_min_us = time_us +
1221 				  (scan_delay + scan_window) * USEC_PER_MSEC;
1222 		if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
1223 			interval = DIV_ROUND_UP(interval_min_us,
1224 						    SCAN_INT_UNIT_US);
1225 		}
1226 
1227 		/* passive scanning */
1228 		_radio.scanner.type = 0;
1229 
1230 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1231 		/* TODO: Coded PHY support */
1232 		_radio.scanner.phy = 0;
1233 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1234 
1235 #if defined(CONFIG_BT_CTLR_PRIVACY)
1236 		/* TODO: Privacy support */
1237 		_radio.scanner.rpa_gen = 0;
1238 		_radio.scanner.rl_idx = rl_idx;
1239 #endif /* CONFIG_BT_CTLR_PRIVACY */
1240 
1241 		_radio.scanner.filter_policy = filter_policy;
1242 	}
1243 #endif /* CONFIG_BT_HCI_MESH_EXT */
1244 
1245 	/* Initialize ULL context before radio event scheduling is started. */
1246 	ull_hdr_init(&adv->ull);
1247 
1248 	/* TODO: active_to_start feature port */
1249 	adv->ull.ticks_active_to_start = 0;
1250 	adv->ull.ticks_prepare_to_start =
1251 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1252 	adv->ull.ticks_preempt_to_start =
1253 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1254 	adv->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
1255 
1256 	ticks_slot_offset = MAX(adv->ull.ticks_active_to_start,
1257 				adv->ull.ticks_prepare_to_start);
1258 
1259 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1260 		ticks_slot_overhead = ticks_slot_offset;
1261 	} else {
1262 		ticks_slot_overhead = 0;
1263 	}
1264 
1265 #if !defined(CONFIG_BT_HCI_MESH_EXT)
1266 	ticks_anchor = ticker_ticks_now_get();
1267 	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1268 
1269 #else /* CONFIG_BT_HCI_MESH_EXT */
1270 	if (!at_anchor) {
1271 		ticks_anchor = ticker_ticks_now_get();
1272 	}
1273 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1274 
1275 	/* High Duty Cycle Directed Advertising if interval is 0. */
1276 #if defined(CONFIG_BT_PERIPHERAL)
1277 	lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND);
1278 	if (lll->is_hdcd) {
1279 		ret_cb = TICKER_STATUS_BUSY;
1280 
1281 #if defined(CONFIG_BT_TICKER_EXT)
1282 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1283 		ll_adv_ticker_ext[handle].ticks_slot_window = 0;
1284 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
1285 
1286 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1287 		ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1288 		ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1289 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1290 
1291 		ret = ticker_start_ext(
1292 #else /* !CONFIG_BT_TICKER_EXT */
1293 		ret = ticker_start(
1294 #endif /* !CONFIG_BT_TICKER_EXT */
1295 				   TICKER_INSTANCE_ID_CTLR,
1296 				   TICKER_USER_ID_THREAD,
1297 				   (TICKER_ID_ADV_BASE + handle),
1298 				   ticks_anchor, 0,
1299 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1300 				   TICKER_NULL_REMAINDER, TICKER_NULL_LAZY,
1301 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1302 				   ticker_cb, adv,
1303 				   ull_ticker_status_give, (void *)&ret_cb
1304 #if defined(CONFIG_BT_TICKER_EXT)
1305 				   ,
1306 				   &ll_adv_ticker_ext[handle]
1307 #endif /* CONFIG_BT_TICKER_EXT */
1308 				   );
1309 		ret = ull_ticker_status_take(ret, &ret_cb);
1310 		if (ret != TICKER_STATUS_SUCCESS) {
1311 			goto failure_cleanup;
1312 		}
1313 
1314 		ret_cb = TICKER_STATUS_BUSY;
1315 		ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1316 				   TICKER_USER_ID_THREAD,
1317 				   TICKER_ID_ADV_STOP, ticks_anchor,
1318 				   HAL_TICKER_US_TO_TICKS(ticks_slot_offset +
1319 							  (1280 * 1000)),
1320 				   TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER,
1321 				   TICKER_NULL_LAZY, TICKER_NULL_SLOT,
1322 				   ticker_stop_cb, adv,
1323 				   ull_ticker_status_give, (void *)&ret_cb);
1324 	} else
1325 #endif /* CONFIG_BT_PERIPHERAL */
1326 	{
1327 		const uint32_t ticks_slot = adv->ull.ticks_slot +
1328 					 ticks_slot_overhead;
1329 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1330 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1331 		uint8_t pri_idx, sec_idx;
1332 
1333 		/* Add sync_info into auxiliary PDU */
1334 		if (lll->sync) {
1335 			sync = HDR_LLL2ULL(lll->sync);
1336 			if (sync->is_enabled && !sync->is_started) {
1337 				struct pdu_adv_sync_info *sync_info;
1338 				uint8_t value[1 + sizeof(sync_info)];
1339 				uint8_t err;
1340 
1341 				err = ull_adv_aux_hdr_set_clear(adv,
1342 						ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
1343 						0U, value, &pri_idx, &sec_idx);
1344 				if (err) {
1345 					return err;
1346 				}
1347 
1348 				/* First byte in the length-value encoded
1349 				 * parameter is size of sync_info structure,
1350 				 * followed by pointer to sync_info in the
1351 				 * PDU.
1352 				 */
1353 				memcpy(&sync_info, &value[1], sizeof(sync_info));
1354 				ull_adv_sync_info_fill(sync, sync_info);
1355 			} else {
1356 				/* Do not start periodic advertising */
1357 				sync = NULL;
1358 			}
1359 		}
1360 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1361 
1362 		if (lll->aux) {
1363 			struct lll_adv_aux *lll_aux = lll->aux;
1364 			uint32_t ticks_slot_overhead_aux;
1365 			uint32_t ticks_anchor_aux;
1366 
1367 			aux = HDR_LLL2ULL(lll_aux);
1368 
1369 			/* Schedule auxiliary PDU after primary channel
1370 			 * PDUs.
1371 			 * Reduce the MAFS offset by the Event Overhead
1372 			 * so that actual radio air packet start as
1373 			 * close as possible after the MAFS gap.
1374 			 * Add 2 ticks offset as compensation towards
1375 			 * the +/- 1 tick ticker scheduling jitter due
1376 			 * to accumulation of remainder to maintain
1377 			 * average ticker interval.
1378 			 */
1379 			ticks_anchor_aux =
1380 				ticks_anchor + ticks_slot +
1381 				HAL_TICKER_US_TO_TICKS(
1382 					MAX(EVENT_MAFS_US,
1383 					    EVENT_OVERHEAD_START_US) -
1384 					EVENT_OVERHEAD_START_US +
1385 					(EVENT_TICKER_RES_MARGIN_US << 1));
1386 
1387 			ticks_slot_overhead_aux =
1388 				ull_adv_aux_evt_init(aux, &ticks_anchor_aux);
1389 
1390 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1391 			/* Start periodic advertising if enabled and not already
1392 			 * started.
1393 			 */
1394 			if (sync) {
1395 				uint32_t ticks_slot_overhead2;
1396 				uint32_t ticks_slot_aux;
1397 
1398 #if defined(CONFIG_BT_CTLR_ADV_RESERVE_MAX)
1399 				uint32_t us_slot;
1400 
1401 				us_slot = ull_adv_aux_time_get(aux,
1402 						PDU_AC_PAYLOAD_SIZE_MAX,
1403 						PDU_AC_PAYLOAD_SIZE_MAX);
1404 				ticks_slot_aux =
1405 					HAL_TICKER_US_TO_TICKS(us_slot) +
1406 					ticks_slot_overhead_aux;
1407 #else
1408 				ticks_slot_aux = aux->ull.ticks_slot +
1409 						 ticks_slot_overhead_aux;
1410 #endif
1411 
1412 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET) || \
1413 	(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET == 0)
1414 				/* Schedule periodic advertising PDU after
1415 				 * auxiliary PDUs.
1416 				 * Reduce the MAFS offset by the Event Overhead
1417 				 * so that actual radio air packet start as
1418 				 * close as possible after the MAFS gap.
1419 				 * Add 2 ticks offset as compensation towards
1420 				 * the +/- 1 tick ticker scheduling jitter due
1421 				 * to accumulation of remainder to maintain
1422 				 * average ticker interval.
1423 				 */
1424 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1425 					ticks_slot_aux +
1426 					HAL_TICKER_US_TO_TICKS(
1427 						MAX(EVENT_MAFS_US,
1428 						    EVENT_OVERHEAD_START_US) -
1429 						EVENT_OVERHEAD_START_US +
1430 						(EVENT_TICKER_RES_MARGIN_US << 1));
1431 
1432 #else /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1433 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1434 					HAL_TICKER_US_TO_TICKS(
1435 						CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET);
1436 
1437 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1438 
1439 				ticks_slot_overhead2 = ull_adv_sync_evt_init(adv, sync, NULL);
1440 				ret = ull_adv_sync_start(adv, sync,
1441 							 ticks_anchor_sync,
1442 							 ticks_slot_overhead2);
1443 				if (ret) {
1444 					goto failure_cleanup;
1445 				}
1446 
1447 				sync_is_started = 1U;
1448 
1449 				lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
1450 				lll_adv_data_enqueue(lll, pri_idx);
1451 			} else {
1452 				/* TODO: Find the anchor before the group of
1453 				 *       active Periodic Advertising events, so
1454 				 *       that auxiliary sets are grouped such
1455 				 *       that auxiliary sets and Periodic
1456 				 *       Advertising sets are non-overlapping
1457 				 *       for the same event interval.
1458 				 */
1459 			}
1460 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1461 
1462 			/* Keep aux interval equal or higher than primary PDU
1463 			 * interval.
1464 			 * Use periodic interval units to represent the
1465 			 * periodic behavior of scheduling of AUX_ADV_IND PDUs
1466 			 * so that it is grouped with similar interval units
1467 			 * used for ACL Connections, Periodic Advertising and
1468 			 * BIG radio events.
1469 			 */
1470 			aux->interval =
1471 				DIV_ROUND_UP(((uint64_t)adv->interval *
1472 						  ADV_INT_UNIT_US) +
1473 						 HAL_TICKER_TICKS_TO_US(
1474 							ULL_ADV_RANDOM_DELAY),
1475 						 PERIODIC_INT_UNIT_US);
1476 
1477 			ret = ull_adv_aux_start(aux, ticks_anchor_aux,
1478 						ticks_slot_overhead_aux);
1479 			if (ret) {
1480 				goto failure_cleanup;
1481 			}
1482 
1483 			aux_is_started = 1U;
1484 		}
1485 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1486 
1487 		ret_cb = TICKER_STATUS_BUSY;
1488 
1489 #if defined(CONFIG_BT_TICKER_EXT)
1490 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1491 		ll_adv_ticker_ext[handle].ticks_slot_window =
1492 			ULL_ADV_RANDOM_DELAY + ticks_slot;
1493 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
1494 
1495 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1496 		if (lll->aux) {
1497 			uint8_t aux_handle = ull_adv_aux_handle_get(aux);
1498 
1499 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_ID_ADV_AUX_BASE +
1500 								  aux_handle;
1501 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1502 		} else {
1503 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1504 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1505 		}
1506 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1507 
1508 		ret = ticker_start_ext(
1509 #else /* !CONFIG_BT_TICKER_EXT */
1510 		ret = ticker_start(
1511 #endif /* !CONFIG_BT_TICKER_EXT */
1512 				   TICKER_INSTANCE_ID_CTLR,
1513 				   TICKER_USER_ID_THREAD,
1514 				   (TICKER_ID_ADV_BASE + handle),
1515 				   ticks_anchor, 0,
1516 				   HAL_TICKER_US_TO_TICKS((uint64_t)interval *
1517 							  ADV_INT_UNIT_US),
1518 				   TICKER_NULL_REMAINDER,
1519 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1520 	!defined(CONFIG_BT_CTLR_LOW_LAT)
1521 				   /* Force expiry to ensure timing update */
1522 				   TICKER_LAZY_MUST_EXPIRE,
1523 #else
1524 				   TICKER_NULL_LAZY,
1525 #endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
1526 				   ticks_slot,
1527 				   ticker_cb, adv,
1528 				   ull_ticker_status_give, (void *)&ret_cb
1529 #if defined(CONFIG_BT_TICKER_EXT)
1530 				   ,
1531 				   &ll_adv_ticker_ext[handle]
1532 #endif /* CONFIG_BT_TICKER_EXT */
1533 				   );
1534 	}
1535 
1536 	ret = ull_ticker_status_take(ret, &ret_cb);
1537 	if (ret != TICKER_STATUS_SUCCESS) {
1538 		goto failure_cleanup;
1539 	}
1540 
1541 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1542 	if (aux_is_started) {
1543 		aux->is_started = aux_is_started;
1544 
1545 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1546 		if (sync_is_started) {
1547 			sync->is_started = sync_is_started;
1548 		}
1549 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1550 	}
1551 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1552 
1553 	adv->is_enabled = 1;
1554 
1555 #if defined(CONFIG_BT_CTLR_PRIVACY)
1556 #if defined(CONFIG_BT_HCI_MESH_EXT)
1557 	if (_radio.advertiser.is_mesh) {
1558 		_radio.scanner.is_enabled = 1;
1559 
1560 		ull_filter_adv_scan_state_cb(BIT(0) | BIT(1));
1561 	}
1562 #else /* !CONFIG_BT_HCI_MESH_EXT */
1563 	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
1564 		ull_filter_adv_scan_state_cb(BIT(0));
1565 	}
1566 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1567 #endif /* CONFIG_BT_CTLR_PRIVACY */
1568 
1569 	return 0;
1570 
1571 failure_cleanup:
1572 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1573 	if (aux_is_started) {
1574 		/* TODO: Stop extended advertising and release resources */
1575 	}
1576 
1577 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1578 	if (sync_is_started) {
1579 		/* TODO: Stop periodic advertising and release resources */
1580 	}
1581 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1582 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1583 
1584 #if defined(CONFIG_BT_PERIPHERAL)
1585 	if (adv->lll.conn) {
1586 		conn_release(adv);
1587 	}
1588 #endif /* CONFIG_BT_PERIPHERAL */
1589 
1590 	return BT_HCI_ERR_CMD_DISALLOWED;
1591 }
1592 
/* Initialize the advertising module and, when built in, the extended
 * advertising auxiliary and periodic advertising sub-modules.
 *
 * Returns 0 on success, else a negative error from a sub-module init.
 */
int ull_adv_init(void)
{
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		const int aux_err = ull_adv_aux_init();

		if (aux_err) {
			return aux_err;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	{
		const int sync_err = ull_adv_sync_init();

		if (sync_err) {
			return sync_err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Common (non-extended) module state initialization. */
	return init_reset();
}
1622 
1623 uint8_t ll_adv_disable_all(void)
1624 {
1625 	uint8_t handle;
1626 
1627 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1628 		(void)disable(handle);
1629 	}
1630 
1631 	return 0U;
1632 }
1633 
/* Reset the advertising module state.
 *
 * Disables all sets, restores the HCI command mode selection (raw HCI
 * builds) and resets periodic advertising state when built in.
 */
int ull_adv_reset(void)
{
	(void)ll_adv_disable_all();

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_HCI_RAW)
	/* Allow either legacy or extended advertising commands again. */
	ll_adv_cmds = LL_ADV_CMDS_ANY;
#endif

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	{
		const int err = ull_adv_sync_reset();

		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
1656 
1657 int ull_adv_reset_finalize(void)
1658 {
1659 	uint8_t handle;
1660 	int err;
1661 
1662 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1663 #if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
1664 	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
1665 		err = ull_adv_aux_reset_finalize();
1666 		if (err) {
1667 			return err;
1668 		}
1669 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1670 		err = ull_adv_sync_reset_finalize();
1671 		if (err) {
1672 			return err;
1673 		}
1674 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1675 	}
1676 #endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
1677 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1678 
1679 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1680 		struct ll_adv_set *adv = &ll_adv[handle];
1681 		struct lll_adv *lll = &adv->lll;
1682 
1683 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1684 		adv->is_created = 0;
1685 		lll->aux = NULL;
1686 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1687 		lll->sync = NULL;
1688 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1689 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1690 		lll_adv_data_reset(&lll->adv_data);
1691 		lll_adv_data_reset(&lll->scan_rsp);
1692 	}
1693 
1694 	err = init_reset();
1695 	if (err) {
1696 		return err;
1697 	}
1698 
1699 	return 0;
1700 }
1701 
1702 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle)
1703 {
1704 	if (handle >= BT_CTLR_ADV_SET) {
1705 		return NULL;
1706 	}
1707 
1708 	return &ll_adv[handle];
1709 }
1710 
1711 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv)
1712 {
1713 	return ((uint8_t *)adv - (uint8_t *)ll_adv) / sizeof(*adv);
1714 }
1715 
1716 uint16_t ull_adv_lll_handle_get(struct lll_adv *lll)
1717 {
1718 	return ull_adv_handle_get(HDR_LLL2ULL(lll));
1719 }
1720 
1721 inline struct ll_adv_set *ull_adv_is_enabled_get(uint8_t handle)
1722 {
1723 	struct ll_adv_set *adv;
1724 
1725 	adv = ull_adv_set_get(handle);
1726 	if (!adv || !adv->is_enabled) {
1727 		return NULL;
1728 	}
1729 
1730 	return adv;
1731 }
1732 
1733 int ull_adv_is_enabled(uint8_t handle)
1734 {
1735 	struct ll_adv_set *adv;
1736 
1737 	adv = ull_adv_is_enabled_get(handle);
1738 
1739 	return adv != NULL;
1740 }
1741 
1742 uint32_t ull_adv_filter_pol_get(uint8_t handle)
1743 {
1744 	struct ll_adv_set *adv;
1745 
1746 	adv = ull_adv_is_enabled_get(handle);
1747 	if (!adv) {
1748 		return 0;
1749 	}
1750 
1751 	return adv->lll.filter_policy;
1752 }
1753 
1754 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1755 struct ll_adv_set *ull_adv_is_created_get(uint8_t handle)
1756 {
1757 	struct ll_adv_set *adv;
1758 
1759 	adv = ull_adv_set_get(handle);
1760 	if (!adv || !adv->is_created) {
1761 		return NULL;
1762 	}
1763 
1764 	return adv;
1765 }
1766 
1767 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1768 void ull_adv_aux_created(struct ll_adv_set *adv)
1769 {
1770 	if (adv->lll.aux && adv->is_enabled) {
1771 		uint8_t aux_handle = ull_adv_aux_handle_get(HDR_LLL2ULL(adv->lll.aux));
1772 		uint8_t handle = ull_adv_handle_get(adv);
1773 
1774 		ticker_update_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1775 			   (TICKER_ID_ADV_BASE + handle), 0, 0, 0, 0, 0, 0,
1776 			   ticker_update_op_cb, adv, 0,
1777 			   TICKER_ID_ADV_AUX_BASE + aux_handle);
1778 	}
1779 }
1780 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1781 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1782 
/* Set legacy Advertising Data (AD) on an advertising set.
 *
 * Copies @p len bytes of @p data into the inactive half of the
 * double-buffered advertising PDU, carries over header fields and the
 * advertiser address from the previous PDU, updates the set's time
 * reservation when the set is enabled, and finally enqueues the new PDU
 * for LLL to pick up.
 *
 * Returns 0 on success; BT_HCI_ERR_INVALID_PARAM when @p len exceeds
 * the legacy AD capacity; BT_HCI_ERR_CMD_DISALLOWED when the current
 * PDU is directed/extended and no AD data backup is configured; or an
 * error from ull_adv_time_update().
 */
uint8_t ull_adv_data_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	/* Check invalid AD Data length */
	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	prev = lll_adv_data_peek(&adv->lll);

	/* Dont update data if directed, back it up */
	if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
	    (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	     (prev->type == PDU_ADV_TYPE_EXT_IND))) {
#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		/* Update the backup AD Data */
		adv->ad_data_backup.len = len;
		memcpy(adv->ad_data_backup.data, data, adv->ad_data_backup.len);
		return 0;

#else /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
	}

	/* update adv pdu fields. */
	pdu = lll_adv_data_alloc(&adv->lll, &idx);

	/* check for race condition with LLL ISR */
	if (IS_ENABLED(CONFIG_ASSERT)) {
		uint8_t idx_test;

		/* A second alloc must return the same free-buffer index;
		 * a mismatch indicates LLL consumed a buffer concurrently.
		 */
		lll_adv_data_alloc(&adv->lll, &idx_test);
		__ASSERT((idx == idx_test), "Probable AD Data Corruption.\n");
	}

	/* Preserve the PDU type of the active buffer. */
	pdu->type = prev->type;
	pdu->rfu = 0U;

	/* Carry over channel selection algorithm #2 bit only when built in. */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		pdu->chan_sel = prev->chan_sel;
	} else {
		pdu->chan_sel = 0U;
	}

	/* Copy address flags, advertiser address and the new AD data. */
	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = prev->rx_addr;
	memcpy(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE);
	memcpy(&pdu->adv_ind.data[0], data, len);
	pdu->len = BDADDR_SIZE + len;

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_scan = lll_adv_scan_rsp_peek(lll);

		/* Re-reserve event time before the new PDU goes live. */
		err = ull_adv_time_update(adv, pdu, pdu_scan);
		if (err) {
			return err;
		}
	}

	/* Make the new PDU the active buffer for LLL. */
	lll_adv_data_enqueue(&adv->lll, idx);

	return 0;
}
1857 
/* Set legacy Scan Response data on an advertising set.
 *
 * Lazily initializes the scan response double-buffer on first use,
 * builds a SCAN_RSP PDU with @p len bytes of @p data, updates the time
 * reservation when the set is enabled and scannable, then enqueues the
 * PDU for LLL.
 *
 * Returns 0 on success; BT_HCI_ERR_INVALID_PARAM when @p len exceeds
 * the legacy capacity; BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when the
 * scan response buffer cannot be initialized; or an error from
 * ull_adv_time_update().
 */
uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* update scan pdu fields. */
	prev = lll_adv_scan_rsp_peek(&adv->lll);
	if (!prev) {
		uint8_t err;

		/* First use: allocate and initialize the scan response
		 * double-buffer with an empty SCAN_RSP PDU.
		 */
		err = lll_adv_data_init(&adv->lll.scan_rsp);
		if (err) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}

		prev = lll_adv_scan_rsp_peek(&adv->lll);
		init_pdu(prev, PDU_ADV_TYPE_SCAN_RSP);
	}

	/* Fill the inactive buffer; address and tx_addr carried over. */
	pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
	pdu->type = PDU_ADV_TYPE_SCAN_RSP;
	pdu->rfu = 0;
	pdu->chan_sel = 0;
	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = 0;
	pdu->len = BDADDR_SIZE + len;
	memcpy(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE);
	memcpy(&pdu->scan_rsp.data[0], data, len);

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_adv_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_adv_scan = lll_adv_data_peek(lll);

		/* Only scannable PDU types reserve time for a scan
		 * response exchange.
		 */
		if ((pdu_adv_scan->type == PDU_ADV_TYPE_ADV_IND) ||
		    (pdu_adv_scan->type == PDU_ADV_TYPE_SCAN_IND)) {
			err = ull_adv_time_update(adv, pdu_adv_scan, pdu);
			if (err) {
				return err;
			}
		}
	}

	/* Make the new scan response PDU active for LLL. */
	lll_adv_scan_rsp_enqueue(&adv->lll, idx);

	return 0;
}
1915 
/* Push an advertising set's primary ticker by a pseudo-random delay.
 *
 * Picks a random delay in ticks within
 * (ticks_delay_window_offset .. ticks_delay_window_offset +
 * ticks_delay_window], then requests a ticker update that adds the
 * delay and subtracts @p ticks_adjust_minus. @p fp_op_func receives the
 * asynchronous update result; it may be NULL when the caller ignores
 * the outcome.
 *
 * @p ticks_delay_window must be non-zero (used as modulo divisor).
 *
 * Returns the random delay applied, in ticks.
 */
static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_window,
				   uint32_t ticks_delay_window_offset,
				   uint32_t ticks_adjust_minus,
				   ticker_op_func fp_op_func)
{
	uint32_t random_delay;
	uint32_t ret;

	/* Get pseudo-random number in the range [0..ticks_delay_window].
	 * Please note that using modulo of 2^32 sample space has an uneven
	 * distribution, slightly favoring smaller values.
	 */
	lll_rand_isr_get(&random_delay, sizeof(random_delay));
	random_delay %= ticks_delay_window;
	random_delay += (ticks_delay_window_offset + 1);

	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_ULL_HIGH,
			    TICKER_ID_ADV_BASE + ull_adv_handle_get(adv),
			    random_delay,
			    ticks_adjust_minus, 0, 0, 0, 0,
			    fp_op_func, adv);

	/* Without an op callback the update result cannot be observed, so
	 * a failure status is tolerated only in that case.
	 */
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY) ||
		  (fp_op_func == NULL));

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	/* Remember the applied delay for later re-scheduling math. */
	adv->delay = random_delay;
#endif
	return random_delay;
}
1948 
1949 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
1950 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
/* Handle an advertising event-done notification.
 *
 * With JIT scheduling, an aborted/late advertising event is re-scheduled
 * within the remaining random-delay ("perturbation") window when enough
 * time is left. With extended advertising, checks whether the set's
 * max-events or duration limit has been reached and, if so, generates an
 * EXT_ADV_TERMINATE rx node and stops the (aux or primary) ticker.
 */
void ull_adv_done(struct node_rx_event_done *done)
{
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv_aux *lll_aux;
	struct node_rx_hdr *rx_hdr;
	uint8_t handle;
	uint32_t ret;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	struct ll_adv_set *adv;
	struct lll_adv *lll;

	/* Get reference to ULL context */
	adv = CONTAINER_OF(done->param, struct ll_adv_set, ull);
	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && done->extra.result != DONE_COMPLETED) {
		/* Event aborted or too late - try to re-schedule */
		uint32_t ticks_elapsed;
		uint32_t ticks_now;
		uint32_t delay_remain;

		const uint32_t prepare_overhead =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
		const uint32_t ticks_adv_airtime = adv->ticks_at_expire +
			prepare_overhead;

		ticks_elapsed = 0U;

		/* Ticks elapsed past the intended on-air time, if any. */
		ticks_now = cntr_cnt_get();
		if ((int32_t)(ticks_now - ticks_adv_airtime) > 0) {
			ticks_elapsed = ticks_now - ticks_adv_airtime;
		}

		if (adv->delay_at_expire + ticks_elapsed <= ULL_ADV_RANDOM_DELAY) {
			/* The perturbation window is still open */
			delay_remain = ULL_ADV_RANDOM_DELAY - (adv->delay_at_expire +
							       ticks_elapsed);
		} else {
			delay_remain = 0U;
		}

		/* Check if we have enough time to re-schedule */
		if (delay_remain > prepare_overhead) {
			uint32_t ticks_adjust_minus;
			uint32_t interval_us = adv->interval * ADV_INT_UNIT_US;

			/* Get negative ticker adjustment needed to pull back ADV one
			 * interval plus the randomized delay. This means that the ticker
			 * will be updated to expire in time frame of now + start
			 * overhead, until 10 ms window is exhausted.
			 */
			ticks_adjust_minus = HAL_TICKER_US_TO_TICKS(interval_us) + adv->delay;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
			if (adv->remain_duration_us > interval_us) {
				/* Reset remain_duration_us to value before last ticker expire
				 * to correct for the re-scheduling
				 */
				adv->remain_duration_us += interval_us +
							   HAL_TICKER_TICKS_TO_US(
								adv->delay_at_expire);
			}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

			/* Apply random delay in range [prepare_overhead..delay_remain].
			 * NOTE: This ticker_update may fail if update races with
			 * ticker_stop, e.g. from ull_periph_setup. This is not a problem
			 * and we can safely ignore the operation result.
			 */
			ticker_update_rand(adv, delay_remain - prepare_overhead,
					   prepare_overhead, ticks_adjust_minus, NULL);

			/* Delay from ticker_update_rand is in addition to the last random delay */
			adv->delay += adv->delay_at_expire;

			/* Score of the event was increased due to the result, but since
			 * we're getting a another chance we'll set it back.
			 */
			adv->lll.hdr.score -= 1;
		}
	}
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && adv->lll.aux) {
		/* Primary event of extended advertising done - wait for aux done */
		return;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Determine whether an advertising termination event is due. */
	if (adv->max_events && (adv->event_counter >= adv->max_events)) {
		adv->max_events = 0U;

		rx_hdr = (void *)lll->node_rx_adv_term;
		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_LIMIT_REACHED;
	} else if (adv->remain_duration_us &&
		   (adv->remain_duration_us <=
		    ((uint64_t)adv->interval * ADV_INT_UNIT_US))) {
		/* Less than one interval remaining: duration expired. */
		adv->remain_duration_us = 0U;

		rx_hdr = (void *)lll->node_rx_adv_term;
		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
	} else {
		/* Neither limit hit; advertising continues. */
		return;
	}

	handle = ull_adv_handle_get(adv);
	LL_ASSERT(handle < BT_CTLR_ADV_SET);

	/* Populate the pre-allocated terminate rx node.
	 * NOTE(review): conn_handle 0xffff presumably means "no connection
	 * established" - confirm against the HCI event consumer.
	 */
	rx_hdr->type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
	rx_hdr->handle = handle;
	rx_hdr->rx_ftr.param_adv_term.conn_handle = 0xffff;
	rx_hdr->rx_ftr.param_adv_term.num_events = adv->event_counter;

	/* Stop the aux ticker when an aux set exists, else the primary
	 * ticker; the stop op callback completes the termination.
	 */
	lll_aux = lll->aux;
	if (lll_aux) {
		struct ll_adv_aux_set *aux;
		uint8_t aux_handle;

		aux = HDR_LLL2ULL(lll_aux);
		aux_handle = ull_adv_aux_handle_get(aux);
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_HIGH,
				  (TICKER_ID_ADV_AUX_BASE + aux_handle),
				  ticker_stop_aux_op_cb, adv);
	} else {
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_HIGH,
				  (TICKER_ID_ADV_BASE + handle),
				  ticker_stop_ext_op_cb, adv);
	}

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* CONFIG_BT_CTLR_ADV_EXT */
}
2088 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2089 
/* Refresh the address fields (AdvA, and TargetA when present) of an
 * advertising PDU just before it is used on air.
 *
 * Handles both legacy PDUs and, with CONFIG_BT_CTLR_ADV_EXT, extended
 * advertising PDUs where field presence is indicated by the extended
 * header flags.
 *
 * @param adv Advertising set the PDU belongs to.
 * @param pdu PDU whose address fields are to be updated.
 *
 * @return Pointer to the AdvA field written into the PDU.
 */
const uint8_t *ull_adv_pdu_update_addrs(struct ll_adv_set *adv,
					struct pdu_adv *pdu)
{
	const uint8_t *adv_addr;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* Snapshot the extended header flags; a zero-length extended header
	 * carries no optional fields, so treat all flags as cleared.
	 */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}
#endif

	adv_addr = adva_update(adv, pdu);

	/* Update TargetA only if directed advertising PDU is supplied. Note
	 * that AUX_SCAN_REQ does not have TargetA flag set so it will be
	 * ignored here as expected.
	 */
	if ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	    ((pdu->type == PDU_ADV_TYPE_EXT_IND) && hdr_flags.tgt_addr) ||
#endif
	    0) {
		tgta_update(adv, pdu);
	}

	return adv_addr;
}
2123 
/* Recompute the radio event time reservation for an advertising set and,
 * unless JIT scheduling is used, resize the set's ticker slot accordingly.
 *
 * @param adv      Advertising set to update.
 * @param pdu      Current advertising PDU (determines on-air time).
 * @param pdu_scan Current scan response PDU (its length contributes to the
 *                 event time for scannable advertising).
 *
 * @return BT_HCI_ERR_SUCCESS on success, BT_HCI_ERR_CMD_DISALLOWED if the
 *         ticker slot update failed (e.g. racing with a ticker stop).
 */
uint8_t ull_adv_time_update(struct ll_adv_set *adv, struct pdu_adv *pdu,
			    struct pdu_adv *pdu_scan)
{
	struct lll_adv *lll;
	uint32_t time_ticks;
	uint8_t phy_flags;
	uint16_t time_us;
	uint8_t chan_map;
	uint8_t chan_cnt;
	uint8_t phy;

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	phy = lll->phy_p;
	phy_flags = lll->phy_flags;
#else
	/* Without extended advertising only the 1M PHY is used */
	phy = PHY_1M;
	phy_flags = 0U;
#endif

	/* Event time scales with the number of enabled primary channels */
	chan_map = lll->chan_map;
	chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map));
	time_us = adv_time_get(pdu, pdu_scan, chan_cnt, phy, phy_flags);
	time_ticks = HAL_TICKER_US_TO_TICKS_CEIL(time_us);

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint32_t volatile ret_cb;
	uint32_t ticks_minus;
	uint32_t ticks_plus;
	uint32_t ret;

	/* Shrink or grow the reserved ticker slot to the new event time */
	if (adv->ull.ticks_slot > time_ticks) {
		ticks_minus = adv->ull.ticks_slot - time_ticks;
		ticks_plus = 0U;
	} else if (adv->ull.ticks_slot < time_ticks) {
		ticks_minus = 0U;
		ticks_plus = time_ticks - adv->ull.ticks_slot;
	} else {
		/* Slot size unchanged, nothing to do */
		return BT_HCI_ERR_SUCCESS;
	}

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_ADV_BASE +
			     ull_adv_handle_get(adv)),
			    0, 0, ticks_plus, ticks_minus, 0, 0,
			    ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	adv->ull.ticks_slot = time_ticks;

	return BT_HCI_ERR_SUCCESS;
}
2183 
/* Initialize/reset all advertising sets to their power-on defaults.
 *
 * Called from the module's init/reset path; re-initializes the double
 * buffers for advertising data and scan response data of every set and
 * clears state that must not survive a reset.
 *
 * @return 0 (always succeeds).
 */
static int init_reset(void)
{
	uint8_t handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	/* With dynamic Tx power but no extended advertising there is only
	 * set #0; give it the default Tx power level.
	 */
	ll_adv[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		lll_adv_data_init(&ll_adv[handle].lll.adv_data);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* scan_rsp is not init'ed until we know if it is a legacy or extended scan rsp */
		memset(&ll_adv[handle].lll.scan_rsp, 0, sizeof(ll_adv[handle].lll.scan_rsp));
#else
		lll_adv_data_init(&ll_adv[handle].lll.scan_rsp);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
		/* Pointer to DF configuration must be cleared on reset. In other case it will point
		 * to a memory pool address that should be released. It may be used by the pool
		 * itself. In such situation it may cause error.
		 */
		ll_adv[handle].df_cfg = NULL;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
	}

	/* Make sure that set #0 is initialized with empty legacy PDUs. This is
	 * especially important if legacy HCI interface is used for advertising
	 * because it allows to enable advertising without any configuration,
	 * thus we need to have PDUs already initialized.
	 */
	init_set(&ll_adv[0]);

	return 0;
}
2221 
2222 static inline struct ll_adv_set *is_disabled_get(uint8_t handle)
2223 {
2224 	struct ll_adv_set *adv;
2225 
2226 	adv = ull_adv_set_get(handle);
2227 	if (!adv || adv->is_enabled) {
2228 		return NULL;
2229 	}
2230 
2231 	return adv;
2232 }
2233 
/* Calculate the worst-case radio event duration, in microseconds, for one
 * advertising event of the given PDU on the primary advertising channels.
 *
 * @param pdu         Advertising PDU to be transmitted.
 * @param pdu_scan    Scan response PDU (length used for scannable types).
 * @param adv_chn_cnt Number of enabled primary advertising channels.
 * @param phy         PHY used for primary channel PDUs.
 * @param phy_flags   PHY coding flags (Coded PHY S2/S8).
 *
 * @return Radio event time reservation in microseconds.
 */
static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
			     uint8_t adv_chn_cnt, uint8_t phy,
			     uint8_t phy_flags)
{
	uint16_t time_us = EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	/* NOTE: 16-bit value is sufficient to calculate the maximum radio
	 *       event time reservation for PDUs on primary advertising
	 *       channels (37, 38, and 39 channel indices of 1M and Coded PHY).
	 */

	/* Calculate the PDU Tx Time and hence the radio event length */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* ADV_EXT_IND on each primary channel plus a turnaround gap
		 * between consecutive channels.
		 */
		time_us += PDU_AC_US(pdu->len, phy, phy_flags) * adv_chn_cnt +
			   EVENT_RX_TX_TURNAROUND(phy) * (adv_chn_cnt - 1);
	} else
#endif
	{
		/* Legacy PDUs are always on 1M PHY; start from header plus
		 * AdvA, then add the per-type payload and response budget.
		 */
		uint16_t adv_size =
			PDU_OVERHEAD_SIZE(PHY_1M) + ADVA_SIZE;
		const uint16_t conn_ind_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 INITA_SIZE + ADVA_SIZE + LLDATA_SIZE), PHY_1M);
		const uint8_t scan_req_us  =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 SCANA_SIZE + ADVA_SIZE), PHY_1M);
		const uint16_t scan_rsp_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 ADVA_SIZE + pdu_scan->len), PHY_1M);
		const uint8_t rx_to_us	= EVENT_RX_TO_US(PHY_1M);
		const uint8_t rxtx_turn_us = EVENT_RX_TX_TURNAROUND(PHY_1M);

		if (pdu->type == PDU_ADV_TYPE_NONCONN_IND) {
			/* Non-connectable, non-scannable: Tx only, no
			 * response window needed.
			 */
			adv_size += pdu->len;
			time_us += BYTES2US(adv_size, PHY_1M) * adv_chn_cnt +
				   rxtx_turn_us * (adv_chn_cnt - 1);
		} else {
			if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
				adv_size += TARGETA_SIZE;
				time_us += conn_ind_us;
			} else if (pdu->type == PDU_ADV_TYPE_ADV_IND) {
				adv_size += pdu->len;
				/* Budget for the longer of the two possible
				 * responses: SCAN_REQ+SCAN_RSP or CONNECT_IND.
				 */
				time_us += MAX(scan_req_us + EVENT_IFS_MAX_US +
						scan_rsp_us, conn_ind_us);
			} else if (pdu->type == PDU_ADV_TYPE_SCAN_IND) {
				adv_size += pdu->len;
				time_us += scan_req_us + EVENT_IFS_MAX_US +
					   scan_rsp_us;
			}

			/* Per-channel Tx plus Rx window; the final channel
			 * does not need the Rx timeout and turnaround again.
			 */
			time_us += (BYTES2US(adv_size, PHY_1M) +
				    EVENT_IFS_MAX_US + rx_to_us +
				    rxtx_turn_us) * (adv_chn_cnt - 1) +
				   BYTES2US(adv_size, PHY_1M) + EVENT_IFS_MAX_US;
		}
	}

	return time_us;
}
2294 
/* Ticker expiry callback for an advertising set: kicks off the LLL prepare
 * for the next advertising event and applies the per-event random delay.
 *
 * Runs in ULL_HIGH context on each ticker expiry of the set's primary
 * advertising ticker.
 *
 * @param ticks_at_expire Absolute tick count at expiry.
 * @param ticks_drift     Accumulated drift (from previous random delays).
 * @param remainder       Sub-tick remainder in microseconds.
 * @param lazy            Number of skipped (lazy) expiries before this one.
 * @param force           Non-zero if the expiry was forced.
 * @param param           Advertising set, or ticker ext context wrapping it
 *                        when CONFIG_BT_TICKER_EXT_EXPIRE_INFO is enabled.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_prepare};
	static struct lll_prepare_param p;
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_ext_context *context = param;
	struct ll_adv_set *adv = context->context;
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	struct ll_adv_set *adv = param;
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	uint32_t random_delay;
	struct lll_adv *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->aux) {
		/* Check if we are about to exceed the duration or max events limit
		 * Usually this will be handled in ull_adv_done(), but in cases where
		 * the extended advertising events overlap (ie. several primary advertisings
		 * point to the same AUX_ADV_IND packet) the ticker will not be stopped
		 * in time. To handle this, we simply ignore the extra ticker callback and
		 * wait for the usual ull_adv_done() handling to run
		 */
		if ((adv->max_events && adv->event_counter >= adv->max_events) ||
		    (adv->remain_duration_us &&
		     adv->remain_duration_us <= (uint64_t)adv->interval * ADV_INT_UNIT_US)) {
			return;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Skip the prepare on a must-expire (lazy) expiry unless in
	 * low-latency mode; the event itself is not run then.
	 */
	if (IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		/* Increment prepare reference count */
		ref = ull_ref_inc(&adv->ull);
		LL_ASSERT(ref);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			uint32_t ticks_to_expire;
			uint32_t other_remainder;

			LL_ASSERT(context->other_expire_info);

			/* Adjust ticks to expire based on remainder value */
			ticks_to_expire = context->other_expire_info->ticks_to_expire;
			other_remainder = context->other_expire_info->remainder;
			hal_ticker_remove_jitter(&ticks_to_expire, &other_remainder);

			/* Store the ticks and remainder offset for aux ptr population in LLL */
			adv->lll.aux->ticks_pri_pdu_offset = ticks_to_expire;
			adv->lll.aux->us_pri_pdu_offset = other_remainder;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	* CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

		/* Append timing parameters */
		p.ticks_at_expire = ticks_at_expire;
		p.remainder = remainder;
		p.lazy = lazy;
		p.force = force;
		p.param = lll;
		mfy.param = &p;

		/* Kick LLL prepare */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	!defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		/* Without ticker expire info the aux offset has to be
		 * computed explicitly here.
		 */
		if (adv->lll.aux) {
			ull_adv_aux_offset_get(adv);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Remember expiry timing so the done handler can re-schedule
		 * within the advertising event's allowed window.
		 */
		adv->ticks_at_expire = ticks_at_expire;
		adv->delay_at_expire = adv->delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	}

	/* Apply adv random delay */
#if defined(CONFIG_BT_PERIPHERAL)
	if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
	{
		/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
		random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY,
						  0, 0, ticker_update_op_cb);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (adv->remain_duration_us && adv->event_counter > 0U) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			/* ticks_drift is always 0 with JIT scheduling, populate manually */
			ticks_drift = adv->delay_at_expire;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
			uint32_t interval_us = (uint64_t)adv->interval * ADV_INT_UNIT_US;
			uint32_t elapsed_us = interval_us * (lazy + 1U) +
						 HAL_TICKER_TICKS_TO_US(ticks_drift);

			/* End advertising if the added random delay pushes us beyond the limit */
			if (adv->remain_duration_us > elapsed_us + interval_us +
						      HAL_TICKER_TICKS_TO_US(random_delay)) {
				adv->remain_duration_us -= elapsed_us;
			} else {
				adv->remain_duration_us = interval_us;
			}
		}

		adv->event_counter += (lazy + 1U);
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	}

	DEBUG_RADIO_PREPARE_A(1);
}
2423 
/* Completion callback for the random-delay ticker_update issued by
 * ticker_cb().
 *
 * Only asserts that the update either succeeded or failed for a known,
 * benign reason: the set is being disabled (param is the disable mark), or
 * a connectable advertising set just spawned a connection (its ticker was
 * stopped concurrently).
 *
 * @param status Ticker operation status.
 * @param param  Advertising set the update was issued for.
 */
static void ticker_update_op_cb(uint32_t status, void *param)
{
#if defined(CONFIG_BT_PERIPHERAL) && (defined(CONFIG_BT_ASSERT) || defined(CONFIG_ASSERT))
	struct ll_adv_set *adv = param;
	struct pdu_adv *pdu = lll_adv_data_peek(&adv->lll);
	/* Determine whether the set's current PDU is connectable; needed
	 * only for the assertion below, hence compiled only with asserts.
	 */
	bool connectable = (pdu->type == PDU_ADV_TYPE_ADV_IND) ||
			   (pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
			   ((pdu->type == PDU_ADV_TYPE_EXT_IND) &&
			    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN)) ||
#endif /* CONFIG_BT_CTLR_ADV_EXT */
			   0;
#endif /* CONFIG_BT_PERIPHERAL && (CONFIG_BT_ASSERT || CONFIG_ASSERT) */

	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get() ||
#if defined(CONFIG_BT_PERIPHERAL)
		   /* if using connectable adv and lll.conn is 0 -> a connection is underway */
		  (connectable && !adv->lll.conn) ||
#endif /* CONFIG_BT_PERIPHERAL */
		  0);
}
2446 
2447 #if defined(CONFIG_BT_PERIPHERAL)
2448 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2449 			   uint32_t remainder, uint16_t lazy, uint8_t force,
2450 			   void *param)
2451 {
2452 	struct ll_adv_set *adv = param;
2453 	uint8_t handle;
2454 	uint32_t ret;
2455 
2456 	handle = ull_adv_handle_get(adv);
2457 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2458 
2459 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2460 			  TICKER_ID_ADV_BASE + handle,
2461 			  ticker_stop_op_cb, adv);
2462 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2463 		  (ret == TICKER_STATUS_BUSY));
2464 }
2465 
/* Completion callback for the advertising ticker stop issued from
 * ticker_stop_cb(): hands over to adv_disable() in ULL_HIGH context via a
 * mayfly to abort any pending LLL event.
 *
 * @param status Ticker stop operation status.
 * @param param  Advertising set being stopped.
 */
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	/* NOTE: single static mayfly/link is sufficient as this callback is
	 * serialized per ticker operation.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, adv_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

#if defined(CONFIG_BT_HCI_MESH_EXT)
	/* FIXME: why is this here for Mesh commands? */
	if (param) {
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2492 
/* Disable an advertising set after its ticker was stopped: if an LLL event
 * is still in the pipeline, request its abort and defer to disabled_cb();
 * otherwise call disabled_cb() immediately.
 *
 * Runs in ULL_HIGH context (enqueued from ticker_stop_op_cb()).
 *
 * @param param Advertising set to disable.
 */
static void adv_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&adv->lll);
	}
}
2524 
/* Final step of high duty cycle directed advertising timeout: generate the
 * connection-complete event with status BT_HCI_ERR_ADV_TIMEOUT (and, for
 * extended advertising, an advertising-set-terminated event) towards the
 * host.
 *
 * Called when the advertising set's ULL reference count reaches zero.
 *
 * @param param The set's lll_adv (its parent is the ll_adv_set).
 */
static void disabled_cb(void *param)
{
	struct ll_adv_set *adv;
	struct node_rx_pdu *rx;
	struct node_rx_cc *cc;
	memq_link_t *link;

	adv = ((struct lll_hdr *)param)->parent;

	/* Consume the pre-allocated link/node reserved for the
	 * connection-complete event; both must have been set at enable.
	 */
	LL_ASSERT(adv->link_cc_free);
	link = adv->link_cc_free;
	adv->link_cc_free = NULL;

	LL_ASSERT(adv->node_rx_cc_free);
	rx = adv->node_rx_cc_free;
	adv->node_rx_cc_free = NULL;

	rx->hdr.type = NODE_RX_TYPE_CONNECTION;
	rx->hdr.handle = 0xffff;

	/* Report "connection failed to be established" (timeout) */
	cc = (void *)rx->pdu;
	memset(cc, 0x00, sizeof(struct node_rx_cc));
	cc->status = BT_HCI_ERR_ADV_TIMEOUT;

	rx->hdr.rx_ftr.param = param;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (adv->lll.node_rx_adv_term) {
		uint8_t handle;

		/* Queue the connection-complete node first, then reuse the
		 * adv_term node for the terminate event enqueued below.
		 */
		ll_rx_put(link, rx);

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx = (void *)adv->lll.node_rx_adv_term;
		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->hdr.rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
		rx->hdr.rx_ftr.param_adv_term.conn_handle = 0xffff;
		rx->hdr.rx_ftr.param_adv_term.num_events = adv->event_counter;

		link = rx->hdr.link;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Enqueue last node and schedule the host Rx processing */
	ll_rx_put_sched(link, rx);
}
2573 
/* Release the connection context pre-allocated for a connectable
 * advertising set that is being disabled without having connected.
 *
 * Deinitializes the connection's Tx memq, returns the connection to its
 * pool, and releases the reserved connection-complete node and link.
 *
 * @param adv Advertising set whose lll.conn is non-NULL.
 */
static void conn_release(struct ll_adv_set *adv)
{
	struct lll_conn *lll = adv->lll.conn;
	memq_link_t *link;

	/* Recover the memq's termination link into the free Tx link slot */
	LL_ASSERT(!lll->link_tx_free);
	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
	LL_ASSERT(link);
	lll->link_tx_free = link;

	ll_conn_release(lll->hdr.parent);
	adv->lll.conn = NULL;

	/* Return the reserved connection-complete rx node and link */
	ll_rx_release(adv->node_rx_cc_free);
	adv->node_rx_cc_free = NULL;
	ll_rx_link_release(adv->link_cc_free);
	adv->link_cc_free = NULL;
}
2592 #endif /* CONFIG_BT_PERIPHERAL */
2593 
2594 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2595 static uint8_t leg_adv_type_get(uint8_t evt_prop)
2596 {
2597 	/* We take advantage of the fact that 2 LS bits
2598 	 * of evt_prop can be used in a lookup to return
2599 	 * PDU type value in the pdu_adv_type[] lookup.
2600 	 */
2601 	uint8_t const leg_adv_type[] = {
2602 		0x03, /* index of PDU_ADV_TYPE_NONCONN_IND in pdu_adv_type[] */
2603 		0x04, /* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2604 		0x02, /* index of PDU_ADV_TYPE_SCAN_IND in pdu_adv_type[] */
2605 		0x00  /* index of PDU_ADV_TYPE_ADV_IND in pdu_adv_type[] */
2606 	};
2607 
2608 	/* if high duty cycle directed */
2609 	if (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN) {
2610 		/* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2611 		return 0x01;
2612 	}
2613 
2614 	return leg_adv_type[evt_prop & 0x03];
2615 }
2616 
2617 static void adv_max_events_duration_set(struct ll_adv_set *adv,
2618 					uint16_t duration,
2619 					uint8_t max_ext_adv_evts)
2620 {
2621 	adv->event_counter = 0;
2622 	adv->max_events = max_ext_adv_evts;
2623 	adv->remain_duration_us = (uint32_t)duration * 10U * USEC_PER_MSEC;
2624 }
2625 
/* Completion callback for stopping the auxiliary advertising ticker on
 * duration/max-events expiry: continues with aux_disable() in ULL_HIGH
 * context via a mayfly.
 *
 * @param status Ticker stop operation status (must be success).
 * @param param  Advertising set whose aux is being stopped.
 */
static void ticker_stop_aux_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, aux_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2640 
2641 static void aux_disable(void *param)
2642 {
2643 	struct lll_adv_aux *lll_aux;
2644 	struct ll_adv_aux_set *aux;
2645 	struct ll_adv_set *adv;
2646 	struct ull_hdr *hdr;
2647 
2648 	adv = param;
2649 	lll_aux = adv->lll.aux;
2650 	aux = HDR_LLL2ULL(lll_aux);
2651 	hdr = &aux->ull;
2652 	if (ull_ref_get(hdr)) {
2653 		LL_ASSERT(!hdr->disabled_cb);
2654 		hdr->disabled_param = adv;
2655 		hdr->disabled_cb = aux_disabled_cb;
2656 	} else {
2657 		aux_disabled_cb(param);
2658 	}
2659 }
2660 
/* After the auxiliary advertising is fully disabled, stop the primary
 * advertising ticker of the set; completion continues in
 * ticker_stop_ext_op_cb().
 *
 * @param param Advertising set (passed through from aux_disable()).
 */
static void aux_disabled_cb(void *param)
{
	uint8_t handle;
	uint32_t ret;

	handle = ull_adv_handle_get(param);
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_ADV_BASE + handle),
			  ticker_stop_ext_op_cb, param);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
2674 
/* Completion callback for stopping the primary advertising ticker on
 * extended advertising duration/max-events expiry: continues with
 * ext_disable() in ULL_HIGH context via a mayfly.
 *
 * @param status Ticker stop operation status.
 * @param param  Advertising set being stopped.
 */
static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2694 
/* Disable an extended advertising set after its primary ticker has been
 * stopped: abort any pending LLL event and defer completion to
 * ext_disabled_cb(), or call it directly when no event is pending.
 *
 * Runs in ULL_HIGH context (enqueued from ticker_stop_ext_op_cb()).
 *
 * @param param Advertising set to disable.
 */
static void ext_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&adv->lll);
	}
}
2726 
2727 static void ext_disabled_cb(void *param)
2728 {
2729 	struct lll_adv *lll = (void *)param;
2730 	struct node_rx_hdr *rx_hdr = (void *)lll->node_rx_adv_term;
2731 
2732 	/* Under race condition, if a connection has been established then
2733 	 * node_rx is already utilized to send terminate event on connection
2734 	 */
2735 	if (!rx_hdr) {
2736 		return;
2737 	}
2738 
2739 	/* NOTE: parameters are already populated on disable, just enqueue here
2740 	 */
2741 	ll_rx_put_sched(rx_hdr->link, rx_hdr);
2742 }
2743 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2744 
/* Disable an enabled advertising set: stop its tickers, abort/await any
 * pending LLL event, stop its auxiliary advertising and release any
 * pre-allocated connection resources.
 *
 * Called from thread context (HCI command path).
 *
 * @param handle Advertising set handle.
 *
 * @return 0 on success (or when already disabled and strict checking is
 *         off), BT_HCI_ERR_CMD_DISALLOWED on failure.
 */
static inline uint8_t disable(uint8_t handle)
{
	uint32_t volatile ret_cb;
	struct ll_adv_set *adv;
	uint32_t ret;
	void *mark;
	int err;

	adv = ull_adv_is_enabled_get(handle);
	if (!adv) {
		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
		 * Disabling advertising when it is already disabled has no
		 * effect.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT)) {
			return 0;
		}

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* Indicate to LLL that a cancellation is requested */
		adv->lll.conn->periph.cancelled = 1U;
		cpu_dmb();

		/* Check if a connection was initiated (connection
		 * establishment race between LLL and ULL).
		 */
		if (unlikely(adv->lll.conn->periph.initiated)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Mark the set as being disabled so concurrent ticker callbacks can
	 * recognize the race (see ticker_update_op_cb).
	 */
	mark = ull_disable_mark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.is_hdcd) {
		/* High duty cycle directed advertising also has the one-shot
		 * stop ticker running; stop it first.
		 */
		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_THREAD, TICKER_ID_ADV_STOP,
				  ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret) {
			mark = ull_disable_unmark(adv);
			LL_ASSERT(mark == adv);

			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Stop the set's primary advertising ticker */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_ADV_BASE + handle,
			  ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		mark = ull_disable_unmark(adv);
		LL_ASSERT(mark == adv);

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Wait for any in-flight LLL event of this set to finish/abort */
	err = ull_disable(&adv->lll);
	LL_ASSERT(!err || (err == -EALREADY));

	mark = ull_disable_unmark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		err = ull_adv_aux_stop(aux);
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#if defined(CONFIG_BT_PERIPHERAL)
	/* Release the connection context reserved for a connectable set
	 * that never connected.
	 */
	if (adv->lll.conn) {
		conn_release(adv);
	}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv *lll = &adv->lll;

	/* Release the node reserved for the terminate event; it is no
	 * longer needed once the set is disabled.
	 */
	if (lll->node_rx_adv_term) {
		struct node_rx_pdu *node_rx_adv_term =
			(void *)lll->node_rx_adv_term;

		lll->node_rx_adv_term = NULL;

		ll_rx_link_release(node_rx_adv_term->hdr.link);
		ll_rx_release(node_rx_adv_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	adv->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Notify the filter/RPA machinery if no scanner keeps it active */
	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
		ull_filter_adv_scan_state_cb(0);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}
2863 
/* At advertising enable time, determine which PDU carries AdvA (primary or,
 * for extended advertising, the AUX_ADV_IND) and update its address fields;
 * also update the scan response PDU addresses where applicable.
 *
 * @param adv      Advertising set being enabled.
 * @param pdu      Primary channel advertising PDU.
 * @param pdu_scan Scan response PDU.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED when scannable extended
 *         advertising has no scan response data set,
 *         BT_HCI_ERR_INVALID_PARAM when a random address is required but
 *         not valid.
 */
static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
					struct pdu_adv *pdu,
					struct pdu_adv *pdu_scan)
{
	struct pdu_adv *pdu_adv_to_update;
	struct lll_adv *lll;

	pdu_adv_to_update = NULL;
	lll = &adv->lll;

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Snapshot the primary PDU's extended header flags; empty
		 * header means no optional fields present.
		 */
		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		/* Scannable extended advertising requires scan response data
		 * to have been set before enable.
		 */
		if (pri_com_hdr->adv_mode & BT_HCI_LE_ADV_PROP_SCAN) {
			struct pdu_adv *sr = lll_adv_scan_rsp_peek(lll);

			if (!sr->len) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}
		}

		/* AdvA, fill here at enable */
		if (pri_hdr_flags.adv_addr) {
			pdu_adv_to_update = pdu;
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			/* AdvA may instead be carried in the auxiliary
			 * (secondary channel) PDU.
			 */
			sec_pdu = lll_adv_aux_data_peek(lll->aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu_adv_to_update = sec_pdu;
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		/* Legacy PDUs always carry AdvA in the primary PDU */
		pdu_adv_to_update = pdu;
	}

	if (pdu_adv_to_update) {
		const uint8_t *adv_addr;

		adv_addr = ull_adv_pdu_update_addrs(adv, pdu_adv_to_update);

		/* In case the local IRK was not set or no match was
		 * found the fallback address was used instead, check
		 * that a valid address has been set.
		 */
		if (pdu_adv_to_update->tx_addr &&
		    !mem_nz((void *)adv_addr, BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Do not update scan response for extended non-scannable since
		 * there may be no scan response set.
		 */
		if ((pdu->type != PDU_ADV_TYPE_EXT_IND) ||
		    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_SCAN)) {
#else
		if (1) {
#endif
			ull_adv_pdu_update_addrs(adv, pdu_scan);
		}

	}

	return 0;
}
2957 
/* Return a pointer to the AdvA field inside an advertising PDU.
 *
 * For extended PDUs the AdvA is expected to be present (asserted via the
 * extended header flags); for legacy PDUs AdvA is always at the start of
 * the payload.
 *
 * @param pdu Advertising PDU.
 *
 * @return Pointer to the 6-byte AdvA field within the PDU.
 */
static inline uint8_t *adv_pdu_adva_get(struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* Snapshot extended header flags; empty header => no fields */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}

	/* All extended PDUs have AdvA at the same offset in common header */
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		LL_ASSERT(hdr_flags.adv_addr);

		/* AdvA immediately follows the one-byte flags octet */
		return &com_hdr->ext_hdr_adv_data[1];
	}
#endif

	/* All legacy PDUs have AdvA at the same offset */
	return pdu->adv_ind.addr;
}
2982 
/* Write the advertiser address (AdvA) into the PDU: an RPA when privacy is
 * enabled and one was generated, otherwise the own identity (or set random)
 * address.
 *
 * @param adv Advertising set.
 * @param pdu PDU whose AdvA field is written.
 *
 * @return Pointer to the AdvA field written into the PDU.
 */
static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rpa = ull_filter_adva_get(adv->lll.rl_idx);
#else
	const uint8_t *rpa = NULL;
#endif
	const uint8_t *own_id_addr;
	const uint8_t *tx_addr;
	uint8_t *adv_addr;

	/* own_id_addr is resolved only when needed: when no RPA is used (it
	 * becomes the on-air address) or when the same-peer check needs the
	 * identity address recorded below.
	 */
	if (!rpa || IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)) {
		if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		} else if (ll_adv_cmds_is_ext() && pdu->tx_addr) {
			/* Extended advertising uses the per-set random addr */
			own_id_addr = adv->rnd_addr;
#endif
		} else {
			own_id_addr = ll_addr_get(pdu->tx_addr);
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	(void)memcpy(adv->own_id_addr, own_id_addr, BDADDR_SIZE);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

	if (rpa) {
		/* RPAs are always random addresses on air */
		pdu->tx_addr = 1;
		tx_addr = rpa;
	} else {
		tx_addr = own_id_addr;
	}

	adv_addr = adv_pdu_adva_get(pdu);
	memcpy(adv_addr, tx_addr, BDADDR_SIZE);

	return adv_addr;
}
3021 
/* Update the TargetA field of a directed advertising PDU with the peer's
 * RPA when LL Privacy is enabled and a resolvable address is available.
 *
 * @param adv Advertising set.
 * @param pdu Directed advertising PDU whose TargetA may be rewritten.
 */
static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rx_addr = NULL;
	uint8_t *tgt_addr;

	rx_addr = ull_filter_tgta_get(adv->lll.rl_idx);
	if (rx_addr) {
		/* RPAs are always random addresses on air */
		pdu->rx_addr = 1;

		/* TargetA always follows AdvA in all PDUs */
		tgt_addr = adv_pdu_adva_get(pdu) + BDADDR_SIZE;
		memcpy(tgt_addr, rx_addr, BDADDR_SIZE);
	}
#endif

	/* NOTE: identity TargetA is set when configuring advertising set, no
	 *       need to update if LL Privacy is not supported.
	 */
}
3042 
3043 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type)
3044 {
3045 	/* TODO: Add support for extended advertising PDU if needed */
3046 	pdu->type = pdu_type;
3047 	pdu->rfu = 0;
3048 	pdu->chan_sel = 0;
3049 	pdu->tx_addr = 0;
3050 	pdu->rx_addr = 0;
3051 	pdu->len = BDADDR_SIZE;
3052 }
3053 
3054 static void init_set(struct ll_adv_set *adv)
3055 {
3056 	adv->interval = BT_LE_ADV_INTERVAL_DEFAULT;
3057 #if defined(CONFIG_BT_CTLR_PRIVACY)
3058 	adv->own_addr_type = BT_ADDR_LE_PUBLIC;
3059 #endif /* CONFIG_BT_CTLR_PRIVACY */
3060 	adv->lll.chan_map = BT_LE_ADV_CHAN_MAP_ALL;
3061 	adv->lll.filter_policy = BT_LE_ADV_FP_NO_FILTER;
3062 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3063 	adv->delay = 0U;
3064 #endif /* ONFIG_BT_CTLR_JIT_SCHEDULING */
3065 
3066 	init_pdu(lll_adv_data_peek(&ll_adv[0].lll), PDU_ADV_TYPE_ADV_IND);
3067 
3068 #if !defined(CONFIG_BT_CTLR_ADV_EXT)
3069 	init_pdu(lll_adv_scan_rsp_peek(&ll_adv[0].lll), PDU_ADV_TYPE_SCAN_RSP);
3070 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
3071 }
3072