1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <string.h>
9 
10 #include <zephyr/kernel.h>
11 #include <soc.h>
12 #include <zephyr/bluetooth/hci_types.h>
13 #include <zephyr/sys/byteorder.h>
14 
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/radio.h"
18 #include "hal/ticker.h"
19 #include "hal/cntr.h"
20 
21 #include "util/util.h"
22 #include "util/mem.h"
23 #include "util/memq.h"
24 #include "util/mayfly.h"
25 #include "util/dbuf.h"
26 
27 #include "ticker/ticker.h"
28 
29 #include "pdu_df.h"
30 #include "lll/pdu_vendor.h"
31 #include "pdu.h"
32 
33 #include "lll.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll/lll_adv_types.h"
37 #include "lll_adv.h"
38 #include "lll/lll_adv_pdu.h"
39 #include "lll_scan.h"
40 #include "lll/lll_df_types.h"
41 #include "lll_conn.h"
42 #include "lll_filter.h"
43 #include "lll_conn_iso.h"
44 
45 #include "ll_sw/ull_tx_queue.h"
46 
47 #include "ull_adv_types.h"
48 #include "ull_scan_types.h"
49 #include "ull_conn_types.h"
50 #include "ull_filter.h"
51 
52 #include "ull_adv_internal.h"
53 #include "ull_scan_internal.h"
54 #include "ull_conn_internal.h"
55 #include "ull_internal.h"
56 
57 #include "ll.h"
58 #include "ll_feat.h"
59 #include "ll_settings.h"
60 
61 #include "ll_sw/isoal.h"
62 #include "ll_sw/ull_iso_types.h"
63 #include "ll_sw/ull_conn_iso_types.h"
64 
65 #include "ll_sw/ull_llcp.h"
66 
67 
68 #include "hal/debug.h"
69 
70 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle);
71 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv);
72 
73 static int init_reset(void);
74 static inline struct ll_adv_set *is_disabled_get(uint8_t handle);
75 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
76 			     uint8_t adv_chn_cnt, uint8_t phy,
77 			     uint8_t phy_flags);
78 
79 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
80 		      uint32_t remainder, uint16_t lazy, uint8_t force,
81 		      void *param);
82 static void ticker_update_op_cb(uint32_t status, void *param);
83 
84 #if defined(CONFIG_BT_PERIPHERAL)
85 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
86 			   uint32_t remainder, uint16_t lazy, uint8_t force,
87 			   void *param);
88 static void ticker_stop_op_cb(uint32_t status, void *param);
89 static void adv_disable(void *param);
90 static void disabled_cb(void *param);
91 static void conn_release(struct ll_adv_set *adv);
92 #endif /* CONFIG_BT_PERIPHERAL */
93 
94 #if defined(CONFIG_BT_CTLR_ADV_EXT)
95 static uint8_t leg_adv_type_get(uint8_t evt_prop);
96 static void adv_max_events_duration_set(struct ll_adv_set *adv,
97 					uint16_t duration,
98 					uint8_t max_ext_adv_evts);
99 static void ticker_stop_aux_op_cb(uint32_t status, void *param);
100 static void aux_disable(void *param);
101 static void aux_disabled_cb(void *param);
102 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
103 static void ext_disable(void *param);
104 static void ext_disabled_cb(void *param);
105 #endif /* CONFIG_BT_CTLR_ADV_EXT */
106 
107 static inline uint8_t disable(uint8_t handle);
108 
109 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
110 					struct pdu_adv *pdu,
111 					struct pdu_adv *pdu_scan);
112 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
113 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
114 
115 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type);
116 static void init_set(struct ll_adv_set *adv);
117 
118 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
119 
120 static uint8_t ticker_update_req;
121 static uint8_t ticker_update_ack;
122 
123 #if defined(CONFIG_BT_TICKER_EXT)
124 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
125 #endif /* CONFIG_BT_TICKER_EXT */
126 
127 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_CTLR_ADV_EXT)
128 static uint8_t ll_adv_cmds;
129 
/* Latch the HCI advertising command mode (legacy vs. extended) on first use.
 *
 * The first caller decides the mode for the lifetime of the controller;
 * any later attempt to select a different mode is rejected with -EINVAL.
 * When legacy mode is selected, advertising set 0 is pre-created so legacy
 * HCI commands can use it implicitly.
 */
int ll_adv_cmds_set(uint8_t adv_cmds)
{
	if (ll_adv_cmds == 0U) {
		/* First selection: remember the command mode */
		ll_adv_cmds = adv_cmds;

		if (adv_cmds == LL_ADV_CMDS_LEGACY) {
			struct ll_adv_set *adv = &ll_adv[0];

#if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
			adv->hci_handle = 0;
#endif
			adv->is_created = 1;
		}
	}

	return (ll_adv_cmds == adv_cmds) ? 0 : -EINVAL;
}
151 
ll_adv_cmds_is_ext(void)152 int ll_adv_cmds_is_ext(void)
153 {
154 	return ll_adv_cmds == LL_ADV_CMDS_EXT;
155 }
156 #endif
157 
158 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
/* Translate an HCI advertising handle into the controller's set index.
 *
 * On success, writes the controller index into *handle and returns 0.
 * Returns BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER when no created set carries
 * hci_handle.
 */
uint8_t ll_adv_set_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
{
	uint8_t idx;

	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		const struct ll_adv_set *adv = &ll_adv[idx];

		if (adv->is_created && (adv->hci_handle == hci_handle)) {
			*handle = idx;
			return 0;
		}
	}

	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
}
175 
/* Like ll_adv_set_by_hci_handle_get(), but when the HCI handle is not yet
 * mapped, claim the first vacant (not created) set for it.
 *
 * Returns 0 with *handle set on success, or
 * BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when all sets are in use.
 */
uint8_t ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle, uint8_t *handle)
{
	struct ll_adv_set *vacant = NULL;
	uint8_t idx;

	for (idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		struct ll_adv_set *adv = &ll_adv[idx];

		if (!adv->is_created) {
			/* Remember the first free slot for possible reuse */
			if (vacant == NULL) {
				vacant = adv;
			}
			continue;
		}

		if (adv->hci_handle == hci_handle) {
			*handle = idx;
			return 0;
		}
	}

	if (vacant == NULL) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	vacant->hci_handle = hci_handle;
	*handle = ull_adv_handle_get(vacant);

	return 0;
}
203 
/* Return the HCI advertising handle stored for controller set index 'handle'.
 *
 * The set must exist and have been created; asserts otherwise.
 */
uint8_t ll_adv_set_hci_handle_get(uint8_t handle)
{
	struct ll_adv_set *adv;

	adv = ull_adv_set_get(handle);
	LL_ASSERT(adv && adv->is_created);

	return adv->hci_handle;
}
213 #endif
214 
#if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Set (legacy or extended) advertising parameters for an advertising set.
 *
 * Validates the requested event properties and PHYs, stores the parameters
 * in the ULL advertising set context, and rebuilds the "current" primary
 * channel advertising PDU (and, for non-extended sets, the scan response
 * PDU) in the double buffer shared between caller and controller context.
 *
 * Returns 0 on success or a BT_HCI_ERR_* error code.
 */
uint8_t ll_adv_params_set(uint8_t handle, uint16_t evt_prop, uint32_t interval,
		       uint8_t adv_type, uint8_t own_addr_type,
		       uint8_t direct_addr_type, uint8_t const *const direct_addr,
		       uint8_t chan_map, uint8_t filter_policy,
		       uint8_t *const tx_pwr, uint8_t phy_p, uint8_t skip,
		       uint8_t phy_s, uint8_t sid, uint8_t sreq)
{
	/* Map the HCI advertising type index to the on-air PDU type; index 5
	 * (PDU_ADV_TYPE_EXT_IND) is used for extended advertising.
	 */
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_EXT_IND};
	uint8_t is_pdu_type_changed = 0;
	uint8_t is_new_set;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
/* Legacy-only variant: single advertising set, handle fixed at 0. */
uint8_t ll_adv_params_set(uint16_t interval, uint8_t adv_type,
		       uint8_t own_addr_type, uint8_t direct_addr_type,
		       uint8_t const *const direct_addr, uint8_t chan_map,
		       uint8_t filter_policy)
{
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
				     PDU_ADV_TYPE_DIRECT_IND,
				     PDU_ADV_TYPE_SCAN_IND,
				     PDU_ADV_TYPE_NONCONN_IND,
				     PDU_ADV_TYPE_DIRECT_IND};
	uint8_t const handle = 0;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	struct ll_adv_set *adv;
	uint8_t pdu_type_prev;
	struct pdu_adv *pdu;

	/* Parameters may only be changed while the set is disabled */
	adv = is_disabled_get(handle);
	if (!adv) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
	 * evt_prop bits.
	 */

	/* Extended adv param set command used */
	if (adv_type == PDU_ADV_TYPE_EXT_IND) {
		/* legacy */
		if (evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) {
			/* Anonymous advertising is not a legacy feature */
			if (evt_prop & BT_HCI_LE_ADV_PROP_ANON) {
				return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* disallow changing to legacy advertising while
			 * periodic advertising enabled.
			 */
			if (adv->lll.sync) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

			adv_type = leg_adv_type_get(evt_prop);

			adv->lll.phy_p = PHY_1M;
		} else {
			/* - Connectable and scannable not allowed;
			 * - High duty cycle directed connectable not allowed
			 */
			if (((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN)) ==
			     (BT_HCI_LE_ADV_PROP_CONN |
			      BT_HCI_LE_ADV_PROP_SCAN)) ||
			    (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* Periodic advertising is incompatible with anonymous,
			 * connectable or scannable properties while enabled.
			 */
			if (adv->lll.sync &&
			    (evt_prop & (BT_HCI_LE_ADV_PROP_ANON |
					 BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN))) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if (CONFIG_BT_CTLR_ADV_AUX_SET == 0)
			/* Connectable or scannable requires aux */
			if (evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					BT_HCI_LE_ADV_PROP_SCAN)) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}
#endif

			adv_type = 0x05; /* index of PDU_ADV_TYPE_EXT_IND in */
					 /* pdu_adv_type[] */

			/* Fallback to 1M if upper layer did not check HCI
			 * parameters for Coded PHY support.
			 * This fallback allows *testing* extended advertising
			 * using 1M using a upper layer that is requesting Coded
			 * PHY on Controllers without Coded PHY support.
			 */
			if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
			    (phy_p == PHY_CODED)) {
				phy_p = PHY_1M;
			}

			adv->lll.phy_p = phy_p;
			adv->lll.phy_flags = PHY_FLAGS_S8;
		}
	} else {
		/* Legacy HCI adv param set command: always 1M primary PHY */
		adv->lll.phy_p = PHY_1M;
	}

	is_new_set = !adv->is_created;
	adv->is_created = 1;
	adv->is_ad_data_cmplt = 1U;
	adv->max_skip = skip;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* remember parameters so that set adv/scan data and adv enable
	 * interface can correctly update adv/scan data in the
	 * double buffer between caller and controller context.
	 */
	/* Set interval for Undirected or Low Duty Cycle Directed Advertising */
	/* NOTE: adv_type index 0x01 is directed advertising (DIRECT_IND),
	 * which does not use an advertising interval here.
	 */
	if (adv_type != 0x01) {
		adv->interval = interval;
	} else {
		adv->interval = 0;
	}
	adv->lll.chan_map = chan_map;
	adv->lll.filter_policy = filter_policy;

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) && defined(CONFIG_BT_CTLR_ADV_EXT)
	adv->lll.scan_req_notify = sreq;
#endif

	/* update the "current" primary adv PDU */
	pdu = lll_adv_data_peek(&adv->lll);
	pdu_type_prev = pdu->type;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (is_new_set) {
		is_pdu_type_changed = 1;

		pdu->type = pdu_adv_type[adv_type];
		if (pdu->type != PDU_ADV_TYPE_EXT_IND) {
			pdu->len = 0U;
		}
	/* check if new PDU type is different that past one */
	} else if (pdu->type != pdu_adv_type[adv_type]) {
		is_pdu_type_changed = 1;

		/* If old PDU was extended advertising PDU, release
		 * auxiliary and periodic advertising sets.
		 */
		if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
			struct lll_adv_aux *lll_aux = adv->lll.aux;

			if (lll_aux) {
				struct ll_adv_aux_set *aux;

				/* FIXME: copy AD data from auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
				if (adv->lll.sync) {
					struct ll_adv_sync_set *sync;

					sync = HDR_LLL2ULL(adv->lll.sync);
					adv->lll.sync = NULL;

					ull_adv_sync_release(sync);
				}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

				/* Release auxiliary channel set */
				aux = HDR_LLL2ULL(lll_aux);
				adv->lll.aux = NULL;

				ull_adv_aux_release(aux);
			} else {
				/* No previous AD data in auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;
			}
		}

		pdu->type = pdu_adv_type[adv_type];
	}

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	pdu->type = pdu_adv_type[adv_type];
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	pdu->rfu = 0;

	/* ChSel bit is only meaningful for connectable legacy PDU types */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
	    ((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
	     (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
		pdu->chan_sel = 1;
	} else {
		pdu->chan_sel = 0;
	}

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
	/* Backup the legacy AD Data if switching to legacy directed advertising
	 * or to Extended Advertising.
	 */
	if (((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
	     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	      (pdu->type == PDU_ADV_TYPE_EXT_IND))) &&
	    (pdu_type_prev != PDU_ADV_TYPE_DIRECT_IND) &&
	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
	     (pdu_type_prev != PDU_ADV_TYPE_EXT_IND))) {
		if (pdu->len == 0U) {
			adv->ad_data_backup.len = 0U;
		} else {
			LL_ASSERT(pdu->len >=
				  offsetof(struct pdu_adv_adv_ind, data));

			adv->ad_data_backup.len = pdu->len -
				offsetof(struct pdu_adv_adv_ind, data);
			memcpy(adv->ad_data_backup.data, pdu->adv_ind.data,
			       adv->ad_data_backup.len);
		}
	}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Remember peer identity for resolving list lookup at enable time */
	adv->own_addr_type = own_addr_type;
	if (adv->own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_PUBLIC ||
	    adv->own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_RANDOM) {
		adv->peer_addr_type = direct_addr_type;
		memcpy(&adv->peer_addr, direct_addr, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = direct_addr_type;
		memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE);
		pdu->len = sizeof(struct pdu_adv_direct_ind);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Rebuild the extended header of the primary channel PDU.
		 * pri_dptr walks forward over the new header fields to size
		 * them, then backward to fill values; pri_dptr_prev mirrors
		 * the walk over the previous header so data can be moved.
		 */
		struct pdu_adv_ext_hdr *pri_hdr, pri_hdr_prev;
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		uint8_t *pri_dptr_prev, *pri_dptr;
		uint8_t len;

		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		pri_dptr = pri_hdr->data;
		pri_dptr_prev = pri_dptr;

		/* No ACAD and no AdvData */
		pri_com_hdr->adv_mode = evt_prop & 0x03;

		/* Zero-init header flags */
		if (is_pdu_type_changed) {
			*(uint8_t *)&pri_hdr_prev = 0U;
		} else {
			pri_hdr_prev = *pri_hdr;
		}
		*(uint8_t *)pri_hdr = 0U;

		/* AdvA flag */
		if (pri_hdr_prev.adv_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		if (!pri_com_hdr->adv_mode &&
		    !(evt_prop & BT_HCI_LE_ADV_PROP_ANON) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			/* TODO: optional on 1M with Aux Ptr */
			pri_hdr->adv_addr = 1;

			/* NOTE: AdvA is filled at enable */
			pdu->tx_addr = own_addr_type & 0x1;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->tx_addr = 0;
		}

		/* TargetA flag */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		/* TargetA flag in primary channel PDU only for directed */
		if (evt_prop & BT_HCI_LE_ADV_PROP_DIRECT) {
			pri_hdr->tgt_addr = 1;
			pdu->rx_addr = direct_addr_type;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->rx_addr = 0;
		}

		/* No CTEInfo flag in primary channel PDU */

		/* ADI flag */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev += sizeof(struct pdu_adv_adi);

			pri_hdr->adi = 1;
			pri_dptr += sizeof(struct pdu_adv_adi);
		}

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr flag */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
		}
		/* Need aux for connectable or scannable extended advertising */
		if (pri_hdr_prev.aux_ptr ||
		    ((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
				  BT_HCI_LE_ADV_PROP_SCAN)))) {
			pri_hdr->aux_ptr = 1;
			pri_dptr += sizeof(struct pdu_adv_aux_ptr);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* No SyncInfo flag in primary channel PDU */

		/* Tx Power flag */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev += sizeof(uint8_t);
		}
		/* C1, Tx Power is optional on the LE 1M PHY, and reserved for
		 * for future use on the LE Coded PHY.
		 */
		if ((evt_prop & BT_HCI_LE_ADV_PROP_TX_POWER) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			pri_hdr->tx_pwr = 1;
			pri_dptr += sizeof(uint8_t);
		}

		/* Calc primary PDU len */
		len = ull_adv_aux_hdr_len_calc(pri_com_hdr, &pri_dptr);
		ull_adv_aux_hdr_len_fill(pri_com_hdr, len);

		/* Set PDU length */
		pdu->len = len;

		/* Start filling primary PDU payload based on flags */

		/* No AdvData in primary channel PDU */

		/* No ACAD in primary channel PDU */

		/* Tx Power */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev -= sizeof(uint8_t);
		}
		if (pri_hdr->tx_pwr) {
			uint8_t _tx_pwr;

			_tx_pwr = 0;
			if (tx_pwr) {
				if (*tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) {
					_tx_pwr = *tx_pwr;
				} else {
					/* No preference: report back the
					 * selected Tx power to the host.
					 */
					*tx_pwr = _tx_pwr;
				}
			}

			pri_dptr -= sizeof(uint8_t);
			*pri_dptr = _tx_pwr;
		}

		/* No SyncInfo in primary channel PDU */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Fallback to 1M if upper layer did not check HCI
		 * parameters for Coded PHY support.
		 * This fallback allows *testing* extended advertising
		 * using 1M using a upper layer that is requesting Coded
		 * PHY on Controllers without Coded PHY support.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
		    (phy_s == PHY_CODED)) {
			phy_s = PHY_1M;
		}

		adv->lll.phy_s = phy_s;

		/* AuxPtr */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
		}
		if (pri_hdr->aux_ptr) {
			pri_dptr -= sizeof(struct pdu_adv_aux_ptr);
			ull_adv_aux_ptr_fill((void *)pri_dptr, 0U,
					     adv->lll.phy_s);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* ADI */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev -= sizeof(struct pdu_adv_adi);
		}
		if (pri_hdr->adi) {
			struct pdu_adv_adi *adi;

			pri_dptr -= sizeof(struct pdu_adv_adi);

			/* NOTE: memmove shall handle overlapping buffers */
			memmove(pri_dptr, pri_dptr_prev,
				sizeof(struct pdu_adv_adi));

			adi = (void *)pri_dptr;
			PDU_ADV_ADI_SID_SET(adi, sid);
		}
		adv->sid = sid;

		/* No CTEInfo field in primary channel PDU */

		/* TargetA */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev -= BDADDR_SIZE;
		}
		if (pri_hdr->tgt_addr) {
			pri_dptr -= BDADDR_SIZE;
			/* NOTE: RPA will be updated on enable, if needed */
			memcpy(pri_dptr, direct_addr, BDADDR_SIZE);
		}

		/* NOTE: AdvA, filled at enable and RPA timeout */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Make sure aux is created if we have AuxPtr */
		if (pri_hdr->aux_ptr) {
			uint8_t pri_idx, sec_idx;
			uint8_t err;

			err = ull_adv_aux_hdr_set_clear(adv,
						ULL_ADV_PDU_HDR_FIELD_ADVA,
						0U, &own_addr_type,
						&pri_idx, &sec_idx);
			if (err) {
				/* TODO: cleanup? */
				return err;
			}

			lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
			lll_adv_data_enqueue(&adv->lll, pri_idx);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else if (pdu->len == 0) {
		/* Fresh legacy PDU: header plus AdvA only, no AD data yet */
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		pdu->len = BDADDR_SIZE;
	} else {

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		if (((pdu_type_prev == PDU_ADV_TYPE_DIRECT_IND) ||
		     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		      (pdu_type_prev == PDU_ADV_TYPE_EXT_IND))) &&
		    (pdu->type != PDU_ADV_TYPE_DIRECT_IND) &&
		    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
		     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
			/* Restore the legacy AD Data */
			memcpy(pdu->adv_ind.data, adv->ad_data_backup.data,
			       adv->ad_data_backup.len);
			pdu->len = offsetof(struct pdu_adv_adv_ind, data) +
				   adv->ad_data_backup.len;
		}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
	}

	/* Initialize LLL header with parent pointer so that ULL contexts
	 * can be referenced in functions having the LLL context reference.
	 */
	lll_hdr_init(&adv->lll, adv);

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Make sure new extended advertising set is initialized with no
		 * scan response data. Existing sets keep whatever data was set.
		 */
		if (is_pdu_type_changed) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_aux_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
			pdu->type = PDU_ADV_TYPE_AUX_SCAN_RSP;
			pdu->len = 0;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		pdu = lll_adv_scan_rsp_peek(&adv->lll);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (is_pdu_type_changed || !pdu) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_data_init(&adv->lll.scan_rsp);
			if (err) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Make sure legacy advertising set has scan response data
		 * initialized.
		 */
		pdu->type = PDU_ADV_TYPE_SCAN_RSP;
		pdu->rfu = 0;
		pdu->chan_sel = 0;
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		if (pdu->len == 0) {
			pdu->len = BDADDR_SIZE;
		}
	}

	return 0;
}
763 
764 #if defined(CONFIG_BT_CTLR_ADV_EXT)
765 uint8_t ll_adv_data_set(uint8_t handle, uint8_t len, uint8_t const *const data)
766 {
767 #else /* !CONFIG_BT_CTLR_ADV_EXT */
768 uint8_t ll_adv_data_set(uint8_t len, uint8_t const *const data)
769 {
770 	const uint8_t handle = 0;
771 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
772 	struct ll_adv_set *adv;
773 
774 	adv = ull_adv_set_get(handle);
775 	if (!adv) {
776 		return BT_HCI_ERR_CMD_DISALLOWED;
777 	}
778 
779 	return ull_adv_data_set(adv, len, data);
780 }
781 
782 #if defined(CONFIG_BT_CTLR_ADV_EXT)
783 uint8_t ll_adv_scan_rsp_set(uint8_t handle, uint8_t len,
784 			    uint8_t const *const data)
785 {
786 #else /* !CONFIG_BT_CTLR_ADV_EXT */
787 uint8_t ll_adv_scan_rsp_set(uint8_t len, uint8_t const *const data)
788 {
789 	const uint8_t handle = 0;
790 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
791 	struct ll_adv_set *adv;
792 
793 	adv = ull_adv_set_get(handle);
794 	if (!adv) {
795 		return BT_HCI_ERR_CMD_DISALLOWED;
796 	}
797 
798 	return ull_scan_rsp_set(adv, len, data);
799 }
800 
801 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
802 #if defined(CONFIG_BT_HCI_MESH_EXT)
803 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
804 		   uint8_t at_anchor, uint32_t ticks_anchor, uint8_t retry,
805 		   uint8_t scan_window, uint8_t scan_delay)
806 {
807 #else /* !CONFIG_BT_HCI_MESH_EXT */
808 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
809 		   uint16_t duration, uint8_t max_ext_adv_evts)
810 {
811 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
812 	struct ll_adv_sync_set *sync = NULL;
813 	uint8_t sync_is_started = 0U;
814 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
815 	struct ll_adv_aux_set *aux = NULL;
816 	uint8_t aux_is_started = 0U;
817 	uint32_t ticks_anchor;
818 #endif /* !CONFIG_BT_HCI_MESH_EXT */
819 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
820 uint8_t ll_adv_enable(uint8_t enable)
821 {
822 	uint8_t const handle = 0;
823 	uint32_t ticks_anchor;
824 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
825 	uint32_t ticks_slot_overhead;
826 	uint32_t ticks_slot_offset;
827 	uint32_t volatile ret_cb;
828 	struct pdu_adv *pdu_scan;
829 	struct pdu_adv *pdu_adv;
830 	struct ll_adv_set *adv;
831 	struct lll_adv *lll;
832 	uint8_t hci_err;
833 	uint32_t ret;
834 
835 	if (!enable) {
836 		return disable(handle);
837 	}
838 
839 	adv = is_disabled_get(handle);
840 	if (!adv) {
841 		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
842 		 * Enabling advertising when it is already enabled can cause the
843 		 * random address to change. As the current implementation does
844 		 * does not update RPAs on every advertising enable, only on
845 		 * `rpa_timeout_ms` timeout, we are not going to implement the
846 		 * "can cause the random address to change" for legacy
847 		 * advertisements.
848 		 */
849 
850 		/* If HCI LE Set Extended Advertising Enable command is sent
851 		 * again for an advertising set while that set is enabled, the
852 		 * timer used for duration and the number of events counter are
853 		 * reset and any change to the random address shall take effect.
854 		 */
855 		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT) ||
856 		    IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
857 #if defined(CONFIG_BT_CTLR_ADV_EXT)
858 			if (ll_adv_cmds_is_ext()) {
859 				enum node_rx_type volatile *type;
860 
861 				adv = ull_adv_is_enabled_get(handle);
862 				if (!adv) {
863 					/* This should not be happening as
864 					 * is_disabled_get failed.
865 					 */
866 					return BT_HCI_ERR_CMD_DISALLOWED;
867 				}
868 
869 				/* Change random address in the primary or
870 				 * auxiliary PDU as necessary.
871 				 */
872 				lll = &adv->lll;
873 				pdu_adv = lll_adv_data_peek(lll);
874 				pdu_scan = lll_adv_scan_rsp_peek(lll);
875 				hci_err = adv_scan_pdu_addr_update(adv,
876 								   pdu_adv,
877 								   pdu_scan);
878 				if (hci_err) {
879 					return hci_err;
880 				}
881 
882 				if (!adv->lll.node_rx_adv_term) {
883 					/* This should not be happening,
884 					 * adv->is_enabled would be 0 if
885 					 * node_rx_adv_term is released back to
886 					 * pool.
887 					 */
888 					return BT_HCI_ERR_CMD_DISALLOWED;
889 				}
890 
891 				/* Check advertising not terminated */
892 				type = &adv->lll.node_rx_adv_term->hdr.type;
893 				if (*type == NODE_RX_TYPE_NONE) {
894 					/* Reset event counter, update duration,
895 					 * and max events
896 					 */
897 					adv_max_events_duration_set(adv,
898 						duration, max_ext_adv_evts);
899 				}
900 
901 				/* Check the counter reset did not race with
902 				 * advertising terminated.
903 				 */
904 				if (*type != NODE_RX_TYPE_NONE) {
905 					/* Race with advertising terminated */
906 					return BT_HCI_ERR_CMD_DISALLOWED;
907 				}
908 			}
909 #endif /* CONFIG_BT_CTLR_ADV_EXT */
910 
911 			return 0;
912 		}
913 
914 		/* Fail on being strict as a legacy controller, valid only under
915 		 * Bluetooth Specification v4.x.
916 		 * Bluetooth Specification v5.0 and above shall not fail to
917 		 * enable already enabled advertising.
918 		 */
919 		return BT_HCI_ERR_CMD_DISALLOWED;
920 	}
921 
922 	lll = &adv->lll;
923 
924 #if defined(CONFIG_BT_CTLR_PRIVACY)
925 	lll->rl_idx = FILTER_IDX_NONE;
926 
927 	/* Prepare filter accept list and optionally resolving list */
928 	ull_filter_adv_update(lll->filter_policy);
929 
930 	if (adv->own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_PUBLIC ||
931 	    adv->own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_RANDOM) {
932 		/* Look up the resolving list */
933 		lll->rl_idx = ull_filter_rl_find(adv->peer_addr_type,
934 						 adv->peer_addr, NULL);
935 
936 		if (lll->rl_idx != FILTER_IDX_NONE) {
937 			/* Generate RPAs if required */
938 			ull_filter_rpa_update(false);
939 		}
940 	}
941 #endif /* !CONFIG_BT_CTLR_PRIVACY */
942 
943 	pdu_adv = lll_adv_data_peek(lll);
944 	pdu_scan = lll_adv_scan_rsp_peek(lll);
945 
946 #if defined(CONFIG_BT_CTLR_ADV_EXT)
947 	if (!pdu_scan) {
948 		uint8_t err;
949 
950 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
951 			/* Should never happen */
952 			return BT_HCI_ERR_CMD_DISALLOWED;
953 		}
954 
955 		err = lll_adv_data_init(&adv->lll.scan_rsp);
956 		if (err) {
957 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
958 		}
959 
960 		pdu_scan = lll_adv_scan_rsp_peek(lll);
961 		init_pdu(pdu_scan, PDU_ADV_TYPE_SCAN_RSP);
962 	}
963 #endif /* CONFIG_BT_CTLR_ADV_EXT */
964 
965 	/* Update Bluetooth Device address in advertising and scan response
966 	 * PDUs.
967 	 */
968 	hci_err = adv_scan_pdu_addr_update(adv, pdu_adv, pdu_scan);
969 	if (hci_err) {
970 		return hci_err;
971 	}
972 
973 #if defined(CONFIG_BT_HCI_MESH_EXT)
974 	if (scan_delay) {
975 		if (ull_scan_is_enabled(0)) {
976 			return BT_HCI_ERR_CMD_DISALLOWED;
977 		}
978 
979 		lll->is_mesh = 1;
980 	}
981 #endif /* CONFIG_BT_HCI_MESH_EXT */
982 
983 #if defined(CONFIG_BT_PERIPHERAL)
984 	/* prepare connectable advertising */
985 	if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) ||
986 	    (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) ||
987 #if defined(CONFIG_BT_CTLR_ADV_EXT)
988 	    ((pdu_adv->type == PDU_ADV_TYPE_EXT_IND) &&
989 	     (pdu_adv->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN))
990 #else
991 	    0
992 #endif
993 	     ) {
994 		struct node_rx_pdu *node_rx;
995 		struct ll_conn *conn;
996 		struct lll_conn *conn_lll;
997 		void *link;
998 		int err;
999 
1000 		if (lll->conn) {
1001 			return BT_HCI_ERR_CMD_DISALLOWED;
1002 		}
1003 
1004 		link = ll_rx_link_alloc();
1005 		if (!link) {
1006 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1007 		}
1008 
1009 		node_rx = ll_rx_alloc();
1010 		if (!node_rx) {
1011 			ll_rx_link_release(link);
1012 
1013 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1014 		}
1015 
1016 		conn = ll_conn_acquire();
1017 		if (!conn) {
1018 			ll_rx_release(node_rx);
1019 			ll_rx_link_release(link);
1020 
1021 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1022 		}
1023 
1024 		conn_lll = &conn->lll;
1025 		conn_lll->handle = 0xFFFF;
1026 
1027 		if (!conn_lll->link_tx_free) {
1028 			conn_lll->link_tx_free = &conn_lll->link_tx;
1029 		}
1030 
1031 		memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
1032 			  &conn_lll->memq_tx.tail);
1033 		conn_lll->link_tx_free = NULL;
1034 
1035 		conn_lll->packet_tx_head_len = 0;
1036 		conn_lll->packet_tx_head_offset = 0;
1037 
1038 		conn_lll->sn = 0;
1039 		conn_lll->nesn = 0;
1040 		conn_lll->empty = 0;
1041 
1042 #if defined(CONFIG_BT_CTLR_PHY)
1043 		if (0) {
1044 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1045 		} else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1046 			conn_lll->phy_tx = lll->phy_s;
1047 			conn_lll->phy_tx_time = lll->phy_s;
1048 			conn_lll->phy_flags = lll->phy_flags;
1049 			conn_lll->phy_rx = lll->phy_s;
1050 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1051 		} else {
1052 			conn_lll->phy_tx = PHY_1M;
1053 			conn_lll->phy_tx_time = PHY_1M;
1054 			conn_lll->phy_flags = PHY_FLAGS_S8;
1055 			conn_lll->phy_rx = PHY_1M;
1056 		}
1057 #endif /* CONFIG_BT_CTLR_PHY */
1058 
1059 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1060 		conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1061 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1062 		conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1063 		conn_lll->rssi_sample_count = 0;
1064 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1065 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1066 
1067 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
1068 		conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
1069 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
1070 
1071 		/* FIXME: BEGIN: Move to ULL? */
1072 		conn_lll->role = 1;
1073 		conn_lll->periph.initiated = 0;
1074 		conn_lll->periph.cancelled = 0;
1075 		conn_lll->periph.forced = 0;
1076 		conn_lll->data_chan_sel = 0;
1077 		conn_lll->data_chan_use = 0;
1078 		conn_lll->event_counter = 0;
1079 
1080 		conn_lll->latency_prepare = 0;
1081 		conn_lll->latency_event = 0;
1082 		conn_lll->periph.latency_enabled = 0;
1083 		conn_lll->periph.window_widening_prepare_us = 0;
1084 		conn_lll->periph.window_widening_event_us = 0;
1085 		conn_lll->periph.window_size_prepare_us = 0;
1086 		/* FIXME: END: Move to ULL? */
1087 #if defined(CONFIG_BT_CTLR_CONN_META)
1088 		memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
1089 #endif /* CONFIG_BT_CTLR_CONN_META */
1090 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1091 		conn_lll->df_rx_cfg.is_initialized = 0U;
1092 		conn_lll->df_rx_cfg.hdr.elem_size = sizeof(struct lll_df_conn_rx_params);
1093 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1094 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1095 		conn_lll->df_tx_cfg.is_initialized = 0U;
1096 		conn_lll->df_tx_cfg.cte_rsp_en = 0U;
1097 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1098 		conn->connect_expire = 6;
1099 		conn->supervision_expire = 0;
1100 
1101 #if defined(CONFIG_BT_CTLR_LE_PING)
1102 		conn->apto_expire = 0U;
1103 		conn->appto_expire = 0U;
1104 #endif
1105 
1106 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
1107 		conn->own_id_addr_type = BT_ADDR_LE_NONE->type;
1108 		(void)memcpy(conn->own_id_addr, BT_ADDR_LE_NONE->a.val,
1109 			     sizeof(conn->own_id_addr));
1110 		conn->peer_id_addr_type = BT_ADDR_LE_NONE->type;
1111 		(void)memcpy(conn->peer_id_addr, BT_ADDR_LE_NONE->a.val,
1112 			     sizeof(conn->peer_id_addr));
1113 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1114 
1115 		/* Re-initialize the control procedure data structures */
1116 		ull_llcp_init(conn);
1117 
1118 		conn->llcp_terminate.reason_final = 0;
1119 		/* NOTE: use allocated link for generating dedicated
1120 		 * terminate ind rx node
1121 		 */
1122 		conn->llcp_terminate.node_rx.rx.hdr.link = link;
1123 
1124 #if defined(CONFIG_BT_CTLR_PHY)
1125 		conn->phy_pref_tx = ull_conn_default_phy_tx_get();
1126 		conn->phy_pref_rx = ull_conn_default_phy_rx_get();
1127 #endif /* CONFIG_BT_CTLR_PHY */
1128 
1129 #if defined(CONFIG_BT_CTLR_LE_ENC)
1130 		conn->pause_rx_data = 0U;
1131 #endif /* CONFIG_BT_CTLR_LE_ENC */
1132 
1133 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1134 		uint8_t phy_in_use = PHY_1M;
1135 
1136 
1137 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1138 		if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1139 			phy_in_use = lll->phy_s;
1140 		}
1141 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1142 
1143 		ull_dle_init(conn, phy_in_use);
1144 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1145 
1146 		/* Re-initialize the Tx Q */
1147 		ull_tx_q_init(&conn->tx_q);
1148 
1149 		/* NOTE: using same link as supplied for terminate ind */
1150 		adv->link_cc_free = link;
1151 		adv->node_rx_cc_free = node_rx;
1152 		lll->conn = conn_lll;
1153 
1154 		ull_hdr_init(&conn->ull);
1155 		lll_hdr_init(&conn->lll, conn);
1156 
1157 		/* wait for stable clocks */
1158 		err = lll_clock_wait();
1159 		if (err) {
1160 			conn_release(adv);
1161 
1162 			return BT_HCI_ERR_HW_FAILURE;
1163 		}
1164 	}
1165 #endif /* CONFIG_BT_PERIPHERAL */
1166 
1167 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1168 	if (ll_adv_cmds_is_ext()) {
1169 		struct node_rx_pdu *node_rx_adv_term;
1170 		void *link_adv_term;
1171 
1172 		/* The alloc here used for ext adv termination event */
1173 		link_adv_term = ll_rx_link_alloc();
1174 		if (!link_adv_term) {
1175 #if defined(CONFIG_BT_PERIPHERAL)
1176 			if (adv->lll.conn) {
1177 				conn_release(adv);
1178 			}
1179 #endif /* CONFIG_BT_PERIPHERAL */
1180 
1181 			/* TODO: figure out right return value */
1182 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1183 		}
1184 
1185 		node_rx_adv_term = ll_rx_alloc();
1186 		if (!node_rx_adv_term) {
1187 #if defined(CONFIG_BT_PERIPHERAL)
1188 			if (adv->lll.conn) {
1189 				conn_release(adv);
1190 			}
1191 #endif /* CONFIG_BT_PERIPHERAL */
1192 
1193 			ll_rx_link_release(link_adv_term);
1194 
1195 			/* TODO: figure out right return value */
1196 			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1197 		}
1198 
1199 		node_rx_adv_term->hdr.type = NODE_RX_TYPE_NONE;
1200 
1201 		node_rx_adv_term->hdr.link = (void *)link_adv_term;
1202 		adv->lll.node_rx_adv_term = (void *)node_rx_adv_term;
1203 
1204 		if (0) {
1205 #if defined(CONFIG_BT_PERIPHERAL)
1206 		} else if (lll->is_hdcd) {
1207 			adv_max_events_duration_set(adv, 0U, 0U);
1208 #endif /* CONFIG_BT_PERIPHERAL */
1209 		} else {
1210 			adv_max_events_duration_set(adv, duration,
1211 						    max_ext_adv_evts);
1212 		}
1213 	} else {
1214 		adv->lll.node_rx_adv_term = NULL;
1215 		adv_max_events_duration_set(adv, 0U, 0U);
1216 	}
1217 
1218 	const uint8_t phy = lll->phy_p;
1219 	const uint8_t phy_flags = lll->phy_flags;
1220 
1221 	adv->event_counter = 0U;
1222 #else
1223 	/* Legacy ADV only supports LE_1M PHY */
1224 	const uint8_t phy = PHY_1M;
1225 	const uint8_t phy_flags = 0U;
1226 #endif
1227 
1228 	/* For now we adv on all channels enabled in channel map */
1229 	uint8_t ch_map = lll->chan_map;
1230 	const uint8_t adv_chn_cnt = util_ones_count_get(&ch_map, sizeof(ch_map));
1231 
1232 	if (adv_chn_cnt == 0) {
1233 		/* ADV needs at least one channel */
1234 		goto failure_cleanup;
1235 	}
1236 
1237 	/* Calculate the advertising time reservation */
1238 	uint16_t time_us = adv_time_get(pdu_adv, pdu_scan, adv_chn_cnt, phy,
1239 					phy_flags);
1240 
1241 	uint16_t interval = adv->interval;
1242 #if defined(CONFIG_BT_HCI_MESH_EXT)
1243 	if (lll->is_mesh) {
1244 		uint16_t interval_min_us;
1245 
1246 		_radio.advertiser.retry = retry;
1247 		_radio.advertiser.scan_delay_ms = scan_delay;
1248 		_radio.advertiser.scan_window_ms = scan_window;
1249 
1250 		interval_min_us = time_us +
1251 				  (scan_delay + scan_window) * USEC_PER_MSEC;
1252 		if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
1253 			interval = DIV_ROUND_UP(interval_min_us,
1254 						    SCAN_INT_UNIT_US);
1255 		}
1256 
1257 		/* passive scanning */
1258 		_radio.scanner.type = 0;
1259 
1260 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1261 		/* TODO: Coded PHY support */
1262 		_radio.scanner.phy = 0;
1263 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1264 
1265 #if defined(CONFIG_BT_CTLR_PRIVACY)
1266 		/* TODO: Privacy support */
1267 		_radio.scanner.rpa_gen = 0;
1268 		_radio.scanner.rl_idx = rl_idx;
1269 #endif /* CONFIG_BT_CTLR_PRIVACY */
1270 
1271 		_radio.scanner.filter_policy = filter_policy;
1272 	}
1273 #endif /* CONFIG_BT_HCI_MESH_EXT */
1274 
1275 	/* Initialize ULL context before radio event scheduling is started. */
1276 	ull_hdr_init(&adv->ull);
1277 
1278 	/* TODO: active_to_start feature port */
1279 	adv->ull.ticks_active_to_start = 0;
1280 	adv->ull.ticks_prepare_to_start =
1281 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1282 	adv->ull.ticks_preempt_to_start =
1283 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1284 	adv->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
1285 
1286 	ticks_slot_offset = MAX(adv->ull.ticks_active_to_start,
1287 				adv->ull.ticks_prepare_to_start);
1288 
1289 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1290 		ticks_slot_overhead = ticks_slot_offset;
1291 	} else {
1292 		ticks_slot_overhead = 0;
1293 	}
1294 
1295 #if !defined(CONFIG_BT_HCI_MESH_EXT)
1296 	ticks_anchor = ticker_ticks_now_get();
1297 	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1298 
1299 #else /* CONFIG_BT_HCI_MESH_EXT */
1300 	if (!at_anchor) {
1301 		ticks_anchor = ticker_ticks_now_get();
1302 	}
1303 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1304 
1305 	/* High Duty Cycle Directed Advertising if interval is 0. */
1306 #if defined(CONFIG_BT_PERIPHERAL)
1307 	lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND);
1308 	if (lll->is_hdcd) {
1309 		ret_cb = TICKER_STATUS_BUSY;
1310 
1311 #if defined(CONFIG_BT_TICKER_EXT)
1312 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1313 		ll_adv_ticker_ext[handle].ticks_slot_window = 0;
1314 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
1315 
1316 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1317 		ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1318 		ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1319 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1320 
1321 		ret = ticker_start_ext(
1322 #else /* !CONFIG_BT_TICKER_EXT */
1323 		ret = ticker_start(
1324 #endif /* !CONFIG_BT_TICKER_EXT */
1325 				   TICKER_INSTANCE_ID_CTLR,
1326 				   TICKER_USER_ID_THREAD,
1327 				   (TICKER_ID_ADV_BASE + handle),
1328 				   ticks_anchor, 0,
1329 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1330 				   TICKER_NULL_REMAINDER, TICKER_NULL_LAZY,
1331 				   (adv->ull.ticks_slot + ticks_slot_overhead),
1332 				   ticker_cb, adv,
1333 				   ull_ticker_status_give, (void *)&ret_cb
1334 #if defined(CONFIG_BT_TICKER_EXT)
1335 				   ,
1336 				   &ll_adv_ticker_ext[handle]
1337 #endif /* CONFIG_BT_TICKER_EXT */
1338 				   );
1339 		ret = ull_ticker_status_take(ret, &ret_cb);
1340 		if (ret != TICKER_STATUS_SUCCESS) {
1341 			goto failure_cleanup;
1342 		}
1343 
1344 		ret_cb = TICKER_STATUS_BUSY;
1345 		ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1346 				   TICKER_USER_ID_THREAD,
1347 				   TICKER_ID_ADV_STOP, ticks_anchor,
1348 				   HAL_TICKER_US_TO_TICKS(ticks_slot_offset +
1349 							  (1280 * 1000)),
1350 				   TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER,
1351 				   TICKER_NULL_LAZY, TICKER_NULL_SLOT,
1352 				   ticker_stop_cb, adv,
1353 				   ull_ticker_status_give, (void *)&ret_cb);
1354 	} else
1355 #endif /* CONFIG_BT_PERIPHERAL */
1356 	{
1357 		const uint32_t ticks_slot = adv->ull.ticks_slot +
1358 					 ticks_slot_overhead;
1359 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1360 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1361 		uint8_t pri_idx, sec_idx;
1362 
1363 		/* Add sync_info into auxiliary PDU */
1364 		if (lll->sync) {
1365 			sync = HDR_LLL2ULL(lll->sync);
1366 			if (sync->is_enabled && !sync->is_started) {
1367 				struct pdu_adv_sync_info *sync_info;
1368 				uint8_t value[1 + sizeof(sync_info)];
1369 				uint8_t err;
1370 
1371 				err = ull_adv_aux_hdr_set_clear(adv,
1372 						ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
1373 						0U, value, &pri_idx, &sec_idx);
1374 				if (err) {
1375 					return err;
1376 				}
1377 
1378 				/* First byte in the length-value encoded
1379 				 * parameter is size of sync_info structure,
1380 				 * followed by pointer to sync_info in the
1381 				 * PDU.
1382 				 */
1383 				memcpy(&sync_info, &value[1], sizeof(sync_info));
1384 				ull_adv_sync_info_fill(sync, sync_info);
1385 			} else {
1386 				/* Do not start periodic advertising */
1387 				sync = NULL;
1388 			}
1389 		}
1390 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1391 
1392 		if (lll->aux) {
1393 			struct lll_adv_aux *lll_aux = lll->aux;
1394 			uint32_t ticks_slot_overhead_aux;
1395 			uint32_t ticks_anchor_aux;
1396 			uint64_t interval_us;
1397 
1398 			aux = HDR_LLL2ULL(lll_aux);
1399 
1400 			/* Schedule auxiliary PDU after primary channel
1401 			 * PDUs.
1402 			 * Reduce the MAFS offset by the Event Overhead
1403 			 * so that actual radio air packet start as
1404 			 * close as possible after the MAFS gap.
1405 			 * Add 2 ticks offset as compensation towards
1406 			 * the +/- 1 tick ticker scheduling jitter due
1407 			 * to accumulation of remainder to maintain
1408 			 * average ticker interval.
1409 			 */
1410 			ticks_anchor_aux =
1411 				ticks_anchor + ticks_slot +
1412 				HAL_TICKER_US_TO_TICKS(
1413 					MAX(EVENT_MAFS_US,
1414 					    EVENT_OVERHEAD_START_US) -
1415 					EVENT_OVERHEAD_START_US +
1416 					(EVENT_TICKER_RES_MARGIN_US << 1));
1417 
1418 			ticks_slot_overhead_aux =
1419 				ull_adv_aux_evt_init(aux, &ticks_anchor_aux);
1420 
1421 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1422 			/* Start periodic advertising if enabled and not already
1423 			 * started.
1424 			 */
1425 			if (sync) {
1426 				uint32_t ticks_slot_overhead2;
1427 				uint32_t ticks_slot_aux;
1428 
1429 #if defined(CONFIG_BT_CTLR_ADV_RESERVE_MAX)
1430 				uint32_t us_slot;
1431 
1432 				us_slot = ull_adv_aux_time_get(aux,
1433 						PDU_AC_PAYLOAD_SIZE_MAX,
1434 						PDU_AC_PAYLOAD_SIZE_MAX);
1435 				ticks_slot_aux =
1436 					HAL_TICKER_US_TO_TICKS(us_slot) +
1437 					ticks_slot_overhead_aux;
1438 #else
1439 				ticks_slot_aux = aux->ull.ticks_slot +
1440 						 ticks_slot_overhead_aux;
1441 #endif
1442 
1443 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET) || \
1444 	(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET == 0)
1445 				/* Schedule periodic advertising PDU after
1446 				 * auxiliary PDUs.
1447 				 * Reduce the MAFS offset by the Event Overhead
1448 				 * so that actual radio air packet start as
1449 				 * close as possible after the MAFS gap.
1450 				 * Add 2 ticks offset as compensation towards
1451 				 * the +/- 1 tick ticker scheduling jitter due
1452 				 * to accumulation of remainder to maintain
1453 				 * average ticker interval.
1454 				 */
1455 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1456 					ticks_slot_aux +
1457 					HAL_TICKER_US_TO_TICKS(
1458 						MAX(EVENT_MAFS_US,
1459 						    EVENT_OVERHEAD_START_US) -
1460 						EVENT_OVERHEAD_START_US +
1461 						(EVENT_TICKER_RES_MARGIN_US << 1));
1462 
1463 #else /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1464 				uint32_t ticks_anchor_sync = ticks_anchor_aux +
1465 					HAL_TICKER_US_TO_TICKS(
1466 						CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET);
1467 
1468 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1469 
1470 				ticks_slot_overhead2 = ull_adv_sync_evt_init(adv, sync, NULL);
1471 				ret = ull_adv_sync_start(adv, sync,
1472 							 ticks_anchor_sync,
1473 							 ticks_slot_overhead2);
1474 				if (ret) {
1475 					goto failure_cleanup;
1476 				}
1477 
1478 				sync_is_started = 1U;
1479 
1480 				lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
1481 				lll_adv_data_enqueue(lll, pri_idx);
1482 			} else {
1483 				/* TODO: Find the anchor before the group of
1484 				 *       active Periodic Advertising events, so
1485 				 *       that auxiliary sets are grouped such
1486 				 *       that auxiliary sets and Periodic
1487 				 *       Advertising sets are non-overlapping
1488 				 *       for the same event interval.
1489 				 */
1490 			}
1491 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1492 
1493 			/* Keep aux interval equal or lower than primary PDU
1494 			 * interval * (max_skip + 1).
1495 			 * Use periodic interval units to represent the
1496 			 * periodic behavior of scheduling of AUX_ADV_IND PDUs
1497 			 * so that it is grouped with similar interval units
1498 			 * used for ACL Connections, Periodic Advertising and
1499 			 * BIG radio events.
1500 			 */
1501 			interval_us = (uint64_t)adv->interval * ADV_INT_UNIT_US;
1502 
1503 			if (adv->max_skip == 0U) {
1504 				/* Special case to keep behaviour unchanged from
1505 				 * before max_skip was implemented; In this case
1506 				 * add ULL_ADV_RANDOM_DELAY and round up for a
1507 				 * aux interval equal or higher instead
1508 				 */
1509 				aux->interval = DIV_ROUND_UP(interval_us +
1510 						     HAL_TICKER_TICKS_TO_US(ULL_ADV_RANDOM_DELAY),
1511 						     PERIODIC_INT_UNIT_US);
1512 			} else {
1513 				aux->interval = (interval_us * (adv->max_skip + 1))
1514 						 / PERIODIC_INT_UNIT_US;
1515 			}
1516 
1517 			ret = ull_adv_aux_start(aux, ticks_anchor_aux,
1518 						ticks_slot_overhead_aux);
1519 			if (ret) {
1520 				goto failure_cleanup;
1521 			}
1522 
1523 			aux_is_started = 1U;
1524 		}
1525 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1526 
1527 		ret_cb = TICKER_STATUS_BUSY;
1528 
1529 #if defined(CONFIG_BT_TICKER_EXT)
1530 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1531 		ll_adv_ticker_ext[handle].ticks_slot_window =
1532 			ULL_ADV_RANDOM_DELAY + ticks_slot;
1533 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
1534 
1535 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1536 		if (lll->aux) {
1537 			uint8_t aux_handle = ull_adv_aux_handle_get(aux);
1538 
1539 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_ID_ADV_AUX_BASE +
1540 								  aux_handle;
1541 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1542 		} else {
1543 			ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1544 			ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1545 		}
1546 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1547 
1548 		ret = ticker_start_ext(
1549 #else /* !CONFIG_BT_TICKER_EXT */
1550 		ret = ticker_start(
1551 #endif /* !CONFIG_BT_TICKER_EXT */
1552 				   TICKER_INSTANCE_ID_CTLR,
1553 				   TICKER_USER_ID_THREAD,
1554 				   (TICKER_ID_ADV_BASE + handle),
1555 				   ticks_anchor, 0,
1556 				   HAL_TICKER_US_TO_TICKS((uint64_t)interval *
1557 							  ADV_INT_UNIT_US),
1558 				   TICKER_NULL_REMAINDER,
1559 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1560 	!defined(CONFIG_BT_CTLR_LOW_LAT)
1561 				   /* Force expiry to ensure timing update */
1562 				   TICKER_LAZY_MUST_EXPIRE,
1563 #else
1564 				   TICKER_NULL_LAZY,
1565 #endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
1566 				   ticks_slot,
1567 				   ticker_cb, adv,
1568 				   ull_ticker_status_give, (void *)&ret_cb
1569 #if defined(CONFIG_BT_TICKER_EXT)
1570 				   ,
1571 				   &ll_adv_ticker_ext[handle]
1572 #endif /* CONFIG_BT_TICKER_EXT */
1573 				   );
1574 	}
1575 
1576 	ret = ull_ticker_status_take(ret, &ret_cb);
1577 	if (ret != TICKER_STATUS_SUCCESS) {
1578 		goto failure_cleanup;
1579 	}
1580 
1581 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1582 	if (aux_is_started) {
1583 		aux->is_started = aux_is_started;
1584 
1585 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1586 		if (sync_is_started) {
1587 			sync->is_started = sync_is_started;
1588 		}
1589 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1590 	}
1591 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1592 
1593 	adv->is_enabled = 1;
1594 
1595 #if defined(CONFIG_BT_CTLR_PRIVACY)
1596 #if defined(CONFIG_BT_HCI_MESH_EXT)
1597 	if (_radio.advertiser.is_mesh) {
1598 		_radio.scanner.is_enabled = 1;
1599 
1600 		ull_filter_adv_scan_state_cb(BIT(0) | BIT(1));
1601 	}
1602 #else /* !CONFIG_BT_HCI_MESH_EXT */
1603 	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
1604 		ull_filter_adv_scan_state_cb(BIT(0));
1605 	}
1606 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1607 #endif /* CONFIG_BT_CTLR_PRIVACY */
1608 
1609 	return 0;
1610 
1611 failure_cleanup:
1612 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1613 	if (aux_is_started) {
1614 		/* TODO: Stop extended advertising and release resources */
1615 	}
1616 
1617 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1618 	if (sync_is_started) {
1619 		/* TODO: Stop periodic advertising and release resources */
1620 	}
1621 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1622 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1623 
1624 #if defined(CONFIG_BT_PERIPHERAL)
1625 	if (adv->lll.conn) {
1626 		conn_release(adv);
1627 	}
1628 #endif /* CONFIG_BT_PERIPHERAL */
1629 
1630 	return BT_HCI_ERR_CMD_DISALLOWED;
1631 }
1632 
/* Initialize the ULL advertising module.
 *
 * Initializes extended advertising sub-modules (auxiliary sets and, when
 * enabled, periodic advertising) before performing the common reset of the
 * advertising contexts.
 *
 * Returns 0 on success, else a negative error code propagated from the
 * sub-module initialization or from init_reset().
 */
int ull_adv_init(void)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	/* Auxiliary set support compiled in; only initialize when at least
	 * one auxiliary set is configured.
	 */
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		err = ull_adv_aux_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	/* Periodic advertising requires auxiliary set support */
	err = ull_adv_sync_init();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Common (re)initialization of the advertising set contexts */
	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}
1662 
1663 uint8_t ll_adv_disable_all(void)
1664 {
1665 	uint8_t handle;
1666 
1667 	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1668 		(void)disable(handle);
1669 	}
1670 
1671 	return 0U;
1672 }
1673 
/* Reset the ULL advertising module state.
 *
 * Disables all advertising sets, restores the HCI extended/legacy command
 * mode selection (when raw HCI is used) and resets the periodic advertising
 * sub-module when enabled.
 *
 * Returns 0 on success, else the error code from ull_adv_sync_reset().
 */
int ull_adv_reset(void)
{
	(void)ll_adv_disable_all();

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_HCI_RAW)
	/* Allow either legacy or extended advertising HCI commands again */
	ll_adv_cmds = LL_ADV_CMDS_ANY;
#endif
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	{
		int err;

		err = ull_adv_sync_reset();
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
1696 
/* Finalize the reset of the ULL advertising module.
 *
 * Finalizes auxiliary and periodic advertising sub-module resets, clears the
 * per-set extended advertising state (created flag, aux and sync references)
 * and resets the double-buffered advertising and scan response data, before
 * re-running the common context initialization.
 *
 * Returns 0 on success, else a propagated error code.
 */
int ull_adv_reset_finalize(void)
{
	uint8_t handle;
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		err = ull_adv_aux_reset_finalize();
		if (err) {
			return err;
		}
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
		err = ull_adv_sync_reset_finalize();
		if (err) {
			return err;
		}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
	}
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		struct ll_adv_set *adv = &ll_adv[handle];
		struct lll_adv *lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Forget the Host-created state and detach the auxiliary and
		 * periodic contexts released by the sub-module resets above.
		 */
		adv->is_created = 0;
		lll->aux = NULL;
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
		lll->sync = NULL;
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		/* Reset the double-buffered PDU data of each set */
		lll_adv_data_reset(&lll->adv_data);
		lll_adv_data_reset(&lll->scan_rsp);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}
1741 
1742 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle)
1743 {
1744 	if (handle >= BT_CTLR_ADV_SET) {
1745 		return NULL;
1746 	}
1747 
1748 	return &ll_adv[handle];
1749 }
1750 
1751 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv)
1752 {
1753 	return ((uint8_t *)adv - (uint8_t *)ll_adv) / sizeof(*adv);
1754 }
1755 
1756 uint16_t ull_adv_lll_handle_get(struct lll_adv *lll)
1757 {
1758 	return ull_adv_handle_get(HDR_LLL2ULL(lll));
1759 }
1760 
1761 inline struct ll_adv_set *ull_adv_is_enabled_get(uint8_t handle)
1762 {
1763 	struct ll_adv_set *adv;
1764 
1765 	adv = ull_adv_set_get(handle);
1766 	if (!adv || !adv->is_enabled) {
1767 		return NULL;
1768 	}
1769 
1770 	return adv;
1771 }
1772 
1773 int ull_adv_is_enabled(uint8_t handle)
1774 {
1775 	struct ll_adv_set *adv;
1776 
1777 	adv = ull_adv_is_enabled_get(handle);
1778 
1779 	return adv != NULL;
1780 }
1781 
1782 uint32_t ull_adv_filter_pol_get(uint8_t handle)
1783 {
1784 	struct ll_adv_set *adv;
1785 
1786 	adv = ull_adv_is_enabled_get(handle);
1787 	if (!adv) {
1788 		return 0;
1789 	}
1790 
1791 	return adv->lll.filter_policy;
1792 }
1793 
1794 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1795 struct ll_adv_set *ull_adv_is_created_get(uint8_t handle)
1796 {
1797 	struct ll_adv_set *adv;
1798 
1799 	adv = ull_adv_set_get(handle);
1800 	if (!adv || !adv->is_created) {
1801 		return NULL;
1802 	}
1803 
1804 	return adv;
1805 }
1806 
1807 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1808 void ull_adv_aux_created(struct ll_adv_set *adv)
1809 {
1810 	if (adv->lll.aux && adv->is_enabled) {
1811 		uint8_t aux_handle = ull_adv_aux_handle_get(HDR_LLL2ULL(adv->lll.aux));
1812 		uint8_t handle = ull_adv_handle_get(adv);
1813 
1814 		ticker_update_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1815 			   (TICKER_ID_ADV_BASE + handle), 0, 0, 0, 0, 0, 0,
1816 			   ticker_update_op_cb, adv, 0,
1817 			   TICKER_ID_ADV_AUX_BASE + aux_handle);
1818 	}
1819 }
1820 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1821 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1822 
/* Set the legacy advertising data (AD Data) of an advertising set.
 *
 * Writes the new AD Data into the spare buffer of the double-buffered
 * advertising PDU, updates the set's time reservation when it is already
 * enabled, and finally enqueues the new PDU for use by the LLL.
 *
 * adv:  advertising set to update.
 * len:  length of data, at most PDU_AC_LEG_DATA_SIZE_MAX.
 * data: AD Data bytes to place after the advertiser address in the PDU.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM on excessive length,
 * BT_HCI_ERR_CMD_DISALLOWED when data cannot be applied to the current PDU
 * type, or an error from ull_adv_time_update().
 */
uint8_t ull_adv_data_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	/* Check invalid AD Data length */
	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	prev = lll_adv_data_peek(&adv->lll);

	/* Dont update data if directed, back it up */
	if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
	    (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	     (prev->type == PDU_ADV_TYPE_EXT_IND))) {
#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		/* Update the backup AD Data */
		adv->ad_data_backup.len = len;
		memcpy(adv->ad_data_backup.data, data, adv->ad_data_backup.len);
		return 0;

#else /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
	}

	/* update adv pdu fields. */
	pdu = lll_adv_data_alloc(&adv->lll, &idx);

	/* check for race condition with LLL ISR */
	if (IS_ENABLED(CONFIG_ASSERT)) {
		uint8_t idx_test;

		/* A second alloc must return the same spare buffer index;
		 * a mismatch indicates the LLL consumed a buffer while this
		 * update was in progress.
		 */
		lll_adv_data_alloc(&adv->lll, &idx_test);
		__ASSERT((idx == idx_test), "Probable AD Data Corruption.\n");
	}

	/* Carry over the PDU header fields from the previous PDU */
	pdu->type = prev->type;
	pdu->rfu = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		pdu->chan_sel = prev->chan_sel;
	} else {
		pdu->chan_sel = 0U;
	}

	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = prev->rx_addr;
	/* Keep the advertiser address, replace only the AD Data payload */
	memmove(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE);
	memcpy(&pdu->adv_ind.data[0], data, len);
	pdu->len = BDADDR_SIZE + len;

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_scan = lll_adv_scan_rsp_peek(lll);

		err = ull_adv_time_update(adv, pdu, pdu_scan);
		if (err) {
			return err;
		}
	}

	/* Publish the new PDU for the LLL to pick up */
	lll_adv_data_enqueue(&adv->lll, idx);

	return 0;
}
1897 
/* Set the scan response data of an advertising set.
 *
 * Writes the new scan response data into the spare buffer of the
 * double-buffered SCAN_RSP PDU, lazily initializing the scan response
 * buffers on first use, updates the set's time reservation for scannable
 * advertising types when enabled, and enqueues the new PDU.
 *
 * adv:  advertising set to update.
 * len:  length of data, at most PDU_AC_LEG_DATA_SIZE_MAX.
 * data: scan response data bytes to place after the advertiser address.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM on excessive length,
 * BT_HCI_ERR_MEM_CAPACITY_EXCEEDED on buffer init failure, or an error from
 * ull_adv_time_update().
 */
uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* update scan pdu fields. */
	prev = lll_adv_scan_rsp_peek(&adv->lll);
	if (!prev) {
		uint8_t err;

		/* Scan response buffers not yet allocated; initialize the
		 * double buffer and seed it with an empty SCAN_RSP PDU.
		 */
		err = lll_adv_data_init(&adv->lll.scan_rsp);
		if (err) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}

		prev = lll_adv_scan_rsp_peek(&adv->lll);
		init_pdu(prev, PDU_ADV_TYPE_SCAN_RSP);
	}

	/* Fill the spare buffer, keeping the advertiser address from the
	 * previous PDU and replacing only the scan response payload.
	 */
	pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
	pdu->type = PDU_ADV_TYPE_SCAN_RSP;
	pdu->rfu = 0;
	pdu->chan_sel = 0;
	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = 0;
	pdu->len = BDADDR_SIZE + len;
	memmove(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE);
	memcpy(&pdu->scan_rsp.data[0], data, len);

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_adv_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_adv_scan = lll_adv_data_peek(lll);

		/* Only scannable advertising types carry a scan response */
		if ((pdu_adv_scan->type == PDU_ADV_TYPE_ADV_IND) ||
		    (pdu_adv_scan->type == PDU_ADV_TYPE_SCAN_IND)) {
			err = ull_adv_time_update(adv, pdu_adv_scan, pdu);
			if (err) {
				return err;
			}
		}
	}

	/* Publish the new PDU for the LLL to pick up */
	lll_adv_scan_rsp_enqueue(&adv->lll, idx);

	return 0;
}
1955 
1956 static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_window,
1957 				   uint32_t ticks_delay_window_offset,
1958 				   uint32_t ticks_adjust_minus,
1959 				   ticker_op_func fp_op_func)
1960 {
1961 	uint32_t random_delay;
1962 	uint32_t ret;
1963 
1964 	/* Get pseudo-random number in the range [0..ticks_delay_window].
1965 	 * Please note that using modulo of 2^32 sample space has an uneven
1966 	 * distribution, slightly favoring smaller values.
1967 	 */
1968 	lll_rand_isr_get(&random_delay, sizeof(random_delay));
1969 	random_delay %= ticks_delay_window;
1970 	random_delay += (ticks_delay_window_offset + 1);
1971 
1972 	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
1973 			    TICKER_USER_ID_ULL_HIGH,
1974 			    TICKER_ID_ADV_BASE + ull_adv_handle_get(adv),
1975 			    random_delay,
1976 			    ticks_adjust_minus, 0, 0, 0, 0,
1977 			    fp_op_func, adv);
1978 
1979 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1980 		  (ret == TICKER_STATUS_BUSY) ||
1981 		  (fp_op_func == NULL));
1982 
1983 	return random_delay;
1984 }
1985 
1986 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
1987 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1988 void ull_adv_done(struct node_rx_event_done *done)
1989 {
1990 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1991 	struct lll_adv_aux *lll_aux;
1992 	struct node_rx_pdu *rx;
1993 	uint8_t handle;
1994 	uint32_t ret;
1995 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1996 	struct ll_adv_set *adv;
1997 	struct lll_adv *lll;
1998 
1999 	/* Get reference to ULL context */
2000 	adv = CONTAINER_OF(done->param, struct ll_adv_set, ull);
2001 	lll = &adv->lll;
2002 
2003 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2004 	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && done->extra.result != DONE_COMPLETED) {
2005 		/* Event aborted or too late - try to re-schedule */
2006 		uint32_t ticks_elapsed;
2007 		uint32_t ticks_now;
2008 		uint32_t delay_remain;
2009 
2010 		const uint32_t prepare_overhead =
2011 			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
2012 		const uint32_t ticks_adv_airtime = adv->ticks_at_expire +
2013 			prepare_overhead;
2014 
2015 		ticks_elapsed = 0U;
2016 
2017 		ticks_now = cntr_cnt_get();
2018 		if ((int32_t)(ticks_now - ticks_adv_airtime) > 0) {
2019 			ticks_elapsed = ticks_now - ticks_adv_airtime;
2020 		}
2021 
2022 		if (adv->delay_at_expire + ticks_elapsed <= ULL_ADV_RANDOM_DELAY) {
2023 			/* The perturbation window is still open */
2024 			delay_remain = ULL_ADV_RANDOM_DELAY - (adv->delay_at_expire +
2025 							       ticks_elapsed);
2026 		} else {
2027 			delay_remain = 0U;
2028 		}
2029 
2030 		/* Check if we have enough time to re-schedule */
2031 		if (delay_remain > prepare_overhead) {
2032 			uint32_t interval_us = adv->interval * ADV_INT_UNIT_US;
2033 			uint32_t ticks_adjust_minus;
2034 			uint32_t random_delay;
2035 
2036 			/* Get negative ticker adjustment needed to pull back ADV one
2037 			 * interval plus the randomized delay. This means that the ticker
2038 			 * will be updated to expire in time frame of now + start
2039 			 * overhead, until 10 ms window is exhausted.
2040 			 */
2041 			ticks_adjust_minus = HAL_TICKER_US_TO_TICKS(interval_us) + adv->delay;
2042 
2043 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2044 			if (adv->remain_duration_us > interval_us) {
2045 				/* Reset remain_duration_us to value before last ticker expire
2046 				 * to correct for the re-scheduling
2047 				 */
2048 				adv->remain_duration_us += interval_us +
2049 							   HAL_TICKER_TICKS_TO_US(
2050 								adv->delay_at_expire);
2051 			}
2052 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2053 
2054 			/* Apply random delay in range [prepare_overhead..delay_remain].
2055 			 * NOTE: This ticker_update may fail if update races with
2056 			 * ticker_stop, e.g. from ull_periph_setup. This is not a problem
2057 			 * and we can safely ignore the operation result.
2058 			 */
2059 			random_delay = ticker_update_rand(adv, delay_remain - prepare_overhead,
2060 							  prepare_overhead, ticks_adjust_minus,
2061 							  NULL);
2062 
2063 			/* Delay from ticker_update_rand is in addition to the last random delay */
2064 			adv->delay = random_delay;
2065 			adv->delay += adv->delay_at_expire;
2066 
2067 			/* Score of the event was increased due to the result, but since
2068 			 * we're getting a another chance we'll set it back.
2069 			 */
2070 			adv->lll.hdr.score -= 1;
2071 		}
2072 	}
2073 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2074 	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && adv->lll.aux) {
2075 		/* Primary event of extended advertising done - wait for aux done */
2076 		return;
2077 	}
2078 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2079 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
2080 
2081 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2082 	if (adv->max_events && (adv->event_counter >= adv->max_events)) {
2083 		adv->max_events = 0U;
2084 
2085 		rx = (void *)lll->node_rx_adv_term;
2086 		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_LIMIT_REACHED;
2087 	} else if (adv->remain_duration_us &&
2088 		   (adv->remain_duration_us <=
2089 		    ((uint64_t)adv->interval * ADV_INT_UNIT_US))) {
2090 		adv->remain_duration_us = 0U;
2091 
2092 		rx = (void *)lll->node_rx_adv_term;
2093 		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
2094 	} else {
2095 		return;
2096 	}
2097 
2098 	handle = ull_adv_handle_get(adv);
2099 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2100 
2101 	rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
2102 	rx->hdr.handle = handle;
2103 	rx->rx_ftr.param_adv_term.conn_handle = 0xffff;
2104 	rx->rx_ftr.param_adv_term.num_events = adv->event_counter;
2105 
2106 	lll_aux = lll->aux;
2107 	if (lll_aux) {
2108 		struct ll_adv_aux_set *aux;
2109 		uint8_t aux_handle;
2110 
2111 		aux = HDR_LLL2ULL(lll_aux);
2112 		aux_handle = ull_adv_aux_handle_get(aux);
2113 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2114 				  TICKER_USER_ID_ULL_HIGH,
2115 				  (TICKER_ID_ADV_AUX_BASE + aux_handle),
2116 				  ticker_stop_aux_op_cb, adv);
2117 	} else {
2118 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2119 				  TICKER_USER_ID_ULL_HIGH,
2120 				  (TICKER_ID_ADV_BASE + handle),
2121 				  ticker_stop_ext_op_cb, adv);
2122 	}
2123 
2124 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2125 		  (ret == TICKER_STATUS_BUSY));
2126 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2127 }
2128 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2129 
/* Update AdvA (and TargetA, when present) in the given advertising PDU
 * with the currently applicable local address, and return a pointer to
 * the AdvA field that was written.
 *
 * @param adv Advertising set the PDU belongs to.
 * @param pdu Advertising channel PDU to patch in place.
 *
 * @return Pointer to the AdvA bytes inside @p pdu.
 */
const uint8_t *ull_adv_pdu_update_addrs(struct ll_adv_set *adv,
					struct pdu_adv *pdu)
{
	const uint8_t *adv_addr;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* Snapshot the extended header flags; when no extended header is
	 * present only the flag octet (first byte) is cleared, as that is
	 * the only part consulted below (tgt_addr bit).
	 */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}
#endif

	adv_addr = adva_update(adv, pdu);

	/* Update TargetA only if directed advertising PDU is supplied. Note
	 * that AUX_SCAN_REQ does not have TargetA flag set so it will be
	 * ignored here as expected.
	 */
	if ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	    ((pdu->type == PDU_ADV_TYPE_EXT_IND) && hdr_flags.tgt_addr) ||
#endif
	    0) {
		tgta_update(adv, pdu);
	}

	return adv_addr;
}
2163 
/* Recompute the advertising radio event time reservation after the
 * advertising or scan response PDU changed, and resize the ticker slot
 * to match.
 *
 * @param adv      Advertising set to update.
 * @param pdu      Current advertising channel PDU.
 * @param pdu_scan Current scan response PDU.
 *
 * @return BT_HCI_ERR_SUCCESS, or BT_HCI_ERR_CMD_DISALLOWED if the ticker
 *         update was rejected.
 */
uint8_t ull_adv_time_update(struct ll_adv_set *adv, struct pdu_adv *pdu,
			    struct pdu_adv *pdu_scan)
{
	struct lll_adv *lll;
	uint32_t time_ticks;
	uint8_t phy_flags;
	uint16_t time_us;
	uint8_t chan_map;
	uint8_t chan_cnt;
	uint8_t phy;

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	phy = lll->phy_p;
	phy_flags = lll->phy_flags;
#else
	/* Legacy advertising always runs on the 1M PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif

	/* Event duration scales with the number of enabled primary channels */
	chan_map = lll->chan_map;
	chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map));
	time_us = adv_time_get(pdu, pdu_scan, chan_cnt, phy, phy_flags);
	time_ticks = HAL_TICKER_US_TO_TICKS_CEIL(time_us);

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint32_t volatile ret_cb;
	uint32_t ticks_minus;
	uint32_t ticks_plus;
	uint32_t ret;

	/* Grow or shrink the reserved slot by the difference only */
	if (adv->ull.ticks_slot > time_ticks) {
		ticks_minus = adv->ull.ticks_slot - time_ticks;
		ticks_plus = 0U;
	} else if (adv->ull.ticks_slot < time_ticks) {
		ticks_minus = 0U;
		ticks_plus = time_ticks - adv->ull.ticks_slot;
	} else {
		/* Slot size unchanged, nothing to update */
		return BT_HCI_ERR_SUCCESS;
	}

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_ADV_BASE +
			     ull_adv_handle_get(adv)),
			    0, 0, ticks_plus, ticks_minus, 0, 0,
			    ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	adv->ull.ticks_slot = time_ticks;

	return BT_HCI_ERR_SUCCESS;
}
2223 
/* (Re)initialize all advertising set instances to their power-on state.
 *
 * Called from module init and reset paths; clears per-set double-buffered
 * PDU state and DF configuration, and pre-populates set #0 with empty
 * legacy PDUs.
 *
 * @return Always 0.
 */
static int init_reset(void)
{
	uint8_t handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Default Tx power for the single legacy set */
	ll_adv[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		lll_adv_data_init(&ll_adv[handle].lll.adv_data);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* scan_rsp is not init'ed until we know if it is a legacy or extended scan rsp */
		memset(&ll_adv[handle].lll.scan_rsp, 0, sizeof(ll_adv[handle].lll.scan_rsp));
#else
		lll_adv_data_init(&ll_adv[handle].lll.scan_rsp);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
		/* Pointer to DF configuration must be cleared on reset. In other case it will point
		 * to a memory pool address that should be released. It may be used by the pool
		 * itself. In such situation it may cause error.
		 */
		ll_adv[handle].df_cfg = NULL;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
	}

	/* Make sure that set #0 is initialized with empty legacy PDUs. This is
	 * especially important if legacy HCI interface is used for advertising
	 * because it allows to enable advertising without any configuration,
	 * thus we need to have PDUs already initialized.
	 */
	init_set(&ll_adv[0]);

	return 0;
}
2261 
2262 static inline struct ll_adv_set *is_disabled_get(uint8_t handle)
2263 {
2264 	struct ll_adv_set *adv;
2265 
2266 	adv = ull_adv_set_get(handle);
2267 	if (!adv || adv->is_enabled) {
2268 		return NULL;
2269 	}
2270 
2271 	return adv;
2272 }
2273 
/* Calculate the worst-case on-air duration, in microseconds, of one
 * advertising event for the given PDUs.
 *
 * @param pdu         Advertising channel PDU.
 * @param pdu_scan    Scan response PDU (consulted for legacy scannable).
 * @param adv_chn_cnt Number of enabled primary advertising channels.
 * @param phy         PHY in use (extended advertising only).
 * @param phy_flags   PHY coding flags (extended advertising only).
 *
 * @return Radio event time reservation in microseconds, including start
 *         and end overheads.
 */
static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
			     uint8_t adv_chn_cnt, uint8_t phy,
			     uint8_t phy_flags)
{
	uint16_t time_us = EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	/* NOTE: 16-bit value is sufficient to calculate the maximum radio
	 *       event time reservation for PDUs on primary advertising
	 *       channels (37, 38, and 39 channel indices of 1M and Coded PHY).
	 */

	/* Calculate the PDU Tx Time and hence the radio event length */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* ADV_EXT_IND: one Tx per enabled channel plus turnaround
		 * between consecutive channels; no response is awaited on
		 * the primary channel.
		 */
		time_us += PDU_AC_US(pdu->len, phy, phy_flags) * adv_chn_cnt +
			   EVENT_RX_TX_TURNAROUND(phy) * (adv_chn_cnt - 1);
	} else
#endif
	{
		/* Legacy PDUs: account for the largest request/response
		 * exchange possible for the PDU type, per channel.
		 */
		uint16_t adv_size =
			PDU_OVERHEAD_SIZE(PHY_1M) + ADVA_SIZE;
		const uint16_t conn_ind_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 INITA_SIZE + ADVA_SIZE + LLDATA_SIZE), PHY_1M);
		const uint8_t scan_req_us  =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 SCANA_SIZE + ADVA_SIZE), PHY_1M);
		const uint16_t scan_rsp_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 ADVA_SIZE + pdu_scan->len), PHY_1M);
		const uint8_t rx_to_us	= EVENT_RX_TO_US(PHY_1M);
		const uint8_t rxtx_turn_us = EVENT_RX_TX_TURNAROUND(PHY_1M);

		if (pdu->type == PDU_ADV_TYPE_NONCONN_IND) {
			/* Non-connectable: Tx only, no Rx window needed */
			adv_size += pdu->len;
			time_us += BYTES2US(adv_size, PHY_1M) * adv_chn_cnt +
				   rxtx_turn_us * (adv_chn_cnt - 1);
		} else {
			if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
				adv_size += TARGETA_SIZE;
				time_us += conn_ind_us;
			} else if (pdu->type == PDU_ADV_TYPE_ADV_IND) {
				adv_size += pdu->len;
				/* Worst case of CONNECT_IND vs SCAN_REQ +
				 * SCAN_RSP exchange
				 */
				time_us += MAX(scan_req_us + EVENT_IFS_MAX_US +
						scan_rsp_us, conn_ind_us);
			} else if (pdu->type == PDU_ADV_TYPE_SCAN_IND) {
				adv_size += pdu->len;
				time_us += scan_req_us + EVENT_IFS_MAX_US +
					   scan_rsp_us;
			}

			/* Per-channel Tx, IFS and Rx timeout; the final
			 * channel needs no turnaround to a next channel.
			 */
			time_us += (BYTES2US(adv_size, PHY_1M) +
				    EVENT_IFS_MAX_US + rx_to_us +
				    rxtx_turn_us) * (adv_chn_cnt - 1) +
				   BYTES2US(adv_size, PHY_1M) + EVENT_IFS_MAX_US;
		}
	}

	return time_us;
}
2334 
/* Ticker expiry callback for the primary advertising event.
 *
 * Runs on every advertising interval expiry: enqueues the LLL prepare
 * mayfly, applies the per-event random advertising delay (advDelay,
 * 0..ULL_ADV_RANDOM_DELAY) via a ticker update, and performs extended
 * advertising duration/event-count bookkeeping.
 *
 * @param ticks_at_expire Ticker tick count at this expiry.
 * @param ticks_drift     Accumulated drift applied by the ticker.
 * @param remainder       Sub-tick remainder for fine timing.
 * @param lazy            Number of skipped expiries (or
 *                        TICKER_LAZY_MUST_EXPIRE).
 * @param force           Non-zero when expiry was forced.
 * @param param           Advertising set (directly, or via ticker ext
 *                        context when expire info is enabled).
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_prepare};
	static struct lll_prepare_param p;
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_ext_context *context = param;
	struct ll_adv_set *adv = context->context;
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	struct ll_adv_set *adv = param;
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	uint32_t random_delay;
	struct lll_adv *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->aux) {
		/* Check if we are about to exceed the duration or max events limit
		 * Usually this will be handled in ull_adv_done(), but in cases where
		 * the extended advertising events overlap (ie. several primary advertisings
		 * point to the same AUX_ADV_IND packet) the ticker will not be stopped
		 * in time. To handle this, we simply ignore the extra ticker callback and
		 * wait for the usual ull_adv_done() handling to run
		 */
		if ((adv->max_events && adv->event_counter >= adv->max_events) ||
		    (adv->remain_duration_us &&
		     adv->remain_duration_us <= (uint64_t)adv->interval * ADV_INT_UNIT_US)) {
			return;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	if (IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		/* Increment prepare reference count */
		ref = ull_ref_inc(&adv->ull);
		LL_ASSERT(ref);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			uint32_t ticks_to_expire;
			uint32_t other_remainder;

			LL_ASSERT(context->other_expire_info);

			/* Adjust ticks to expire based on remainder value */
			ticks_to_expire = context->other_expire_info->ticks_to_expire;
			other_remainder = context->other_expire_info->remainder;
			hal_ticker_remove_jitter(&ticks_to_expire, &other_remainder);

			/* Store the ticks and remainder offset for aux ptr population in LLL */
			adv->lll.aux->ticks_pri_pdu_offset = ticks_to_expire;
			adv->lll.aux->us_pri_pdu_offset = other_remainder;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	* CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

		/* Append timing parameters */
		p.ticks_at_expire = ticks_at_expire;
		p.remainder = remainder;
		p.lazy = lazy;
		p.force = force;
		p.param = lll;
		mfy.param = &p;

		/* Kick LLL prepare */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING) || \
	(defined(CONFIG_BT_CTLR_ADV_EXT) && \
	 (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	 !defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO))
		/* Remember the ticks_at_expire, will be used by JIT scheduling
		 * and for checking latency calculating the aux offset for
		 * extended advertising.
		 */
		adv->ticks_at_expire = ticks_at_expire;

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Snapshot the delay applied this event; consumed by the
		 * re-scheduling logic in the done handler.
		 */
		adv->delay_at_expire = adv->delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING ||
	* (CONFIG_BT_CTLR_ADV_EXT &&
	*  (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	*  !CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	*/

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	!defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			ull_adv_aux_offset_get(adv);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/
	}

	/* Apply adv random delay */
#if defined(CONFIG_BT_PERIPHERAL)
	if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
	{
		if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING) ||
		    (ticker_update_req == ticker_update_ack)) {
			/* Ticker update requested */
			ticker_update_req++;

			/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
			random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY, 0U, 0U,
							  ticker_update_op_cb);
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			adv->delay = random_delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
		} else {
			/* Previous update still pending; skip the delay */
			random_delay = 0U;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		uint16_t event_counter_inc;

		if (lazy == TICKER_LAZY_MUST_EXPIRE) {
			lazy = 0U;
			event_counter_inc = 0U;
		} else {
			event_counter_inc = (lazy + 1U);
		}

		if (adv->remain_duration_us && adv->event_counter > 0U) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			/* ticks_drift is always 0 with JIT scheduling, populate manually */
			ticks_drift = adv->delay_at_expire;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
			uint32_t interval_us = (uint64_t)adv->interval * ADV_INT_UNIT_US;
			uint32_t elapsed_us = interval_us * (lazy + 1U) +
						 HAL_TICKER_TICKS_TO_US(ticks_drift);

			/* End advertising if the added random delay pushes us beyond the limit */
			if (adv->remain_duration_us > elapsed_us + interval_us +
						      HAL_TICKER_TICKS_TO_US(random_delay)) {
				adv->remain_duration_us -= elapsed_us;
			} else {
				adv->remain_duration_us = interval_us;
			}
		}

		adv->event_counter += event_counter_inc;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	}

	DEBUG_RADIO_PREPARE_A(1);
}
2497 
/* Completion callback for the random-delay ticker update requested in
 * ticker_cb().
 *
 * Releases the update request/ack gate, and asserts that a failed update
 * only occurs in known benign races (the set is being disabled, or a
 * connection is being established on a connectable set).
 */
static void ticker_update_op_cb(uint32_t status, void *param)
{
	/* Reset update requested */
	ticker_update_ack = ticker_update_req;

#if defined(CONFIG_BT_PERIPHERAL) && (defined(CONFIG_BT_ASSERT) || defined(CONFIG_ASSERT))
	/* connectable is consumed only by the assertion below, hence the
	 * assert-gated compilation to avoid unused-variable warnings.
	 */
	struct ll_adv_set *adv = param;
	struct pdu_adv *pdu = lll_adv_data_peek(&adv->lll);
	bool connectable = (pdu->type == PDU_ADV_TYPE_ADV_IND) ||
			   (pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
			   ((pdu->type == PDU_ADV_TYPE_EXT_IND) &&
			    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN)) ||
#endif /* CONFIG_BT_CTLR_ADV_EXT */
			   0;
#endif /* CONFIG_BT_PERIPHERAL && (CONFIG_BT_ASSERT || CONFIG_ASSERT) */

	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get() ||
#if defined(CONFIG_BT_PERIPHERAL)
		   /* if using connectable adv and lll.conn is 0 -> a connection is underway */
		  (connectable && !adv->lll.conn) ||
#endif /* CONFIG_BT_PERIPHERAL */
		  0);
}
2523 
2524 #if defined(CONFIG_BT_PERIPHERAL)
2525 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2526 			   uint32_t remainder, uint16_t lazy, uint8_t force,
2527 			   void *param)
2528 {
2529 	struct ll_adv_set *adv = param;
2530 	uint8_t handle;
2531 	uint32_t ret;
2532 
2533 	handle = ull_adv_handle_get(adv);
2534 	LL_ASSERT(handle < BT_CTLR_ADV_SET);
2535 
2536 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2537 			  TICKER_ID_ADV_BASE + handle,
2538 			  ticker_stop_op_cb, adv);
2539 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2540 		  (ret == TICKER_STATUS_BUSY));
2541 }
2542 
/* Completion callback of the ticker stop issued by ticker_stop_cb();
 * defers the actual advertising disable to ULL_HIGH via a mayfly.
 */
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	/* NOTE: link and mfy are static because the mayfly may still be
	 * queued when this callback returns.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, adv_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

#if defined(CONFIG_BT_HCI_MESH_EXT)
	/* FIXME: why is this here for Mesh commands? */
	if (param) {
		return;
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2569 
/* Disable the advertising set in LLL context.
 *
 * If an LLL event is still in the pipeline, registers disabled_cb() to
 * run once the reference count drops to zero and triggers an LLL
 * disable; otherwise invokes disabled_cb() immediately.
 *
 * @param param Advertising set (struct ll_adv_set *).
 */
static void adv_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&adv->lll);
	}
}
2601 
/* Invoked once all pending LLL events have drained after a high duty
 * cycle directed advertising timeout.
 *
 * Generates a connection-complete event with status
 * BT_HCI_ERR_ADV_TIMEOUT using the pre-allocated node/link, and for
 * extended advertising additionally enqueues the advertising set
 * terminated event.
 *
 * @param param LLL context (struct lll_adv *), whose parent is the set.
 */
static void disabled_cb(void *param)
{
	struct ll_adv_set *adv;
	struct node_rx_pdu *rx;
	struct node_rx_cc *cc;
	memq_link_t *link;

	adv = ((struct lll_hdr *)param)->parent;

	/* Consume the pre-allocated connection-complete link and node;
	 * clearing the pointers marks them as used.
	 */
	LL_ASSERT(adv->link_cc_free);
	link = adv->link_cc_free;
	adv->link_cc_free = NULL;

	LL_ASSERT(adv->node_rx_cc_free);
	rx = adv->node_rx_cc_free;
	adv->node_rx_cc_free = NULL;

	rx->hdr.type = NODE_RX_TYPE_CONNECTION;
	rx->hdr.handle = 0xffff;

	cc = (void *)rx->pdu;
	memset(cc, 0x00, sizeof(struct node_rx_cc));
	cc->status = BT_HCI_ERR_ADV_TIMEOUT;

	rx->rx_ftr.param = param;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (adv->lll.node_rx_adv_term) {
		uint8_t handle;

		/* Enqueue the connection-complete first, then reuse rx and
		 * link for the advertising set terminated event below.
		 */
		ll_rx_put(link, rx);

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx = (void *)adv->lll.node_rx_adv_term;
		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
		rx->rx_ftr.param_adv_term.conn_handle = 0xffff;
		rx->rx_ftr.param_adv_term.num_events = adv->event_counter;

		link = rx->hdr.link;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	ll_rx_put_sched(link, rx);
}
2650 
2651 static void conn_release(struct ll_adv_set *adv)
2652 {
2653 	struct lll_conn *lll = adv->lll.conn;
2654 	memq_link_t *link;
2655 
2656 	LL_ASSERT(!lll->link_tx_free);
2657 	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
2658 	LL_ASSERT(link);
2659 	lll->link_tx_free = link;
2660 
2661 	ll_conn_release(lll->hdr.parent);
2662 	adv->lll.conn = NULL;
2663 
2664 	ll_rx_release(adv->node_rx_cc_free);
2665 	adv->node_rx_cc_free = NULL;
2666 	ll_rx_link_release(adv->link_cc_free);
2667 	adv->link_cc_free = NULL;
2668 }
2669 #endif /* CONFIG_BT_PERIPHERAL */
2670 
2671 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2672 static uint8_t leg_adv_type_get(uint8_t evt_prop)
2673 {
2674 	/* We take advantage of the fact that 2 LS bits
2675 	 * of evt_prop can be used in a lookup to return
2676 	 * PDU type value in the pdu_adv_type[] lookup.
2677 	 */
2678 	uint8_t const leg_adv_type[] = {
2679 		0x03, /* index of PDU_ADV_TYPE_NONCONN_IND in pdu_adv_type[] */
2680 		0x04, /* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2681 		0x02, /* index of PDU_ADV_TYPE_SCAN_IND in pdu_adv_type[] */
2682 		0x00  /* index of PDU_ADV_TYPE_ADV_IND in pdu_adv_type[] */
2683 	};
2684 
2685 	/* if high duty cycle directed */
2686 	if (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN) {
2687 		/* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2688 		return 0x01;
2689 	}
2690 
2691 	return leg_adv_type[evt_prop & 0x03];
2692 }
2693 
2694 static void adv_max_events_duration_set(struct ll_adv_set *adv,
2695 					uint16_t duration,
2696 					uint8_t max_ext_adv_evts)
2697 {
2698 	adv->event_counter = 0;
2699 	adv->max_events = max_ext_adv_evts;
2700 	adv->remain_duration_us = (uint32_t)duration * 10U * USEC_PER_MSEC;
2701 }
2702 
/* Completion callback of the auxiliary ticker stop; defers the aux
 * disable to ULL_HIGH via a mayfly.
 */
static void ticker_stop_aux_op_cb(uint32_t status, void *param)
{
	/* NOTE: link and mfy are static because the mayfly may still be
	 * queued when this callback returns.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, aux_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2717 
2718 static void aux_disable(void *param)
2719 {
2720 	struct lll_adv_aux *lll_aux;
2721 	struct ll_adv_aux_set *aux;
2722 	struct ll_adv_set *adv;
2723 	struct ull_hdr *hdr;
2724 
2725 	adv = param;
2726 	lll_aux = adv->lll.aux;
2727 	aux = HDR_LLL2ULL(lll_aux);
2728 	hdr = &aux->ull;
2729 	if (ull_ref_get(hdr)) {
2730 		LL_ASSERT(!hdr->disabled_cb);
2731 		hdr->disabled_param = adv;
2732 		hdr->disabled_cb = aux_disabled_cb;
2733 	} else {
2734 		aux_disabled_cb(param);
2735 	}
2736 }
2737 
2738 static void aux_disabled_cb(void *param)
2739 {
2740 	uint8_t handle;
2741 	uint32_t ret;
2742 
2743 	handle = ull_adv_handle_get(param);
2744 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2745 			  TICKER_USER_ID_ULL_HIGH,
2746 			  (TICKER_ID_ADV_BASE + handle),
2747 			  ticker_stop_ext_op_cb, param);
2748 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2749 		  (ret == TICKER_STATUS_BUSY));
2750 }
2751 
/* Completion callback of the primary ticker stop for an extended
 * advertising set; defers the disable to ULL_HIGH via a mayfly.
 */
static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	/* NOTE: link and mfy are static because the mayfly may still be
	 * queued when this callback returns.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
2771 
/* Disable an extended advertising set after its ticker stopped.
 *
 * If an LLL event is still in the pipeline, registers ext_disabled_cb()
 * to run once the reference count drops to zero and triggers an LLL
 * disable; otherwise invokes ext_disabled_cb() immediately.
 *
 * @param param Advertising set (struct ll_adv_set *).
 */
static void ext_disable(void *param)
{
	struct ll_adv_set *adv;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv = param;
	hdr = &adv->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&adv->lll);
	}
}
2803 
2804 static void ext_disabled_cb(void *param)
2805 {
2806 	struct lll_adv *lll = (void *)param;
2807 	struct node_rx_pdu *rx = lll->node_rx_adv_term;
2808 
2809 	/* Under race condition, if a connection has been established then
2810 	 * node_rx is already utilized to send terminate event on connection
2811 	 */
2812 	if (!rx) {
2813 		return;
2814 	}
2815 
2816 	/* NOTE: parameters are already populated on disable, just enqueue here
2817 	 */
2818 	ll_rx_put_sched(rx->hdr.link, rx);
2819 }
2820 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2821 
/* Common implementation for disabling an advertising set from thread
 * context: stops the stop/primary/auxiliary tickers, aborts any pending
 * LLL events, releases the pre-allocated connection and terminate-event
 * resources, and marks the set disabled.
 *
 * @param handle Advertising set handle.
 *
 * @return 0 on success, or BT_HCI_ERR_CMD_DISALLOWED on failure (e.g. a
 *         connection establishment race or ticker stop failure).
 */
static inline uint8_t disable(uint8_t handle)
{
	uint32_t volatile ret_cb;
	struct ll_adv_set *adv;
	uint32_t ret;
	void *mark;
	int err;

	adv = ull_adv_is_enabled_get(handle);
	if (!adv) {
		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
		 * Disabling advertising when it is already disabled has no
		 * effect.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT)) {
			return 0;
		}

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* Indicate to LLL that a cancellation is requested */
		adv->lll.conn->periph.cancelled = 1U;
		/* Memory barrier so LLL observes cancelled before we read
		 * initiated below.
		 */
		cpu_dmb();

		/* Check if a connection was initiated (connection
		 * establishment race between LLL and ULL).
		 */
		if (unlikely(adv->lll.conn->periph.initiated)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Mark the set as being disabled to let callbacks identify it */
	mark = ull_disable_mark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.is_hdcd) {
		/* Stop the high duty cycle directed advertising timeout
		 * ticker first.
		 */
		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_THREAD, TICKER_ID_ADV_STOP,
				  ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret) {
			mark = ull_disable_unmark(adv);
			LL_ASSERT(mark == adv);

			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Stop the primary advertising ticker */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_ADV_BASE + handle,
			  ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		mark = ull_disable_unmark(adv);
		LL_ASSERT(mark == adv);

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Abort any LLL event already in flight; -EALREADY means none */
	err = ull_disable(&adv->lll);
	LL_ASSERT(!err || (err == -EALREADY));

	mark = ull_disable_unmark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		err = ull_adv_aux_stop(aux);
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#if defined(CONFIG_BT_PERIPHERAL)
	/* Release the connection context reserved for this set, if any */
	if (adv->lll.conn) {
		conn_release(adv);
	}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv *lll = &adv->lll;

	/* Release the unused advertising set terminated event node */
	if (lll->node_rx_adv_term) {
		struct node_rx_pdu *node_rx_adv_term =
			(void *)lll->node_rx_adv_term;

		lll->node_rx_adv_term = NULL;

		ll_rx_link_release(node_rx_adv_term->hdr.link);
		ll_rx_release(node_rx_adv_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	adv->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
		ull_filter_adv_scan_state_cb(0);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}
2940 
/* Determine which PDU carries AdvA (primary, or the auxiliary PDU for
 * extended advertising) and refresh its addresses, together with the
 * scan response addresses where applicable.
 *
 * @param adv      Advertising set.
 * @param pdu      Primary channel advertising PDU.
 * @param pdu_scan Scan response PDU.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED when a scannable
 *         extended set has no scan response data, or
 *         BT_HCI_ERR_INVALID_PARAM when no valid random address is set.
 */
static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
					struct pdu_adv *pdu,
					struct pdu_adv *pdu_scan)
{
	struct pdu_adv *pdu_adv_to_update;
	struct lll_adv *lll;

	pdu_adv_to_update = NULL;
	lll = &adv->lll;

	/* if (0) keeps the #ifdef'ed chain below syntactically uniform */
	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Snapshot the primary PDU's extended header flags; only
		 * the flag octet (first byte) is consulted below.
		 */
		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		/* Scannable extended advertising requires scan response
		 * data to be present.
		 */
		if (pri_com_hdr->adv_mode & BT_HCI_LE_ADV_PROP_SCAN) {
			struct pdu_adv *sr = lll_adv_scan_rsp_peek(lll);

			if (!sr->len) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}
		}

		/* AdvA, fill here at enable */
		if (pri_hdr_flags.adv_addr) {
			pdu_adv_to_update = pdu;
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			/* AdvA may instead live in the auxiliary PDU */
			sec_pdu = lll_adv_aux_data_peek(lll->aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu_adv_to_update = sec_pdu;
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		/* Legacy PDUs always carry AdvA in the primary PDU */
		pdu_adv_to_update = pdu;
	}

	if (pdu_adv_to_update) {
		const uint8_t *adv_addr;

		adv_addr = ull_adv_pdu_update_addrs(adv, pdu_adv_to_update);

		/* In case the local IRK was not set or no match was
		 * found the fallback address was used instead, check
		 * that a valid address has been set.
		 */
		if (pdu_adv_to_update->tx_addr &&
		    !mem_nz((void *)adv_addr, BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Do not update scan response for extended non-scannable since
		 * there may be no scan response set.
		 */
		if ((pdu->type != PDU_ADV_TYPE_EXT_IND) ||
		    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_SCAN)) {
#else
		if (1) {
#endif
			ull_adv_pdu_update_addrs(adv, pdu_scan);
		}

	}

	return 0;
}
3034 
3035 static inline uint8_t *adv_pdu_adva_get(struct pdu_adv *pdu)
3036 {
3037 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3038 	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
3039 	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
3040 	struct pdu_adv_ext_hdr hdr_flags;
3041 
3042 	if (com_hdr->ext_hdr_len) {
3043 		hdr_flags = *hdr;
3044 	} else {
3045 		*(uint8_t *)&hdr_flags = 0U;
3046 	}
3047 
3048 	/* All extended PDUs have AdvA at the same offset in common header */
3049 	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
3050 		LL_ASSERT(hdr_flags.adv_addr);
3051 
3052 		return &com_hdr->ext_hdr_adv_data[1];
3053 	}
3054 #endif
3055 
3056 	/* All legacy PDUs have AdvA at the same offset */
3057 	return pdu->adv_ind.addr;
3058 }
3059 
3060 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
3061 {
3062 #if defined(CONFIG_BT_CTLR_PRIVACY)
3063 	const uint8_t *rpa = ull_filter_adva_get(adv->lll.rl_idx);
3064 #else
3065 	const uint8_t *rpa = NULL;
3066 #endif
3067 	const uint8_t *own_id_addr;
3068 	const uint8_t *tx_addr;
3069 	uint8_t *adv_addr;
3070 
3071 	if (!rpa || IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)) {
3072 		if (0) {
3073 #if defined(CONFIG_BT_CTLR_ADV_EXT)
3074 		} else if (ll_adv_cmds_is_ext() && pdu->tx_addr) {
3075 			own_id_addr = adv->rnd_addr;
3076 #endif
3077 		} else {
3078 			own_id_addr = ll_addr_get(pdu->tx_addr);
3079 		}
3080 	}
3081 
3082 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
3083 	(void)memcpy(adv->own_id_addr, own_id_addr, BDADDR_SIZE);
3084 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
3085 
3086 	if (rpa) {
3087 		pdu->tx_addr = 1;
3088 		tx_addr = rpa;
3089 	} else {
3090 		tx_addr = own_id_addr;
3091 	}
3092 
3093 	adv_addr = adv_pdu_adva_get(pdu);
3094 	memcpy(adv_addr, tx_addr, BDADDR_SIZE);
3095 
3096 	return adv_addr;
3097 }
3098 
/* Update the TargetA field of a directed advertising PDU with the peer
 * RPA from the resolving list, when LL Privacy provides one.
 */
static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rx_addr = ull_filter_tgta_get(adv->lll.rl_idx);

	if (rx_addr) {
		/* RPA is a random device address */
		pdu->rx_addr = 1;

		/* TargetA always follows AdvA in all PDUs */
		memcpy(adv_pdu_adva_get(pdu) + BDADDR_SIZE, rx_addr,
		       BDADDR_SIZE);
	}
#endif

	/* NOTE: identity TargetA is set when configuring advertising set, no
	 *       need to update if LL Privacy is not supported.
	 */
}
3119 
3120 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type)
3121 {
3122 	/* TODO: Add support for extended advertising PDU if needed */
3123 	pdu->type = pdu_type;
3124 	pdu->rfu = 0;
3125 	pdu->chan_sel = 0;
3126 	pdu->tx_addr = 0;
3127 	pdu->rx_addr = 0;
3128 	pdu->len = BDADDR_SIZE;
3129 }
3130 
3131 static void init_set(struct ll_adv_set *adv)
3132 {
3133 	adv->interval = BT_LE_ADV_INTERVAL_DEFAULT;
3134 #if defined(CONFIG_BT_CTLR_PRIVACY)
3135 	adv->own_addr_type = BT_HCI_OWN_ADDR_RPA_OR_PUBLIC;
3136 #endif /* CONFIG_BT_CTLR_PRIVACY */
3137 	adv->lll.chan_map = BT_LE_ADV_CHAN_MAP_ALL;
3138 	adv->lll.filter_policy = BT_LE_ADV_FP_NO_FILTER;
3139 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3140 	adv->delay = 0U;
3141 #endif /* ONFIG_BT_CTLR_JIT_SCHEDULING */
3142 
3143 	init_pdu(lll_adv_data_peek(&ll_adv[0].lll), PDU_ADV_TYPE_ADV_IND);
3144 
3145 #if !defined(CONFIG_BT_CTLR_ADV_EXT)
3146 	init_pdu(lll_adv_scan_rsp_peek(&ll_adv[0].lll), PDU_ADV_TYPE_SCAN_RSP);
3147 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
3148 }
3149