1 /*
2 * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <string.h>
9
10 #include <zephyr/kernel.h>
11 #include <soc.h>
12 #include <zephyr/bluetooth/hci_types.h>
13 #include <zephyr/sys/byteorder.h>
14
15 #include "hal/cpu.h"
16 #include "hal/ccm.h"
17 #include "hal/radio.h"
18 #include "hal/ticker.h"
19 #include "hal/cntr.h"
20
21 #include "util/util.h"
22 #include "util/mem.h"
23 #include "util/memq.h"
24 #include "util/mayfly.h"
25 #include "util/dbuf.h"
26
27 #include "ticker/ticker.h"
28
29 #include "pdu_df.h"
30 #include "lll/pdu_vendor.h"
31 #include "pdu.h"
32
33 #include "lll.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll/lll_adv_types.h"
37 #include "lll_adv.h"
38 #include "lll/lll_adv_pdu.h"
39 #include "lll_scan.h"
40 #include "lll/lll_df_types.h"
41 #include "lll_conn.h"
42 #include "lll_filter.h"
43 #include "lll_conn_iso.h"
44
45 #include "ll_sw/ull_tx_queue.h"
46
47 #include "ull_adv_types.h"
48 #include "ull_scan_types.h"
49 #include "ull_conn_types.h"
50 #include "ull_filter.h"
51
52 #include "ull_adv_internal.h"
53 #include "ull_scan_internal.h"
54 #include "ull_conn_internal.h"
55 #include "ull_internal.h"
56
57 #include "ll.h"
58 #include "ll_feat.h"
59 #include "ll_settings.h"
60
61 #include "ll_sw/isoal.h"
62 #include "ll_sw/ull_iso_types.h"
63 #include "ll_sw/ull_conn_iso_types.h"
64
65 #include "ll_sw/ull_llcp.h"
66
67
68 #include "hal/debug.h"
69
70 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle);
71 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv);
72
73 static int init_reset(void);
74 static inline struct ll_adv_set *is_disabled_get(uint8_t handle);
75 static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
76 uint8_t adv_chn_cnt, uint8_t phy,
77 uint8_t phy_flags);
78
79 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
80 uint32_t remainder, uint16_t lazy, uint8_t force,
81 void *param);
82 static void ticker_update_op_cb(uint32_t status, void *param);
83
84 #if defined(CONFIG_BT_PERIPHERAL)
85 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
86 uint32_t remainder, uint16_t lazy, uint8_t force,
87 void *param);
88 static void ticker_stop_op_cb(uint32_t status, void *param);
89 static void adv_disable(void *param);
90 static void disabled_cb(void *param);
91 static void conn_release(struct ll_adv_set *adv);
92 #endif /* CONFIG_BT_PERIPHERAL */
93
94 #if defined(CONFIG_BT_CTLR_ADV_EXT)
95 static uint8_t leg_adv_type_get(uint8_t evt_prop);
96 static void adv_max_events_duration_set(struct ll_adv_set *adv,
97 uint16_t duration,
98 uint8_t max_ext_adv_evts);
99 static void ticker_stop_aux_op_cb(uint32_t status, void *param);
100 static void aux_disable(void *param);
101 static void aux_disabled_cb(void *param);
102 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
103 static void ext_disable(void *param);
104 static void ext_disabled_cb(void *param);
105 #endif /* CONFIG_BT_CTLR_ADV_EXT */
106
107 static inline uint8_t disable(uint8_t handle);
108
109 static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
110 struct pdu_adv *pdu,
111 struct pdu_adv *pdu_scan);
112 static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
113 static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu);
114
115 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type);
116 static void init_set(struct ll_adv_set *adv);
117
118 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
119
120 #if defined(CONFIG_BT_TICKER_EXT)
121 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
122 #endif /* CONFIG_BT_TICKER_EXT */
123
124 #if defined(CONFIG_BT_HCI_RAW) && defined(CONFIG_BT_CTLR_ADV_EXT)
125 static uint8_t ll_adv_cmds;
126
/* Latch whether the host uses legacy or extended advertising HCI commands.
 * Only the first caller selects the mode; subsequent calls merely verify
 * that the same command family is still in use.
 *
 * Returns 0 when the requested mode matches the latched one, -EINVAL when
 * the host attempts to mix legacy and extended advertising commands.
 */
int ll_adv_cmds_set(uint8_t adv_cmds)
{
	if (ll_adv_cmds == 0U) {
		/* First use: record the command family */
		ll_adv_cmds = adv_cmds;

		if (adv_cmds == LL_ADV_CMDS_LEGACY) {
			struct ll_adv_set *adv = &ll_adv[0];

			/* Legacy commands implicitly operate on set 0 */
#if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
			adv->hci_handle = 0;
#endif
			adv->is_created = 1;
		}
	}

	return (ll_adv_cmds == adv_cmds) ? 0 : -EINVAL;
}
148
ll_adv_cmds_is_ext(void)149 int ll_adv_cmds_is_ext(void)
150 {
151 return ll_adv_cmds == LL_ADV_CMDS_EXT;
152 }
153 #endif
154
155 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
/* Translate an HCI advertising handle into the controller-internal set
 * index, considering only sets that have been created.
 *
 * On success writes the internal index to *handle and returns 0; returns
 * BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER when no created set uses hci_handle.
 */
uint8_t ll_adv_set_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
{
	for (uint8_t idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		const struct ll_adv_set *adv = &ll_adv[idx];

		if (adv->is_created && (adv->hci_handle == hci_handle)) {
			*handle = idx;
			return 0;
		}
	}

	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
}
172
/* Translate an HCI advertising handle into an internal set index, lazily
 * claiming the first vacant (not-created) set for hci_handle when no
 * created set matches.
 *
 * On success writes the internal index to *handle and returns 0; returns
 * BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when all sets are already in use.
 */
uint8_t ll_adv_set_by_hci_handle_get_or_new(uint8_t hci_handle, uint8_t *handle)
{
	struct ll_adv_set *vacant = NULL;

	for (uint8_t idx = 0U; idx < BT_CTLR_ADV_SET; idx++) {
		struct ll_adv_set *adv = &ll_adv[idx];

		if (!adv->is_created) {
			/* Remember the first free slot for later */
			if (vacant == NULL) {
				vacant = adv;
			}
			continue;
		}

		if (adv->hci_handle == hci_handle) {
			*handle = idx;
			return 0;
		}
	}

	if (vacant == NULL) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* NOTE: the slot is only associated here; is_created is set when the
	 * set parameters are configured.
	 */
	vacant->hci_handle = hci_handle;
	*handle = ull_adv_handle_get(vacant);

	return 0;
}
200
/* Reverse mapping: return the HCI advertising handle associated with the
 * internal set index. The caller must pass the index of a created set;
 * this is enforced by assertion.
 */
uint8_t ll_adv_set_hci_handle_get(uint8_t handle)
{
	struct ll_adv_set *adv = ull_adv_set_get(handle);

	LL_ASSERT(adv && adv->is_created);

	return adv->hci_handle;
}
210 #endif
211
/**
 * @brief Set advertising parameters for a set and pre-initialize the
 *        "current" primary channel advertising PDU (and, for legacy and
 *        extended sets, the scan response PDU) in the double buffer shared
 *        with the LLL (controller) context.
 *
 * This function is compiled with one of two signatures: the extended form
 * (handle, event properties, PHYs, SID, ...) when CONFIG_BT_CTLR_ADV_EXT is
 * enabled, otherwise the legacy form operating on the single set, handle 0.
 *
 * @return 0 on success, otherwise a BT_HCI_ERR_* error code (or an internal
 *         error code propagated from the aux-PDU/scan-rsp helpers).
 *
 * NOTE(review): in the extended form the 'skip' parameter is accepted but
 * not referenced anywhere in this function body — presumably reserved or
 * consumed elsewhere; confirm against the callers.
 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
uint8_t ll_adv_params_set(uint8_t handle, uint16_t evt_prop, uint32_t interval,
			  uint8_t adv_type, uint8_t own_addr_type,
			  uint8_t direct_addr_type, uint8_t const *const direct_addr,
			  uint8_t chan_map, uint8_t filter_policy,
			  uint8_t *const tx_pwr, uint8_t phy_p, uint8_t skip,
			  uint8_t phy_s, uint8_t sid, uint8_t sreq)
{
	/* Maps the HCI legacy advertising type (0..4), plus index 5 used
	 * internally for extended advertising, to on-air PDU types.
	 */
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
					PDU_ADV_TYPE_DIRECT_IND,
					PDU_ADV_TYPE_SCAN_IND,
					PDU_ADV_TYPE_NONCONN_IND,
					PDU_ADV_TYPE_DIRECT_IND,
					PDU_ADV_TYPE_EXT_IND};
	uint8_t is_pdu_type_changed = 0;
	uint8_t is_new_set;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_adv_params_set(uint16_t interval, uint8_t adv_type,
			  uint8_t own_addr_type, uint8_t direct_addr_type,
			  uint8_t const *const direct_addr, uint8_t chan_map,
			  uint8_t filter_policy)
{
	uint8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
					PDU_ADV_TYPE_DIRECT_IND,
					PDU_ADV_TYPE_SCAN_IND,
					PDU_ADV_TYPE_NONCONN_IND,
					PDU_ADV_TYPE_DIRECT_IND};
	uint8_t const handle = 0;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	struct ll_adv_set *adv;
	uint8_t pdu_type_prev;
	struct pdu_adv *pdu;

	/* Parameters may only be changed while the set is disabled */
	adv = is_disabled_get(handle);
	if (!adv) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
	 * evt_prop bits.
	 */

	/* Extended adv param set command used */
	if (adv_type == PDU_ADV_TYPE_EXT_IND) {
		/* legacy */
		if (evt_prop & BT_HCI_LE_ADV_PROP_LEGACY) {
			if (evt_prop & BT_HCI_LE_ADV_PROP_ANON) {
				return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			/* disallow changing to legacy advertising while
			 * periodic advertising enabled.
			 */
			if (adv->lll.sync) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

			/* Map legacy event properties back to a legacy
			 * adv type index.
			 */
			adv_type = leg_adv_type_get(evt_prop);

			adv->lll.phy_p = PHY_1M;
		} else {
			/* - Connectable and scannable not allowed;
			 * - High duty cycle directed connectable not allowed
			 */
			if (((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					  BT_HCI_LE_ADV_PROP_SCAN)) ==
			     (BT_HCI_LE_ADV_PROP_CONN |
			      BT_HCI_LE_ADV_PROP_SCAN)) ||
			    (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN)) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			if (adv->lll.sync &&
			    (evt_prop & (BT_HCI_LE_ADV_PROP_ANON |
					 BT_HCI_LE_ADV_PROP_CONN |
					 BT_HCI_LE_ADV_PROP_SCAN))) {
				const struct ll_adv_sync_set *sync;

				sync = HDR_LLL2ULL(adv->lll.sync);
				if (sync->is_enabled) {
					return BT_HCI_ERR_INVALID_PARAM;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if (CONFIG_BT_CTLR_ADV_AUX_SET == 0)
			/* Connectable or scannable requires aux */
			if (evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
					BT_HCI_LE_ADV_PROP_SCAN)) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}
#endif

			adv_type = 0x05; /* index of PDU_ADV_TYPE_EXT_IND in */
					 /* pdu_adv_type[] */

			adv->lll.phy_p = phy_p;
			adv->lll.phy_flags = PHY_FLAGS_S8;
		}
	} else {
		adv->lll.phy_p = PHY_1M;
	}

	is_new_set = !adv->is_created;
	adv->is_created = 1;
	adv->is_ad_data_cmplt = 1U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* remember parameters so that set adv/scan data and adv enable
	 * interface can correctly update adv/scan data in the
	 * double buffer between caller and controller context.
	 */
	/* Set interval for Undirected or Low Duty Cycle Directed Advertising */
	/* NOTE: index 0x01 is High Duty Cycle Directed, which has no
	 * advertising interval.
	 */
	if (adv_type != 0x01) {
		adv->interval = interval;
	} else {
		adv->interval = 0;
	}
	adv->lll.chan_map = chan_map;
	adv->lll.filter_policy = filter_policy;

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	adv->lll.scan_req_notify = sreq;
#endif

	/* update the "current" primary adv PDU */
	pdu = lll_adv_data_peek(&adv->lll);
	pdu_type_prev = pdu->type;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (is_new_set) {
		is_pdu_type_changed = 1;

		pdu->type = pdu_adv_type[adv_type];
		if (pdu->type != PDU_ADV_TYPE_EXT_IND) {
			pdu->len = 0U;
		}
	/* check if new PDU type is different that past one */
	} else if (pdu->type != pdu_adv_type[adv_type]) {
		is_pdu_type_changed = 1;

		/* If old PDU was extended advertising PDU, release
		 * auxiliary and periodic advertising sets.
		 */
		if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
			struct lll_adv_aux *lll_aux = adv->lll.aux;

			if (lll_aux) {
				struct ll_adv_aux_set *aux;

				/* FIXME: copy AD data from auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
				if (adv->lll.sync) {
					struct ll_adv_sync_set *sync;

					sync = HDR_LLL2ULL(adv->lll.sync);
					adv->lll.sync = NULL;

					ull_adv_sync_release(sync);
				}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

				/* Release auxiliary channel set */
				aux = HDR_LLL2ULL(lll_aux);
				adv->lll.aux = NULL;

				ull_adv_aux_release(aux);
			} else {
				/* No previous AD data in auxiliary channel
				 * PDU.
				 */
				pdu->len = 0;
			}
		}

		pdu->type = pdu_adv_type[adv_type];
	}

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	pdu->type = pdu_adv_type[adv_type];
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	pdu->rfu = 0;

	/* Channel Selection Algorithm #2 only applies to connectable
	 * legacy PDU types.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
	    ((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
	     (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
		pdu->chan_sel = 1;
	} else {
		pdu->chan_sel = 0;
	}

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
	/* Backup the legacy AD Data if switching to legacy directed advertising
	 * or to Extended Advertising.
	 */
	if (((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
	     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	      (pdu->type == PDU_ADV_TYPE_EXT_IND))) &&
	    (pdu_type_prev != PDU_ADV_TYPE_DIRECT_IND) &&
	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
	     (pdu_type_prev != PDU_ADV_TYPE_EXT_IND))) {
		if (pdu->len == 0U) {
			adv->ad_data_backup.len = 0U;
		} else {
			LL_ASSERT(pdu->len >=
				  offsetof(struct pdu_adv_adv_ind, data));

			adv->ad_data_backup.len = pdu->len -
				offsetof(struct pdu_adv_adv_ind, data);
			memcpy(adv->ad_data_backup.data, pdu->adv_ind.data,
			       adv->ad_data_backup.len);
		}
	}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	adv->own_addr_type = own_addr_type;
	if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	    adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
		/* For resolvable own addresses, the "direct" address fields
		 * carry the peer identity used for resolving-list lookup.
		 */
		adv->peer_addr_type = direct_addr_type;
		memcpy(&adv->peer_addr, direct_addr, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = direct_addr_type;
		memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE);
		pdu->len = sizeof(struct pdu_adv_direct_ind);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_ext_hdr *pri_hdr, pri_hdr_prev;
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		uint8_t *pri_dptr_prev, *pri_dptr;
		uint8_t len;

		/* Two passes over the common extended header: first walk
		 * forward setting flags and sizing both old and new field
		 * layouts, then walk backward filling field values.
		 */
		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		pri_dptr = pri_hdr->data;
		pri_dptr_prev = pri_dptr;

		/* No ACAD and no AdvData */
		pri_com_hdr->adv_mode = evt_prop & 0x03;

		/* Zero-init header flags */
		if (is_pdu_type_changed) {
			*(uint8_t *)&pri_hdr_prev = 0U;
		} else {
			pri_hdr_prev = *pri_hdr;
		}
		*(uint8_t *)pri_hdr = 0U;

		/* AdvA flag */
		if (pri_hdr_prev.adv_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		if (!pri_com_hdr->adv_mode &&
		    !(evt_prop & BT_HCI_LE_ADV_PROP_ANON) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			/* TODO: optional on 1M with Aux Ptr */
			pri_hdr->adv_addr = 1;

			/* NOTE: AdvA is filled at enable */
			pdu->tx_addr = own_addr_type & 0x1;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->tx_addr = 0;
		}

		/* TargetA flag */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev += BDADDR_SIZE;
		}
		/* TargetA flag in primary channel PDU only for directed */
		if (evt_prop & BT_HCI_LE_ADV_PROP_DIRECT) {
			pri_hdr->tgt_addr = 1;
			pdu->rx_addr = direct_addr_type;
			pri_dptr += BDADDR_SIZE;
		} else {
			pdu->rx_addr = 0;
		}

		/* No CTEInfo flag in primary channel PDU */

		/* ADI flag */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev += sizeof(struct pdu_adv_adi);

			pri_hdr->adi = 1;
			pri_dptr += sizeof(struct pdu_adv_adi);
		}

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr flag */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
		}
		/* Need aux for connectable or scannable extended advertising */
		if (pri_hdr_prev.aux_ptr ||
		    ((evt_prop & (BT_HCI_LE_ADV_PROP_CONN |
				  BT_HCI_LE_ADV_PROP_SCAN)))) {
			pri_hdr->aux_ptr = 1;
			pri_dptr += sizeof(struct pdu_adv_aux_ptr);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* No SyncInfo flag in primary channel PDU */

		/* Tx Power flag */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev += sizeof(uint8_t);
		}
		/* C1, Tx Power is optional on the LE 1M PHY, and reserved for
		 * for future use on the LE Coded PHY.
		 */
		if ((evt_prop & BT_HCI_LE_ADV_PROP_TX_POWER) &&
		    (!pri_hdr_prev.aux_ptr || (phy_p != PHY_CODED))) {
			pri_hdr->tx_pwr = 1;
			pri_dptr += sizeof(uint8_t);
		}

		/* Calc primary PDU len */
		len = ull_adv_aux_hdr_len_calc(pri_com_hdr, &pri_dptr);
		ull_adv_aux_hdr_len_fill(pri_com_hdr, len);

		/* Set PDU length */
		pdu->len = len;

		/* Start filling primary PDU payload based on flags */
		/* NOTE: fields are filled in reverse order (last field
		 * first) walking pri_dptr/pri_dptr_prev back down.
		 */

		/* No AdvData in primary channel PDU */

		/* No ACAD in primary channel PDU */

		/* Tx Power */
		if (pri_hdr_prev.tx_pwr) {
			pri_dptr_prev -= sizeof(uint8_t);
		}
		if (pri_hdr->tx_pwr) {
			uint8_t _tx_pwr;

			_tx_pwr = 0;
			if (tx_pwr) {
				if (*tx_pwr != BT_HCI_LE_ADV_TX_POWER_NO_PREF) {
					_tx_pwr = *tx_pwr;
				} else {
					/* Report the selected power back to
					 * the caller when "no preference".
					 */
					*tx_pwr = _tx_pwr;
				}
			}

			pri_dptr -= sizeof(uint8_t);
			*pri_dptr = _tx_pwr;
		}

		/* No SyncInfo in primary channel PDU */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* AuxPtr */
		if (pri_hdr_prev.aux_ptr) {
			pri_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
		}
		if (pri_hdr->aux_ptr) {
			pri_dptr -= sizeof(struct pdu_adv_aux_ptr);
			ull_adv_aux_ptr_fill((void *)pri_dptr, 0U, phy_s);
		}
		adv->lll.phy_s = phy_s;
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

		/* ADI */
		if (pri_hdr_prev.adi) {
			pri_dptr_prev -= sizeof(struct pdu_adv_adi);
		}
		if (pri_hdr->adi) {
			struct pdu_adv_adi *adi;

			pri_dptr -= sizeof(struct pdu_adv_adi);

			/* NOTE: memmove shall handle overlapping buffers */
			memmove(pri_dptr, pri_dptr_prev,
				sizeof(struct pdu_adv_adi));

			adi = (void *)pri_dptr;
			PDU_ADV_ADI_SID_SET(adi, sid);
		}
		adv->sid = sid;

		/* No CTEInfo field in primary channel PDU */

		/* TargetA */
		if (pri_hdr_prev.tgt_addr) {
			pri_dptr_prev -= BDADDR_SIZE;
		}
		if (pri_hdr->tgt_addr) {
			pri_dptr -= BDADDR_SIZE;
			/* NOTE: RPA will be updated on enable, if needed */
			memcpy(pri_dptr, direct_addr, BDADDR_SIZE);
		}

		/* NOTE: AdvA, filled at enable and RPA timeout */

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		/* Make sure aux is created if we have AuxPtr */
		if (pri_hdr->aux_ptr) {
			uint8_t pri_idx, sec_idx;
			uint8_t err;

			err = ull_adv_aux_hdr_set_clear(adv,
						ULL_ADV_PDU_HDR_FIELD_ADVA,
						0U, &own_addr_type,
						&pri_idx, &sec_idx);
			if (err) {
				/* TODO: cleanup? */
				return err;
			}

			lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
			lll_adv_data_enqueue(&adv->lll, pri_idx);
		}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else if (pdu->len == 0) {
		/* Fresh undirected/scannable legacy PDU: AdvA only */
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		pdu->len = BDADDR_SIZE;
	} else {

#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		if (((pdu_type_prev == PDU_ADV_TYPE_DIRECT_IND) ||
		     (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		      (pdu_type_prev == PDU_ADV_TYPE_EXT_IND))) &&
		    (pdu->type != PDU_ADV_TYPE_DIRECT_IND) &&
		    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
		     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
			/* Restore the legacy AD Data */
			memcpy(pdu->adv_ind.data, adv->ad_data_backup.data,
			       adv->ad_data_backup.len);
			pdu->len = offsetof(struct pdu_adv_adv_ind, data) +
				   adv->ad_data_backup.len;
		}
#endif /* CONFIG_BT_CTLR_AD_DATA_BACKUP */

		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
	}

	/* Initialize LLL header with parent pointer so that ULL contexts
	 * can be referenced in functions having the LLL context reference.
	 */
	lll_hdr_init(&adv->lll, adv);

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Make sure new extended advertising set is initialized with no
		 * scan response data. Existing sets keep whatever data was set.
		 */
		if (is_pdu_type_changed) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_aux_data_init(&adv->lll.scan_rsp);
			if (err) {
				return err;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
			pdu->type = PDU_ADV_TYPE_AUX_SCAN_RSP;
			pdu->len = 0;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		pdu = lll_adv_scan_rsp_peek(&adv->lll);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (is_pdu_type_changed || !pdu) {
			uint8_t err;

			/* Make sure the scan response PDU is allocated from the right pool */
			(void)lll_adv_data_release(&adv->lll.scan_rsp);
			lll_adv_data_reset(&adv->lll.scan_rsp);
			err = lll_adv_data_init(&adv->lll.scan_rsp);
			if (err) {
				return err;
			}

			pdu = lll_adv_scan_rsp_peek(&adv->lll);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		/* Make sure legacy advertising set has scan response data
		 * initialized.
		 */
		pdu->type = PDU_ADV_TYPE_SCAN_RSP;
		pdu->rfu = 0;
		pdu->chan_sel = 0;
		pdu->tx_addr = own_addr_type & 0x1;
		pdu->rx_addr = 0;
		if (pdu->len == 0) {
			pdu->len = BDADDR_SIZE;
		}
	}

	return 0;
}
735
736 #if defined(CONFIG_BT_CTLR_ADV_EXT)
737 uint8_t ll_adv_data_set(uint8_t handle, uint8_t len, uint8_t const *const data)
738 {
739 #else /* !CONFIG_BT_CTLR_ADV_EXT */
740 uint8_t ll_adv_data_set(uint8_t len, uint8_t const *const data)
741 {
742 const uint8_t handle = 0;
743 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
744 struct ll_adv_set *adv;
745
746 adv = ull_adv_set_get(handle);
747 if (!adv) {
748 return BT_HCI_ERR_CMD_DISALLOWED;
749 }
750
751 return ull_adv_data_set(adv, len, data);
752 }
753
754 #if defined(CONFIG_BT_CTLR_ADV_EXT)
755 uint8_t ll_adv_scan_rsp_set(uint8_t handle, uint8_t len,
756 uint8_t const *const data)
757 {
758 #else /* !CONFIG_BT_CTLR_ADV_EXT */
759 uint8_t ll_adv_scan_rsp_set(uint8_t len, uint8_t const *const data)
760 {
761 const uint8_t handle = 0;
762 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
763 struct ll_adv_set *adv;
764
765 adv = ull_adv_set_get(handle);
766 if (!adv) {
767 return BT_HCI_ERR_CMD_DISALLOWED;
768 }
769
770 return ull_scan_rsp_set(adv, len, data);
771 }
772
773 #if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
774 #if defined(CONFIG_BT_HCI_MESH_EXT)
775 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
776 uint8_t at_anchor, uint32_t ticks_anchor, uint8_t retry,
777 uint8_t scan_window, uint8_t scan_delay)
778 {
779 #else /* !CONFIG_BT_HCI_MESH_EXT */
780 uint8_t ll_adv_enable(uint8_t handle, uint8_t enable,
781 uint16_t duration, uint8_t max_ext_adv_evts)
782 {
783 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
784 struct ll_adv_sync_set *sync = NULL;
785 uint8_t sync_is_started = 0U;
786 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
787 struct ll_adv_aux_set *aux = NULL;
788 uint8_t aux_is_started = 0U;
789 uint32_t ticks_anchor;
790 #endif /* !CONFIG_BT_HCI_MESH_EXT */
791 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
792 uint8_t ll_adv_enable(uint8_t enable)
793 {
794 uint8_t const handle = 0;
795 uint32_t ticks_anchor;
796 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */
797 uint32_t ticks_slot_overhead;
798 uint32_t ticks_slot_offset;
799 uint32_t volatile ret_cb;
800 struct pdu_adv *pdu_scan;
801 struct pdu_adv *pdu_adv;
802 struct ll_adv_set *adv;
803 struct lll_adv *lll;
804 uint8_t hci_err;
805 uint32_t ret;
806
807 if (!enable) {
808 return disable(handle);
809 }
810
811 adv = is_disabled_get(handle);
812 if (!adv) {
813 /* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
814 * Enabling advertising when it is already enabled can cause the
815 * random address to change. As the current implementation does
816 * does not update RPAs on every advertising enable, only on
817 * `rpa_timeout_ms` timeout, we are not going to implement the
818 * "can cause the random address to change" for legacy
819 * advertisements.
820 */
821
822 /* If HCI LE Set Extended Advertising Enable command is sent
823 * again for an advertising set while that set is enabled, the
824 * timer used for duration and the number of events counter are
825 * reset and any change to the random address shall take effect.
826 */
827 if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT) ||
828 IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
829 #if defined(CONFIG_BT_CTLR_ADV_EXT)
830 if (ll_adv_cmds_is_ext()) {
831 enum node_rx_type volatile *type;
832
833 adv = ull_adv_is_enabled_get(handle);
834 if (!adv) {
835 /* This should not be happening as
836 * is_disabled_get failed.
837 */
838 return BT_HCI_ERR_CMD_DISALLOWED;
839 }
840
841 /* Change random address in the primary or
842 * auxiliary PDU as necessary.
843 */
844 lll = &adv->lll;
845 pdu_adv = lll_adv_data_peek(lll);
846 pdu_scan = lll_adv_scan_rsp_peek(lll);
847 hci_err = adv_scan_pdu_addr_update(adv,
848 pdu_adv,
849 pdu_scan);
850 if (hci_err) {
851 return hci_err;
852 }
853
854 if (!adv->lll.node_rx_adv_term) {
855 /* This should not be happening,
856 * adv->is_enabled would be 0 if
857 * node_rx_adv_term is released back to
858 * pool.
859 */
860 return BT_HCI_ERR_CMD_DISALLOWED;
861 }
862
863 /* Check advertising not terminated */
864 type = &adv->lll.node_rx_adv_term->type;
865 if (*type == NODE_RX_TYPE_NONE) {
866 /* Reset event counter, update duration,
867 * and max events
868 */
869 adv_max_events_duration_set(adv,
870 duration, max_ext_adv_evts);
871 }
872
873 /* Check the counter reset did not race with
874 * advertising terminated.
875 */
876 if (*type != NODE_RX_TYPE_NONE) {
877 /* Race with advertising terminated */
878 return BT_HCI_ERR_CMD_DISALLOWED;
879 }
880 }
881 #endif /* CONFIG_BT_CTLR_ADV_EXT */
882
883 return 0;
884 }
885
886 /* Fail on being strict as a legacy controller, valid only under
887 * Bluetooth Specification v4.x.
888 * Bluetooth Specification v5.0 and above shall not fail to
889 * enable already enabled advertising.
890 */
891 return BT_HCI_ERR_CMD_DISALLOWED;
892 }
893
894 lll = &adv->lll;
895
896 #if defined(CONFIG_BT_CTLR_PRIVACY)
897 lll->rl_idx = FILTER_IDX_NONE;
898
899 /* Prepare filter accept list and optionally resolving list */
900 ull_filter_adv_update(lll->filter_policy);
901
902 if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
903 adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) {
904 /* Look up the resolving list */
905 lll->rl_idx = ull_filter_rl_find(adv->peer_addr_type,
906 adv->peer_addr, NULL);
907
908 if (lll->rl_idx != FILTER_IDX_NONE) {
909 /* Generate RPAs if required */
910 ull_filter_rpa_update(false);
911 }
912 }
913 #endif /* !CONFIG_BT_CTLR_PRIVACY */
914
915 pdu_adv = lll_adv_data_peek(lll);
916 pdu_scan = lll_adv_scan_rsp_peek(lll);
917
918 #if defined(CONFIG_BT_CTLR_ADV_EXT)
919 if (!pdu_scan) {
920 uint8_t err;
921
922 if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
923 /* Should never happen */
924 return BT_HCI_ERR_CMD_DISALLOWED;
925 }
926
927 err = lll_adv_data_init(&adv->lll.scan_rsp);
928 if (err) {
929 return err;
930 }
931
932 pdu_scan = lll_adv_scan_rsp_peek(lll);
933 init_pdu(pdu_scan, PDU_ADV_TYPE_SCAN_RSP);
934 }
935 #endif /* CONFIG_BT_CTLR_ADV_EXT */
936
937 /* Update Bluetooth Device address in advertising and scan response
938 * PDUs.
939 */
940 hci_err = adv_scan_pdu_addr_update(adv, pdu_adv, pdu_scan);
941 if (hci_err) {
942 return hci_err;
943 }
944
945 #if defined(CONFIG_BT_HCI_MESH_EXT)
946 if (scan_delay) {
947 if (ull_scan_is_enabled(0)) {
948 return BT_HCI_ERR_CMD_DISALLOWED;
949 }
950
951 lll->is_mesh = 1;
952 }
953 #endif /* CONFIG_BT_HCI_MESH_EXT */
954
955 #if defined(CONFIG_BT_PERIPHERAL)
956 /* prepare connectable advertising */
957 if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) ||
958 (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) ||
959 #if defined(CONFIG_BT_CTLR_ADV_EXT)
960 ((pdu_adv->type == PDU_ADV_TYPE_EXT_IND) &&
961 (pdu_adv->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_CONN))
962 #else
963 0
964 #endif
965 ) {
966 struct node_rx_pdu *node_rx;
967 struct ll_conn *conn;
968 struct lll_conn *conn_lll;
969 void *link;
970 int err;
971
972 if (lll->conn) {
973 return BT_HCI_ERR_CMD_DISALLOWED;
974 }
975
976 link = ll_rx_link_alloc();
977 if (!link) {
978 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
979 }
980
981 node_rx = ll_rx_alloc();
982 if (!node_rx) {
983 ll_rx_link_release(link);
984
985 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
986 }
987
988 conn = ll_conn_acquire();
989 if (!conn) {
990 ll_rx_release(node_rx);
991 ll_rx_link_release(link);
992
993 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
994 }
995
996 conn_lll = &conn->lll;
997 conn_lll->handle = 0xFFFF;
998
999 if (!conn_lll->link_tx_free) {
1000 conn_lll->link_tx_free = &conn_lll->link_tx;
1001 }
1002
1003 memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
1004 &conn_lll->memq_tx.tail);
1005 conn_lll->link_tx_free = NULL;
1006
1007 conn_lll->packet_tx_head_len = 0;
1008 conn_lll->packet_tx_head_offset = 0;
1009
1010 conn_lll->sn = 0;
1011 conn_lll->nesn = 0;
1012 conn_lll->empty = 0;
1013
1014 #if defined(CONFIG_BT_CTLR_PHY)
1015 conn_lll->phy_flags = 0;
1016 if (0) {
1017 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1018 } else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1019 conn_lll->phy_tx = lll->phy_s;
1020 conn_lll->phy_tx_time = lll->phy_s;
1021 conn_lll->phy_rx = lll->phy_s;
1022 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1023 } else {
1024 conn_lll->phy_tx = PHY_1M;
1025 conn_lll->phy_tx_time = PHY_1M;
1026 conn_lll->phy_rx = PHY_1M;
1027 }
1028 #endif /* CONFIG_BT_CTLR_PHY */
1029
1030 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
1031 conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1032 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1033 conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1034 conn_lll->rssi_sample_count = 0;
1035 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1036 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
1037
1038 #if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
1039 conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
1040 #endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */
1041
1042 /* FIXME: BEGIN: Move to ULL? */
1043 conn_lll->role = 1;
1044 conn_lll->periph.initiated = 0;
1045 conn_lll->periph.cancelled = 0;
1046 conn_lll->data_chan_sel = 0;
1047 conn_lll->data_chan_use = 0;
1048 conn_lll->event_counter = 0;
1049
1050 conn_lll->latency_prepare = 0;
1051 conn_lll->latency_event = 0;
1052 conn_lll->periph.latency_enabled = 0;
1053 conn_lll->periph.window_widening_prepare_us = 0;
1054 conn_lll->periph.window_widening_event_us = 0;
1055 conn_lll->periph.window_size_prepare_us = 0;
1056 /* FIXME: END: Move to ULL? */
1057 #if defined(CONFIG_BT_CTLR_CONN_META)
1058 memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
1059 #endif /* CONFIG_BT_CTLR_CONN_META */
1060 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1061 conn_lll->df_rx_cfg.is_initialized = 0U;
1062 conn_lll->df_rx_cfg.hdr.elem_size = sizeof(struct lll_df_conn_rx_params);
1063 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1064 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX)
1065 conn_lll->df_tx_cfg.is_initialized = 0U;
1066 conn_lll->df_tx_cfg.cte_rsp_en = 0U;
1067 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX */
1068 conn->connect_expire = 6;
1069 conn->supervision_expire = 0;
1070
1071 #if defined(CONFIG_BT_CTLR_LE_PING)
1072 conn->apto_expire = 0U;
1073 conn->appto_expire = 0U;
1074 #endif
1075
1076 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
1077 conn->own_id_addr_type = BT_ADDR_LE_NONE->type;
1078 (void)memcpy(conn->own_id_addr, BT_ADDR_LE_NONE->a.val,
1079 sizeof(conn->own_id_addr));
1080 conn->peer_id_addr_type = BT_ADDR_LE_NONE->type;
1081 (void)memcpy(conn->peer_id_addr, BT_ADDR_LE_NONE->a.val,
1082 sizeof(conn->peer_id_addr));
1083 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1084
1085 /* Re-initialize the control procedure data structures */
1086 ull_llcp_init(conn);
1087
1088 conn->llcp_terminate.reason_final = 0;
1089 /* NOTE: use allocated link for generating dedicated
1090 * terminate ind rx node
1091 */
1092 conn->llcp_terminate.node_rx.hdr.link = link;
1093
1094 #if defined(CONFIG_BT_CTLR_PHY)
1095 conn->phy_pref_tx = ull_conn_default_phy_tx_get();
1096 conn->phy_pref_rx = ull_conn_default_phy_rx_get();
1097 #endif /* CONFIG_BT_CTLR_PHY */
1098
1099 #if defined(CONFIG_BT_CTLR_LE_ENC)
1100 conn->pause_rx_data = 0U;
1101 #endif /* CONFIG_BT_CTLR_LE_ENC */
1102
1103 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1104 uint8_t phy_in_use = PHY_1M;
1105
1106
1107 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1108 if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
1109 phy_in_use = lll->phy_s;
1110 }
1111 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1112
1113 ull_dle_init(conn, phy_in_use);
1114 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1115
1116 /* Re-initialize the Tx Q */
1117 ull_tx_q_init(&conn->tx_q);
1118
1119 /* NOTE: using same link as supplied for terminate ind */
1120 adv->link_cc_free = link;
1121 adv->node_rx_cc_free = node_rx;
1122 lll->conn = conn_lll;
1123
1124 ull_hdr_init(&conn->ull);
1125 lll_hdr_init(&conn->lll, conn);
1126
1127 /* wait for stable clocks */
1128 err = lll_clock_wait();
1129 if (err) {
1130 conn_release(adv);
1131
1132 return BT_HCI_ERR_HW_FAILURE;
1133 }
1134 }
1135 #endif /* CONFIG_BT_PERIPHERAL */
1136
1137 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1138 if (ll_adv_cmds_is_ext()) {
1139 struct node_rx_pdu *node_rx_adv_term;
1140 void *link_adv_term;
1141
1142 /* The alloc here used for ext adv termination event */
1143 link_adv_term = ll_rx_link_alloc();
1144 if (!link_adv_term) {
1145 #if defined(CONFIG_BT_PERIPHERAL)
1146 if (adv->lll.conn) {
1147 conn_release(adv);
1148 }
1149 #endif /* CONFIG_BT_PERIPHERAL */
1150
1151 /* TODO: figure out right return value */
1152 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1153 }
1154
1155 node_rx_adv_term = ll_rx_alloc();
1156 if (!node_rx_adv_term) {
1157 #if defined(CONFIG_BT_PERIPHERAL)
1158 if (adv->lll.conn) {
1159 conn_release(adv);
1160 }
1161 #endif /* CONFIG_BT_PERIPHERAL */
1162
1163 ll_rx_link_release(link_adv_term);
1164
1165 /* TODO: figure out right return value */
1166 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1167 }
1168
1169 node_rx_adv_term->hdr.type = NODE_RX_TYPE_NONE;
1170
1171 node_rx_adv_term->hdr.link = (void *)link_adv_term;
1172 adv->lll.node_rx_adv_term = (void *)node_rx_adv_term;
1173
1174 if (0) {
1175 #if defined(CONFIG_BT_PERIPHERAL)
1176 } else if (lll->is_hdcd) {
1177 adv_max_events_duration_set(adv, 0U, 0U);
1178 #endif /* CONFIG_BT_PERIPHERAL */
1179 } else {
1180 adv_max_events_duration_set(adv, duration,
1181 max_ext_adv_evts);
1182 }
1183 } else {
1184 adv->lll.node_rx_adv_term = NULL;
1185 adv_max_events_duration_set(adv, 0U, 0U);
1186 }
1187
1188 const uint8_t phy = lll->phy_p;
1189 const uint8_t phy_flags = lll->phy_flags;
1190
1191 adv->event_counter = 0U;
1192 #else
1193 /* Legacy ADV only supports LE_1M PHY */
1194 const uint8_t phy = PHY_1M;
1195 const uint8_t phy_flags = 0U;
1196 #endif
1197
1198 /* For now we adv on all channels enabled in channel map */
1199 uint8_t ch_map = lll->chan_map;
1200 const uint8_t adv_chn_cnt = util_ones_count_get(&ch_map, sizeof(ch_map));
1201
1202 if (adv_chn_cnt == 0) {
1203 /* ADV needs at least one channel */
1204 goto failure_cleanup;
1205 }
1206
1207 /* Calculate the advertising time reservation */
1208 uint16_t time_us = adv_time_get(pdu_adv, pdu_scan, adv_chn_cnt, phy,
1209 phy_flags);
1210
1211 uint16_t interval = adv->interval;
1212 #if defined(CONFIG_BT_HCI_MESH_EXT)
1213 if (lll->is_mesh) {
1214 uint16_t interval_min_us;
1215
1216 _radio.advertiser.retry = retry;
1217 _radio.advertiser.scan_delay_ms = scan_delay;
1218 _radio.advertiser.scan_window_ms = scan_window;
1219
1220 interval_min_us = time_us +
1221 (scan_delay + scan_window) * USEC_PER_MSEC;
1222 if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
1223 interval = DIV_ROUND_UP(interval_min_us,
1224 SCAN_INT_UNIT_US);
1225 }
1226
1227 /* passive scanning */
1228 _radio.scanner.type = 0;
1229
1230 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1231 /* TODO: Coded PHY support */
1232 _radio.scanner.phy = 0;
1233 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1234
1235 #if defined(CONFIG_BT_CTLR_PRIVACY)
1236 /* TODO: Privacy support */
1237 _radio.scanner.rpa_gen = 0;
1238 _radio.scanner.rl_idx = rl_idx;
1239 #endif /* CONFIG_BT_CTLR_PRIVACY */
1240
1241 _radio.scanner.filter_policy = filter_policy;
1242 }
1243 #endif /* CONFIG_BT_HCI_MESH_EXT */
1244
1245 /* Initialize ULL context before radio event scheduling is started. */
1246 ull_hdr_init(&adv->ull);
1247
1248 /* TODO: active_to_start feature port */
1249 adv->ull.ticks_active_to_start = 0;
1250 adv->ull.ticks_prepare_to_start =
1251 HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1252 adv->ull.ticks_preempt_to_start =
1253 HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1254 adv->ull.ticks_slot = HAL_TICKER_US_TO_TICKS(time_us);
1255
1256 ticks_slot_offset = MAX(adv->ull.ticks_active_to_start,
1257 adv->ull.ticks_prepare_to_start);
1258
1259 if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1260 ticks_slot_overhead = ticks_slot_offset;
1261 } else {
1262 ticks_slot_overhead = 0;
1263 }
1264
1265 #if !defined(CONFIG_BT_HCI_MESH_EXT)
1266 ticks_anchor = ticker_ticks_now_get();
1267 ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1268
1269 #else /* CONFIG_BT_HCI_MESH_EXT */
1270 if (!at_anchor) {
1271 ticks_anchor = ticker_ticks_now_get();
1272 }
1273 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1274
1275 /* High Duty Cycle Directed Advertising if interval is 0. */
1276 #if defined(CONFIG_BT_PERIPHERAL)
1277 lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND);
1278 if (lll->is_hdcd) {
1279 ret_cb = TICKER_STATUS_BUSY;
1280
1281 #if defined(CONFIG_BT_TICKER_EXT)
1282 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1283 ll_adv_ticker_ext[handle].ticks_slot_window = 0;
1284 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
1285
1286 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1287 ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1288 ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1289 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1290
1291 ret = ticker_start_ext(
1292 #else /* !CONFIG_BT_TICKER_EXT */
1293 ret = ticker_start(
1294 #endif /* !CONFIG_BT_TICKER_EXT */
1295 TICKER_INSTANCE_ID_CTLR,
1296 TICKER_USER_ID_THREAD,
1297 (TICKER_ID_ADV_BASE + handle),
1298 ticks_anchor, 0,
1299 (adv->ull.ticks_slot + ticks_slot_overhead),
1300 TICKER_NULL_REMAINDER, TICKER_NULL_LAZY,
1301 (adv->ull.ticks_slot + ticks_slot_overhead),
1302 ticker_cb, adv,
1303 ull_ticker_status_give, (void *)&ret_cb
1304 #if defined(CONFIG_BT_TICKER_EXT)
1305 ,
1306 &ll_adv_ticker_ext[handle]
1307 #endif /* CONFIG_BT_TICKER_EXT */
1308 );
1309 ret = ull_ticker_status_take(ret, &ret_cb);
1310 if (ret != TICKER_STATUS_SUCCESS) {
1311 goto failure_cleanup;
1312 }
1313
1314 ret_cb = TICKER_STATUS_BUSY;
1315 ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
1316 TICKER_USER_ID_THREAD,
1317 TICKER_ID_ADV_STOP, ticks_anchor,
1318 HAL_TICKER_US_TO_TICKS(ticks_slot_offset +
1319 (1280 * 1000)),
1320 TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER,
1321 TICKER_NULL_LAZY, TICKER_NULL_SLOT,
1322 ticker_stop_cb, adv,
1323 ull_ticker_status_give, (void *)&ret_cb);
1324 } else
1325 #endif /* CONFIG_BT_PERIPHERAL */
1326 {
1327 const uint32_t ticks_slot = adv->ull.ticks_slot +
1328 ticks_slot_overhead;
1329 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1330 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1331 uint8_t pri_idx, sec_idx;
1332
1333 /* Add sync_info into auxiliary PDU */
1334 if (lll->sync) {
1335 sync = HDR_LLL2ULL(lll->sync);
1336 if (sync->is_enabled && !sync->is_started) {
1337 struct pdu_adv_sync_info *sync_info;
1338 uint8_t value[1 + sizeof(sync_info)];
1339 uint8_t err;
1340
1341 err = ull_adv_aux_hdr_set_clear(adv,
1342 ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
1343 0U, value, &pri_idx, &sec_idx);
1344 if (err) {
1345 return err;
1346 }
1347
1348 /* First byte in the length-value encoded
1349 * parameter is size of sync_info structure,
1350 * followed by pointer to sync_info in the
1351 * PDU.
1352 */
1353 memcpy(&sync_info, &value[1], sizeof(sync_info));
1354 ull_adv_sync_info_fill(sync, sync_info);
1355 } else {
1356 /* Do not start periodic advertising */
1357 sync = NULL;
1358 }
1359 }
1360 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1361
1362 if (lll->aux) {
1363 struct lll_adv_aux *lll_aux = lll->aux;
1364 uint32_t ticks_slot_overhead_aux;
1365 uint32_t ticks_anchor_aux;
1366
1367 aux = HDR_LLL2ULL(lll_aux);
1368
1369 /* Schedule auxiliary PDU after primary channel
1370 * PDUs.
1371 * Reduce the MAFS offset by the Event Overhead
1372 * so that actual radio air packet start as
1373 * close as possible after the MAFS gap.
1374 * Add 2 ticks offset as compensation towards
1375 * the +/- 1 tick ticker scheduling jitter due
1376 * to accumulation of remainder to maintain
1377 * average ticker interval.
1378 */
1379 ticks_anchor_aux =
1380 ticks_anchor + ticks_slot +
1381 HAL_TICKER_US_TO_TICKS(
1382 MAX(EVENT_MAFS_US,
1383 EVENT_OVERHEAD_START_US) -
1384 EVENT_OVERHEAD_START_US +
1385 (EVENT_TICKER_RES_MARGIN_US << 1));
1386
1387 ticks_slot_overhead_aux =
1388 ull_adv_aux_evt_init(aux, &ticks_anchor_aux);
1389
1390 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1391 /* Start periodic advertising if enabled and not already
1392 * started.
1393 */
1394 if (sync) {
1395 uint32_t ticks_slot_overhead;
1396 uint32_t ticks_slot_aux;
1397
1398 #if defined(CONFIG_BT_CTLR_ADV_RESERVE_MAX)
1399 uint32_t us_slot;
1400
1401 us_slot = ull_adv_aux_time_get(aux,
1402 PDU_AC_PAYLOAD_SIZE_MAX,
1403 PDU_AC_PAYLOAD_SIZE_MAX);
1404 ticks_slot_aux =
1405 HAL_TICKER_US_TO_TICKS(us_slot) +
1406 ticks_slot_overhead_aux;
1407 #else
1408 ticks_slot_aux = aux->ull.ticks_slot +
1409 ticks_slot_overhead_aux;
1410 #endif
1411
1412 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET) || \
1413 (CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET == 0)
1414 /* Schedule periodic advertising PDU after
1415 * auxiliary PDUs.
1416 * Reduce the MAFS offset by the Event Overhead
1417 * so that actual radio air packet start as
1418 * close as possible after the MAFS gap.
1419 * Add 2 ticks offset as compensation towards
1420 * the +/- 1 tick ticker scheduling jitter due
1421 * to accumulation of remainder to maintain
1422 * average ticker interval.
1423 */
1424 uint32_t ticks_anchor_sync = ticks_anchor_aux +
1425 ticks_slot_aux +
1426 HAL_TICKER_US_TO_TICKS(
1427 MAX(EVENT_MAFS_US,
1428 EVENT_OVERHEAD_START_US) -
1429 EVENT_OVERHEAD_START_US +
1430 (EVENT_TICKER_RES_MARGIN_US << 1));
1431
1432 #else /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1433 uint32_t ticks_anchor_sync = ticks_anchor_aux +
1434 HAL_TICKER_US_TO_TICKS(
1435 CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET);
1436
1437 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
1438
1439 ticks_slot_overhead = ull_adv_sync_evt_init(adv, sync, NULL);
1440 ret = ull_adv_sync_start(adv, sync,
1441 ticks_anchor_sync,
1442 ticks_slot_overhead);
1443 if (ret) {
1444 goto failure_cleanup;
1445 }
1446
1447 sync_is_started = 1U;
1448
1449 lll_adv_aux_data_enqueue(adv->lll.aux, sec_idx);
1450 lll_adv_data_enqueue(lll, pri_idx);
1451 } else {
1452 /* TODO: Find the anchor before the group of
1453 * active Periodic Advertising events, so
1454 * that auxiliary sets are grouped such
1455 * that auxiliary sets and Periodic
1456 * Advertising sets are non-overlapping
1457 * for the same event interval.
1458 */
1459 }
1460 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1461
1462 /* Keep aux interval equal or higher than primary PDU
1463 * interval.
1464 * Use periodic interval units to represent the
1465 * periodic behavior of scheduling of AUX_ADV_IND PDUs
1466 * so that it is grouped with similar interval units
1467 * used for ACL Connections, Periodic Advertising and
1468 * BIG radio events.
1469 */
1470 aux->interval =
1471 DIV_ROUND_UP(((uint64_t)adv->interval *
1472 ADV_INT_UNIT_US) +
1473 HAL_TICKER_TICKS_TO_US(
1474 ULL_ADV_RANDOM_DELAY),
1475 PERIODIC_INT_UNIT_US);
1476
1477 ret = ull_adv_aux_start(aux, ticks_anchor_aux,
1478 ticks_slot_overhead_aux);
1479 if (ret) {
1480 goto failure_cleanup;
1481 }
1482
1483 aux_is_started = 1U;
1484 }
1485 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1486
1487 ret_cb = TICKER_STATUS_BUSY;
1488
1489 #if defined(CONFIG_BT_TICKER_EXT)
1490 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
1491 ll_adv_ticker_ext[handle].ticks_slot_window =
1492 ULL_ADV_RANDOM_DELAY + ticks_slot;
1493 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
1494
1495 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1496 if (lll->aux) {
1497 uint8_t aux_handle = ull_adv_aux_handle_get(aux);
1498
1499 ll_adv_ticker_ext[handle].expire_info_id = TICKER_ID_ADV_AUX_BASE +
1500 aux_handle;
1501 ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1502 } else {
1503 ll_adv_ticker_ext[handle].expire_info_id = TICKER_NULL;
1504 ll_adv_ticker_ext[handle].ext_timeout_func = ticker_cb;
1505 }
1506 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1507
1508 ret = ticker_start_ext(
1509 #else /* !CONFIG_BT_TICKER_EXT */
1510 ret = ticker_start(
1511 #endif /* !CONFIG_BT_TICKER_EXT */
1512 TICKER_INSTANCE_ID_CTLR,
1513 TICKER_USER_ID_THREAD,
1514 (TICKER_ID_ADV_BASE + handle),
1515 ticks_anchor, 0,
1516 HAL_TICKER_US_TO_TICKS((uint64_t)interval *
1517 ADV_INT_UNIT_US),
1518 TICKER_NULL_REMAINDER,
1519 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1520 !defined(CONFIG_BT_CTLR_LOW_LAT)
1521 /* Force expiry to ensure timing update */
1522 TICKER_LAZY_MUST_EXPIRE,
1523 #else
1524 TICKER_NULL_LAZY,
1525 #endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
1526 ticks_slot,
1527 ticker_cb, adv,
1528 ull_ticker_status_give, (void *)&ret_cb
1529 #if defined(CONFIG_BT_TICKER_EXT)
1530 ,
1531 &ll_adv_ticker_ext[handle]
1532 #endif /* CONFIG_BT_TICKER_EXT */
1533 );
1534 }
1535
1536 ret = ull_ticker_status_take(ret, &ret_cb);
1537 if (ret != TICKER_STATUS_SUCCESS) {
1538 goto failure_cleanup;
1539 }
1540
1541 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1542 if (aux_is_started) {
1543 aux->is_started = aux_is_started;
1544
1545 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1546 if (sync_is_started) {
1547 sync->is_started = sync_is_started;
1548 }
1549 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1550 }
1551 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1552
1553 adv->is_enabled = 1;
1554
1555 #if defined(CONFIG_BT_CTLR_PRIVACY)
1556 #if defined(CONFIG_BT_HCI_MESH_EXT)
1557 if (_radio.advertiser.is_mesh) {
1558 _radio.scanner.is_enabled = 1;
1559
1560 ull_filter_adv_scan_state_cb(BIT(0) | BIT(1));
1561 }
1562 #else /* !CONFIG_BT_HCI_MESH_EXT */
1563 if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
1564 ull_filter_adv_scan_state_cb(BIT(0));
1565 }
1566 #endif /* !CONFIG_BT_HCI_MESH_EXT */
1567 #endif /* CONFIG_BT_CTLR_PRIVACY */
1568
1569 return 0;
1570
1571 failure_cleanup:
1572 #if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
1573 if (aux_is_started) {
1574 /* TODO: Stop extended advertising and release resources */
1575 }
1576
1577 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
1578 if (sync_is_started) {
1579 /* TODO: Stop periodic advertising and release resources */
1580 }
1581 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
1582 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
1583
1584 #if defined(CONFIG_BT_PERIPHERAL)
1585 if (adv->lll.conn) {
1586 conn_release(adv);
1587 }
1588 #endif /* CONFIG_BT_PERIPHERAL */
1589
1590 return BT_HCI_ERR_CMD_DISALLOWED;
1591 }
1592
/* Initialize the ULL advertising module.
 *
 * Initializes the auxiliary and periodic advertising sub-modules first
 * (when enabled in the build), then performs the common reset-time
 * initialization of all advertising sets.
 *
 * Returns 0 on success, otherwise the error from the failing sub-init.
 */
int ull_adv_init(void)
{
	int ret;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		ret = ull_adv_aux_init();
		if (ret != 0) {
			return ret;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	ret = ull_adv_sync_init();
	if (ret != 0) {
		return ret;
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Common advertising set initialization; its status is the
	 * function's result.
	 */
	return init_reset();
}
1622
1623 uint8_t ll_adv_disable_all(void)
1624 {
1625 uint8_t handle;
1626
1627 for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
1628 (void)disable(handle);
1629 }
1630
1631 return 0U;
1632 }
1633
/* First-stage reset of the advertising module.
 *
 * Disables all advertising sets, restores the HCI command mode (raw
 * builds) and resets the periodic advertising sub-module when present.
 *
 * Returns 0 on success, otherwise the periodic-sync reset error.
 */
int ull_adv_reset(void)
{
	(void)ll_adv_disable_all();

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_HCI_RAW)
	/* Accept either legacy or extended advertising commands again */
	ll_adv_cmds = LL_ADV_CMDS_ANY;
#endif
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	{
		int err = ull_adv_sync_reset();

		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
1656
/* Second-stage reset of the advertising module.
 *
 * Finalizes the auxiliary and periodic advertising sub-module resets
 * (when enabled in the build), returns every advertising set to its
 * not-created state with its PDU double buffers reset, and re-runs the
 * common reset-time initialization.
 *
 * Returns 0 on success, otherwise the error from a failing sub-reset
 * or from init_reset().
 */
int ull_adv_reset_finalize(void)
{
	uint8_t handle;
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_AUX_SET)
	if (CONFIG_BT_CTLR_ADV_AUX_SET > 0) {
		err = ull_adv_aux_reset_finalize();
		if (err) {
			return err;
		}
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
		err = ull_adv_sync_reset_finalize();
		if (err) {
			return err;
		}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
	}
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Clear per-set extended advertising state and reset the
	 * double-buffered advertising data and scan response data.
	 */
	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		struct ll_adv_set *adv = &ll_adv[handle];
		struct lll_adv *lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		adv->is_created = 0;
		lll->aux = NULL;
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
		lll->sync = NULL;
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		lll_adv_data_reset(&lll->adv_data);
		lll_adv_data_reset(&lll->scan_rsp);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}
1701
1702 inline struct ll_adv_set *ull_adv_set_get(uint8_t handle)
1703 {
1704 if (handle >= BT_CTLR_ADV_SET) {
1705 return NULL;
1706 }
1707
1708 return &ll_adv[handle];
1709 }
1710
1711 inline uint16_t ull_adv_handle_get(struct ll_adv_set *adv)
1712 {
1713 return ((uint8_t *)adv - (uint8_t *)ll_adv) / sizeof(*adv);
1714 }
1715
1716 uint16_t ull_adv_lll_handle_get(struct lll_adv *lll)
1717 {
1718 return ull_adv_handle_get(HDR_LLL2ULL(lll));
1719 }
1720
1721 inline struct ll_adv_set *ull_adv_is_enabled_get(uint8_t handle)
1722 {
1723 struct ll_adv_set *adv;
1724
1725 adv = ull_adv_set_get(handle);
1726 if (!adv || !adv->is_enabled) {
1727 return NULL;
1728 }
1729
1730 return adv;
1731 }
1732
1733 int ull_adv_is_enabled(uint8_t handle)
1734 {
1735 struct ll_adv_set *adv;
1736
1737 adv = ull_adv_is_enabled_get(handle);
1738
1739 return adv != NULL;
1740 }
1741
1742 uint32_t ull_adv_filter_pol_get(uint8_t handle)
1743 {
1744 struct ll_adv_set *adv;
1745
1746 adv = ull_adv_is_enabled_get(handle);
1747 if (!adv) {
1748 return 0;
1749 }
1750
1751 return adv->lll.filter_policy;
1752 }
1753
1754 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1755 struct ll_adv_set *ull_adv_is_created_get(uint8_t handle)
1756 {
1757 struct ll_adv_set *adv;
1758
1759 adv = ull_adv_set_get(handle);
1760 if (!adv || !adv->is_created) {
1761 return NULL;
1762 }
1763
1764 return adv;
1765 }
1766
1767 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
/* Link a newly created auxiliary set to an already-running primary
 * advertising ticker.
 *
 * When the set is enabled and has an auxiliary LLL context, update the
 * primary advertising ticker so that it carries expiry info for the
 * auxiliary ticker (last argument of ticker_update_ext). All other
 * ticker parameters are passed as 0, i.e. no timing change is made.
 */
void ull_adv_aux_created(struct ll_adv_set *adv)
{
	if (adv->lll.aux && adv->is_enabled) {
		uint8_t aux_handle = ull_adv_aux_handle_get(HDR_LLL2ULL(adv->lll.aux));
		uint8_t handle = ull_adv_handle_get(adv);

		ticker_update_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
				  (TICKER_ID_ADV_BASE + handle), 0, 0, 0, 0, 0, 0,
				  ticker_update_op_cb, adv, 0,
				  TICKER_ID_ADV_AUX_BASE + aux_handle);
	}
}
1780 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1781 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1782
/* Set (legacy) Advertising Data for an advertising set.
 *
 * Allocates the inactive half of the double-buffered advertising PDU,
 * copies the header fields from the previous PDU and fills in the new
 * AD payload, then enqueues the buffer for the LLL to pick up.
 *
 * adv:  advertising set context.
 * len:  AD data length; must not exceed PDU_AC_LEG_DATA_SIZE_MAX.
 * data: AD data bytes to copy into the PDU.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM for oversize data,
 * BT_HCI_ERR_CMD_DISALLOWED when data cannot be applied to the current
 * PDU type, or the error from updating the time reservation.
 */
uint8_t ull_adv_data_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	/* Check invalid AD Data length */
	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	prev = lll_adv_data_peek(&adv->lll);

	/* Dont update data if directed, back it up */
	if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
	    (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	     (prev->type == PDU_ADV_TYPE_EXT_IND))) {
#if defined(CONFIG_BT_CTLR_AD_DATA_BACKUP)
		/* Update the backup AD Data */
		adv->ad_data_backup.len = len;
		memcpy(adv->ad_data_backup.data, data, adv->ad_data_backup.len);
		return 0;

#else /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_AD_DATA_BACKUP */
	}

	/* update adv pdu fields. */
	pdu = lll_adv_data_alloc(&adv->lll, &idx);

	/* check for race condition with LLL ISR */
	if (IS_ENABLED(CONFIG_ASSERT)) {
		uint8_t idx_test;

		/* A second alloc must return the same free index; if it
		 * does not, the LLL consumed a buffer concurrently.
		 */
		lll_adv_data_alloc(&adv->lll, &idx_test);
		__ASSERT((idx == idx_test), "Probable AD Data Corruption.\n");
	}

	/* Carry over header fields from the previous PDU so only the AD
	 * payload changes.
	 */
	pdu->type = prev->type;
	pdu->rfu = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		pdu->chan_sel = prev->chan_sel;
	} else {
		pdu->chan_sel = 0U;
	}

	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = prev->rx_addr;
	memcpy(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE);
	memcpy(&pdu->adv_ind.data[0], data, len);
	pdu->len = BDADDR_SIZE + len;

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_scan = lll_adv_scan_rsp_peek(lll);

		err = ull_adv_time_update(adv, pdu, pdu_scan);
		if (err) {
			return err;
		}
	}

	/* Make the new PDU visible to the LLL */
	lll_adv_data_enqueue(&adv->lll, idx);

	return 0;
}
1857
/* Set (legacy) Scan Response Data for an advertising set.
 *
 * Lazily initializes the scan response PDU double buffer on first use,
 * fills the inactive buffer with the new payload (address carried over
 * from the previous PDU), and enqueues it for the LLL.
 *
 * adv:  advertising set context.
 * len:  scan response data length; must not exceed
 *       PDU_AC_LEG_DATA_SIZE_MAX.
 * data: scan response data bytes to copy into the PDU.
 *
 * Returns 0 on success, BT_HCI_ERR_INVALID_PARAM for oversize data, or
 * an error from buffer init / time reservation update.
 */
uint8_t ull_scan_rsp_set(struct ll_adv_set *adv, uint8_t len,
			 uint8_t const *const data)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	if (len > PDU_AC_LEG_DATA_SIZE_MAX) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* update scan pdu fields. */
	prev = lll_adv_scan_rsp_peek(&adv->lll);
	if (!prev) {
		uint8_t err;

		/* First use: allocate and initialize the scan response
		 * PDU buffers with an empty SCAN_RSP.
		 */
		err = lll_adv_data_init(&adv->lll.scan_rsp);
		if (err) {
			return err;
		}

		prev = lll_adv_scan_rsp_peek(&adv->lll);
		init_pdu(prev, PDU_ADV_TYPE_SCAN_RSP);
	}

	pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx);
	pdu->type = PDU_ADV_TYPE_SCAN_RSP;
	pdu->rfu = 0;
	pdu->chan_sel = 0;
	pdu->tx_addr = prev->tx_addr;
	pdu->rx_addr = 0;
	pdu->len = BDADDR_SIZE + len;
	memcpy(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE);
	memcpy(&pdu->scan_rsp.data[0], data, len);

	/* Update time reservation */
	if (adv->is_enabled) {
		struct pdu_adv *pdu_adv_scan;
		struct lll_adv *lll;
		uint8_t err;

		lll = &adv->lll;
		pdu_adv_scan = lll_adv_data_peek(lll);

		/* Only scannable legacy PDU types include scan response
		 * air time in the event reservation.
		 */
		if ((pdu_adv_scan->type == PDU_ADV_TYPE_ADV_IND) ||
		    (pdu_adv_scan->type == PDU_ADV_TYPE_SCAN_IND)) {
			err = ull_adv_time_update(adv, pdu_adv_scan, pdu);
			if (err) {
				return err;
			}
		}
	}

	/* Make the new PDU visible to the LLL */
	lll_adv_scan_rsp_enqueue(&adv->lll, idx);

	return 0;
}
1915
/* Push the advertising ticker forward by a randomized delay.
 *
 * adv:                       advertising set whose ticker is updated.
 * ticks_delay_window:        width of the random delay window, in ticks;
 *                            must be non-zero (used as modulo divisor).
 * ticks_delay_window_offset: base offset added to the random delay.
 * ticks_adjust_minus:        ticks to pull the ticker back by, applied
 *                            together with the random extension.
 * fp_op_func:                ticker operation callback; may be NULL, in
 *                            which case the operation result is not
 *                            asserted.
 *
 * Returns the applied random delay in ticks.
 */
static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_window,
				   uint32_t ticks_delay_window_offset,
				   uint32_t ticks_adjust_minus,
				   ticker_op_func fp_op_func)
{
	uint32_t random_delay;
	uint32_t ret;

	/* Get pseudo-random number in the range [0..ticks_delay_window].
	 * Please note that using modulo of 2^32 sample space has an uneven
	 * distribution, slightly favoring smaller values.
	 */
	lll_rand_isr_get(&random_delay, sizeof(random_delay));
	random_delay %= ticks_delay_window;
	random_delay += (ticks_delay_window_offset + 1);

	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_ULL_HIGH,
			    TICKER_ID_ADV_BASE + ull_adv_handle_get(adv),
			    random_delay,
			    ticks_adjust_minus, 0, 0, 0, 0,
			    fp_op_func, adv);

	/* With a NULL op callback the update is fire-and-forget and any
	 * result is acceptable.
	 */
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY) ||
		  (fp_op_func == NULL));

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	/* Remember the applied delay for re-scheduling in ull_adv_done() */
	adv->delay = random_delay;
#endif
	return random_delay;
}
1948
1949 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
1950 defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
/* Handle an advertising event-done notification.
 *
 * With JIT scheduling, an aborted/late advertising event is
 * re-scheduled within what remains of the random-delay perturbation
 * window. With extended advertising, checks the max-events and
 * duration limits and, when one is reached, generates the
 * EXT_ADV_TERMINATE host notification and stops the (aux or primary)
 * ticker.
 */
void ull_adv_done(struct node_rx_event_done *done)
{
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv_aux *lll_aux;
	struct node_rx_hdr *rx_hdr;
	uint8_t handle;
	uint32_t ret;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	struct ll_adv_set *adv;
	struct lll_adv *lll;

	/* Get reference to ULL context */
	adv = CONTAINER_OF(done->param, struct ll_adv_set, ull);
	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && done->extra.result != DONE_COMPLETED) {
		/* Event aborted or too late - try to re-schedule */
		uint32_t ticks_elapsed;
		uint32_t ticks_now;
		uint32_t delay_remain;

		const uint32_t prepare_overhead =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
		const uint32_t ticks_adv_airtime = adv->ticks_at_expire +
			prepare_overhead;

		ticks_elapsed = 0U;

		/* Measure time past the intended on-air instant, if any */
		ticks_now = cntr_cnt_get();
		if ((int32_t)(ticks_now - ticks_adv_airtime) > 0) {
			ticks_elapsed = ticks_now - ticks_adv_airtime;
		}

		if (adv->delay_at_expire + ticks_elapsed <= ULL_ADV_RANDOM_DELAY) {
			/* The perturbation window is still open */
			delay_remain = ULL_ADV_RANDOM_DELAY - (adv->delay_at_expire +
							       ticks_elapsed);
		} else {
			delay_remain = 0U;
		}

		/* Check if we have enough time to re-schedule */
		if (delay_remain > prepare_overhead) {
			uint32_t ticks_adjust_minus;
			uint32_t interval_us = adv->interval * ADV_INT_UNIT_US;

			/* Get negative ticker adjustment needed to pull back ADV one
			 * interval plus the randomized delay. This means that the ticker
			 * will be updated to expire in time frame of now + start
			 * overhead, until 10 ms window is exhausted.
			 */
			ticks_adjust_minus = HAL_TICKER_US_TO_TICKS(interval_us) + adv->delay;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
			if (adv->remain_duration_us > interval_us) {
				/* Reset remain_duration_us to value before last ticker expire
				 * to correct for the re-scheduling
				 */
				adv->remain_duration_us += interval_us +
							   HAL_TICKER_TICKS_TO_US(
								adv->delay_at_expire);
			}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

			/* Apply random delay in range [prepare_overhead..delay_remain].
			 * NOTE: This ticker_update may fail if update races with
			 * ticker_stop, e.g. from ull_periph_setup. This is not a problem
			 * and we can safely ignore the operation result.
			 */
			ticker_update_rand(adv, delay_remain - prepare_overhead,
					   prepare_overhead, ticks_adjust_minus, NULL);

			/* Delay from ticker_update_rand is in addition to the last random delay */
			adv->delay += adv->delay_at_expire;

			/* Score of the event was increased due to the result, but since
			 * we're getting a another chance we'll set it back.
			 */
			adv->lll.hdr.score -= 1;
		}
	}
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (done->extra.type == EVENT_DONE_EXTRA_TYPE_ADV && adv->lll.aux) {
		/* Primary event of extended advertising done - wait for aux done */
		return;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Determine whether an advertising limit was reached; otherwise
	 * there is nothing more to do for this event.
	 */
	if (adv->max_events && (adv->event_counter >= adv->max_events)) {
		adv->max_events = 0U;

		rx_hdr = (void *)lll->node_rx_adv_term;
		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_LIMIT_REACHED;
	} else if (adv->remain_duration_us &&
		   (adv->remain_duration_us <=
		    ((uint64_t)adv->interval * ADV_INT_UNIT_US))) {
		adv->remain_duration_us = 0U;

		rx_hdr = (void *)lll->node_rx_adv_term;
		rx_hdr->rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
	} else {
		return;
	}

	handle = ull_adv_handle_get(adv);
	LL_ASSERT(handle < BT_CTLR_ADV_SET);

	/* Fill the pre-allocated terminate-event rx node; a connection
	 * handle of 0xffff marks "no connection created".
	 */
	rx_hdr->type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
	rx_hdr->handle = handle;
	rx_hdr->rx_ftr.param_adv_term.conn_handle = 0xffff;
	rx_hdr->rx_ftr.param_adv_term.num_events = adv->event_counter;

	/* Stop the auxiliary ticker when present, else the primary one;
	 * the stop callback continues the termination sequence.
	 */
	lll_aux = lll->aux;
	if (lll_aux) {
		struct ll_adv_aux_set *aux;
		uint8_t aux_handle;

		aux = HDR_LLL2ULL(lll_aux);
		aux_handle = ull_adv_aux_handle_get(aux);
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_HIGH,
				  (TICKER_ID_ADV_AUX_BASE + aux_handle),
				  ticker_stop_aux_op_cb, adv);
	} else {
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_HIGH,
				  (TICKER_ID_ADV_BASE + handle),
				  ticker_stop_ext_op_cb, adv);
	}

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* CONFIG_BT_CTLR_ADV_EXT */
}
2088 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2089
/* Refresh the AdvA (and, for directed PDUs, TargetA) address fields of
 * an advertising PDU.
 *
 * adv: advertising set context.
 * pdu: PDU whose address fields are updated in place.
 *
 * Returns the advertiser address written into the PDU (as returned by
 * adva_update()).
 */
const uint8_t *ull_adv_pdu_update_addrs(struct ll_adv_set *adv,
					struct pdu_adv *pdu)
{
	const uint8_t *adv_addr;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
	struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
	struct pdu_adv_ext_hdr hdr_flags;

	/* Only read the extended header flags when an extended header is
	 * actually present; otherwise treat all flags as clear.
	 */
	if (com_hdr->ext_hdr_len) {
		hdr_flags = *hdr;
	} else {
		*(uint8_t *)&hdr_flags = 0U;
	}
#endif

	adv_addr = adva_update(adv, pdu);

	/* Update TargetA only if directed advertising PDU is supplied. Note
	 * that AUX_SCAN_REQ does not have TargetA flag set so it will be
	 * ignored here as expected.
	 */
	if ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) ||
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	    ((pdu->type == PDU_ADV_TYPE_EXT_IND) && hdr_flags.tgt_addr) ||
#endif
	    0) {
		tgta_update(adv, pdu);
	}

	return adv_addr;
}
2123
/* Recompute the advertising event time reservation and resize the
 * ticker slot accordingly.
 *
 * adv:      advertising set context.
 * pdu:      current advertising PDU.
 * pdu_scan: current scan response PDU.
 *
 * Returns BT_HCI_ERR_SUCCESS when the slot is unchanged or successfully
 * resized, BT_HCI_ERR_CMD_DISALLOWED when the ticker update fails.
 */
uint8_t ull_adv_time_update(struct ll_adv_set *adv, struct pdu_adv *pdu,
			    struct pdu_adv *pdu_scan)
{
	uint32_t volatile ret_cb;
	uint32_t ticks_minus;
	uint32_t ticks_plus;
	struct lll_adv *lll;
	uint32_t time_ticks;
	uint8_t phy_flags;
	uint16_t time_us;
	uint8_t chan_map;
	uint8_t chan_cnt;
	uint32_t ret;
	uint8_t phy;

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	phy = lll->phy_p;
	phy_flags = lll->phy_flags;
#else
	/* Legacy advertising always uses the 1M PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif

	/* Event time depends on how many primary channels are in use */
	chan_map = lll->chan_map;
	chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map));
	time_us = adv_time_get(pdu, pdu_scan, chan_cnt, phy, phy_flags);
	time_ticks = HAL_TICKER_US_TO_TICKS(time_us);
	/* Express the new reservation as a plus/minus delta against the
	 * current ticker slot; nothing to do when they already match.
	 */
	if (adv->ull.ticks_slot > time_ticks) {
		ticks_minus = adv->ull.ticks_slot - time_ticks;
		ticks_plus = 0U;
	} else if (adv->ull.ticks_slot < time_ticks) {
		ticks_minus = 0U;
		ticks_plus = time_ticks - adv->ull.ticks_slot;
	} else {
		return BT_HCI_ERR_SUCCESS;
	}

	/* Blocking ticker update: wait for the operation callback */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_ADV_BASE +
			     ull_adv_handle_get(adv)),
			    0, 0, ticks_plus, ticks_minus, 0, 0,
			    ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	adv->ull.ticks_slot = time_ticks;

	return BT_HCI_ERR_SUCCESS;
}
2179
/* Common reset-time initialization of all advertising sets.
 *
 * Initializes the double-buffered advertising data of every set, and
 * clears state that must not survive a reset (scan response buffers,
 * DF configuration pointer). Always returns 0.
 */
static int init_reset(void)
{
	uint8_t handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Only set #0 exists without extended advertising */
	ll_adv[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		lll_adv_data_init(&ll_adv[handle].lll.adv_data);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* scan_rsp is not init'ed until we know if it is a legacy or extended scan rsp */
		memset(&ll_adv[handle].lll.scan_rsp, 0, sizeof(ll_adv[handle].lll.scan_rsp));
#else
		lll_adv_data_init(&ll_adv[handle].lll.scan_rsp);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
		/* Pointer to DF configuration must be cleared on reset. In other case it will point
		 * to a memory pool address that should be released. It may be used by the pool
		 * itself. In such situation it may cause error.
		 */
		ll_adv[handle].df_cfg = NULL;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
	}

	/* Make sure that set #0 is initialized with empty legacy PDUs. This is
	 * especially important if legacy HCI interface is used for advertising
	 * because it allows to enable advertising without any configuration,
	 * thus we need to have PDUs already initialized.
	 */
	init_set(&ll_adv[0]);

	return 0;
}
2217
2218 static inline struct ll_adv_set *is_disabled_get(uint8_t handle)
2219 {
2220 struct ll_adv_set *adv;
2221
2222 adv = ull_adv_set_get(handle);
2223 if (!adv || adv->is_enabled) {
2224 return NULL;
2225 }
2226
2227 return adv;
2228 }
2229
/* Compute the worst-case radio event duration, in microseconds, for one
 * advertising event on the primary channels.
 *
 * @param pdu         Advertising PDU to be transmitted.
 * @param pdu_scan    Scan response PDU (its length contributes to the
 *                    scannable budget).
 * @param adv_chn_cnt Number of enabled primary advertising channels.
 * @param phy         PHY used for extended advertising (ignored for
 *                    legacy PDUs, which always use 1M).
 * @param phy_flags   PHY coding flags passed through to PDU_AC_US().
 *
 * @return Event duration in microseconds, including ULL start/end
 *         overhead.
 */
static uint16_t adv_time_get(struct pdu_adv *pdu, struct pdu_adv *pdu_scan,
			     uint8_t adv_chn_cnt, uint8_t phy,
			     uint8_t phy_flags)
{
	uint16_t time_us = EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	/* NOTE: 16-bit value is sufficient to calculate the maximum radio
	 *       event time reservation for PDUs on primary advertising
	 *       channels (37, 38, and 39 channel indices of 1M and Coded PHY).
	 */

	/* Calculate the PDU Tx Time and hence the radio event length */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		/* Extended: one ADV_EXT_IND per channel plus the turnaround
		 * between channel switches.
		 */
		time_us += PDU_AC_US(pdu->len, phy, phy_flags) * adv_chn_cnt +
			   EVENT_RX_TX_TURNAROUND(phy) * (adv_chn_cnt - 1);
	} else
#endif
	{
		/* Legacy PDUs: budget for the PDU itself plus, depending on
		 * type, the worst-case peer response (SCAN_REQ/SCAN_RSP
		 * and/or CONNECT_IND) on each channel.
		 */
		uint16_t adv_size =
			PDU_OVERHEAD_SIZE(PHY_1M) + ADVA_SIZE;
		const uint16_t conn_ind_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 INITA_SIZE + ADVA_SIZE + LLDATA_SIZE), PHY_1M);
		const uint8_t scan_req_us  =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 SCANA_SIZE + ADVA_SIZE), PHY_1M);
		const uint16_t scan_rsp_us =
			BYTES2US((PDU_OVERHEAD_SIZE(PHY_1M) +
				 ADVA_SIZE + pdu_scan->len), PHY_1M);
		const uint8_t rx_to_us	= EVENT_RX_TO_US(PHY_1M);
		const uint8_t rxtx_turn_us = EVENT_RX_TX_TURNAROUND(PHY_1M);

		if (pdu->type == PDU_ADV_TYPE_NONCONN_IND) {
			/* Non-connectable, non-scannable: Tx only, no
			 * response window needed.
			 */
			adv_size += pdu->len;
			time_us += BYTES2US(adv_size, PHY_1M) * adv_chn_cnt +
				   rxtx_turn_us * (adv_chn_cnt - 1);
		} else {
			if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
				adv_size += TARGETA_SIZE;
				time_us += conn_ind_us;
			} else if (pdu->type == PDU_ADV_TYPE_ADV_IND) {
				adv_size += pdu->len;
				/* Worst case of scan exchange vs. connect
				 * request reception.
				 */
				time_us += MAX(scan_req_us + EVENT_IFS_MAX_US +
						scan_rsp_us, conn_ind_us);
			} else if (pdu->type == PDU_ADV_TYPE_SCAN_IND) {
				adv_size += pdu->len;
				time_us += scan_req_us + EVENT_IFS_MAX_US +
					   scan_rsp_us;
			}

			/* Per additional channel: PDU Tx, IFS, Rx timeout and
			 * turnaround; plus PDU Tx and IFS on the last channel.
			 */
			time_us += (BYTES2US(adv_size, PHY_1M) +
				    EVENT_IFS_MAX_US + rx_to_us +
				    rxtx_turn_us) * (adv_chn_cnt - 1) +
				   BYTES2US(adv_size, PHY_1M) + EVENT_IFS_MAX_US;
		}
	}

	return time_us;
}
2290
/* Ticker expiry callback for the primary advertising event.
 *
 * Enqueues a mayfly towards LLL to run lll_adv_prepare, records aux
 * offset information when extended advertising with expire-info is
 * used, and then applies a fresh random advertising delay for the next
 * event while book-keeping the remaining advertising duration.
 *
 * Runs in the ULL_HIGH ticker execution context.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_prepare};
	static struct lll_prepare_param p;
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* With expire info the ticker hands us an extended context that
	 * wraps the advertising set.
	 */
	struct ticker_ext_context *context = param;
	struct ll_adv_set *adv = context->context;
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	struct ll_adv_set *adv = param;
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	uint32_t random_delay;
	struct lll_adv *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	lll = &adv->lll;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->aux) {
		/* Check if we are about to exceed the duration or max events limit
		 * Usually this will be handled in ull_adv_done(), but in cases where
		 * the extended advertising events overlap (ie. several primary advertisings
		 * point to the same AUX_ADV_IND packet) the ticker will not be stopped
		 * in time. To handle this, we simply ignore the extra ticker callback and
		 * wait for the usual ull_adv_done() handling to run
		 */
		if ((adv->max_events && adv->event_counter >= adv->max_events) ||
		    (adv->remain_duration_us &&
		     adv->remain_duration_us <= (uint64_t)adv->interval * ADV_INT_UNIT_US)) {
			return;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	if (IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		/* Increment prepare reference count */
		ref = ull_ref_inc(&adv->ull);
		LL_ASSERT(ref);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			uint32_t ticks_to_expire;
			uint32_t other_remainder;

			LL_ASSERT(context->other_expire_info);

			/* Adjust ticks to expire based on remainder value */
			ticks_to_expire = context->other_expire_info->ticks_to_expire;
			other_remainder = context->other_expire_info->remainder;
			hal_ticker_remove_jitter(&ticks_to_expire, &other_remainder);

			/* Store the ticks and remainder offset for aux ptr population in LLL */
			adv->lll.aux->ticks_pri_pdu_offset = ticks_to_expire;
			adv->lll.aux->us_pri_pdu_offset = other_remainder;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	* CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

		/* Append timing parameters */
		p.ticks_at_expire = ticks_at_expire;
		p.remainder = remainder;
		p.lazy = lazy;
		p.force = force;
		p.param = lll;
		mfy.param = &p;

		/* Kick LLL prepare */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) && \
	!defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		if (adv->lll.aux) {
			ull_adv_aux_offset_get(adv);
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) &&
	* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO
	*/

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Remember expiry time and applied delay for JIT rescheduling */
		adv->ticks_at_expire = ticks_at_expire;
		adv->delay_at_expire = adv->delay;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	}

	/* Apply adv random delay */
#if defined(CONFIG_BT_PERIPHERAL)
	/* No random delay for high duty cycle directed advertising */
	if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
	{
		/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
		random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY,
						  0, 0, ticker_update_op_cb);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		if (adv->remain_duration_us && adv->event_counter > 0U) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			/* ticks_drift is always 0 with JIT scheduling, populate manually */
			ticks_drift = adv->delay_at_expire;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
			uint32_t interval_us = (uint64_t)adv->interval * ADV_INT_UNIT_US;
			uint32_t elapsed_us = interval_us * (lazy + 1U) +
					 HAL_TICKER_TICKS_TO_US(ticks_drift);

			/* End advertising if the added random delay pushes us beyond the limit */
			if (adv->remain_duration_us > elapsed_us + interval_us +
						      HAL_TICKER_TICKS_TO_US(random_delay)) {
				adv->remain_duration_us -= elapsed_us;
			} else {
				/* Leave exactly one interval's worth so that
				 * the next event terminates the advertising.
				 */
				adv->remain_duration_us = interval_us;
			}
		}

		adv->event_counter += (lazy + 1U);
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	}

	DEBUG_RADIO_PREPARE_A(1);
}
2419
2420 static void ticker_update_op_cb(uint32_t status, void *param)
2421 {
2422 LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
2423 param == ull_disable_mark_get());
2424 }
2425
2426 #if defined(CONFIG_BT_PERIPHERAL)
2427 static void ticker_stop_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
2428 uint32_t remainder, uint16_t lazy, uint8_t force,
2429 void *param)
2430 {
2431 struct ll_adv_set *adv = param;
2432 uint8_t handle;
2433 uint32_t ret;
2434
2435 handle = ull_adv_handle_get(adv);
2436 LL_ASSERT(handle < BT_CTLR_ADV_SET);
2437
2438 ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2439 TICKER_ID_ADV_BASE + handle,
2440 ticker_stop_op_cb, adv);
2441 LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2442 (ret == TICKER_STATUS_BUSY));
2443 }
2444
2445 static void ticker_stop_op_cb(uint32_t status, void *param)
2446 {
2447 static memq_link_t link;
2448 static struct mayfly mfy = {0, 0, &link, NULL, adv_disable};
2449 uint32_t ret;
2450
2451 /* Ignore if race between thread and ULL */
2452 if (status != TICKER_STATUS_SUCCESS) {
2453 /* TODO: detect race */
2454
2455 return;
2456 }
2457
2458 #if defined(CONFIG_BT_HCI_MESH_EXT)
2459 /* FIXME: why is this here for Mesh commands? */
2460 if (param) {
2461 return;
2462 }
2463 #endif /* CONFIG_BT_HCI_MESH_EXT */
2464
2465 /* Check if any pending LLL events that need to be aborted */
2466 mfy.param = param;
2467 ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2468 TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2469 LL_ASSERT(!ret);
2470 }
2471
2472 static void adv_disable(void *param)
2473 {
2474 struct ll_adv_set *adv;
2475 struct ull_hdr *hdr;
2476
2477 /* Check ref count to determine if any pending LLL events in pipeline */
2478 adv = param;
2479 hdr = &adv->ull;
2480 if (ull_ref_get(hdr)) {
2481 static memq_link_t link;
2482 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2483 uint32_t ret;
2484
2485 mfy.param = &adv->lll;
2486
2487 /* Setup disabled callback to be called when ref count
2488 * returns to zero.
2489 */
2490 LL_ASSERT(!hdr->disabled_cb);
2491 hdr->disabled_param = mfy.param;
2492 hdr->disabled_cb = disabled_cb;
2493
2494 /* Trigger LLL disable */
2495 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2496 TICKER_USER_ID_LLL, 0, &mfy);
2497 LL_ASSERT(!ret);
2498 } else {
2499 /* No pending LLL events */
2500 disabled_cb(&adv->lll);
2501 }
2502 }
2503
/* Final step of peripheral advertising teardown on timeout: generate a
 * connection-complete event with status ADV_TIMEOUT using the
 * pre-allocated rx node/link of the set and, for extended advertising,
 * additionally an EXT_ADV_TERMINATE event.
 *
 * @param param LLL header (struct lll_hdr *) of the advertising set.
 */
static void disabled_cb(void *param)
{
	struct ll_adv_set *adv;
	struct node_rx_pdu *rx;
	struct node_rx_cc *cc;
	memq_link_t *link;

	adv = ((struct lll_hdr *)param)->parent;

	/* Consume the pre-allocated connection-complete link; ownership
	 * transfers to the rx queue below.
	 */
	LL_ASSERT(adv->link_cc_free);
	link = adv->link_cc_free;
	adv->link_cc_free = NULL;

	/* Consume the pre-allocated connection-complete rx node */
	LL_ASSERT(adv->node_rx_cc_free);
	rx = adv->node_rx_cc_free;
	adv->node_rx_cc_free = NULL;

	rx->hdr.type = NODE_RX_TYPE_CONNECTION;
	rx->hdr.handle = 0xffff;

	/* Report "connection failed to be established" (advertising
	 * timeout) towards the Host.
	 */
	cc = (void *)rx->pdu;
	memset(cc, 0x00, sizeof(struct node_rx_cc));
	cc->status = BT_HCI_ERR_ADV_TIMEOUT;

	rx->hdr.rx_ftr.param = param;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (adv->lll.node_rx_adv_term) {
		uint8_t handle;

		/* Queue the connection-complete node first; the terminate
		 * node is then scheduled below with its own link.
		 */
		ll_rx_put(link, rx);

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx = (void *)adv->lll.node_rx_adv_term;
		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->hdr.rx_ftr.param_adv_term.status = BT_HCI_ERR_ADV_TIMEOUT;
		rx->hdr.rx_ftr.param_adv_term.conn_handle = 0xffff;
		rx->hdr.rx_ftr.param_adv_term.num_events = adv->event_counter;

		link = rx->hdr.link;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	ll_rx_put_sched(link, rx);
}
2552
/* Release the connection context reserved for this advertising set,
 * returning its tx memq link, the connection instance, and the
 * pre-allocated connection-complete rx node and link to their pools.
 */
static void conn_release(struct ll_adv_set *adv)
{
	struct lll_conn *lll = adv->lll.conn;
	memq_link_t *link;

	/* Tear down the connection's Tx memq; the dequeued terminator
	 * link is stored back as the free tx link.
	 */
	LL_ASSERT(!lll->link_tx_free);
	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
	LL_ASSERT(link);
	lll->link_tx_free = link;

	/* Return the connection instance and detach it from the set */
	ll_conn_release(lll->hdr.parent);
	adv->lll.conn = NULL;

	/* Return the unused connection-complete rx node and link */
	ll_rx_release(adv->node_rx_cc_free);
	adv->node_rx_cc_free = NULL;
	ll_rx_link_release(adv->link_cc_free);
	adv->link_cc_free = NULL;
}
2571 #endif /* CONFIG_BT_PERIPHERAL */
2572
2573 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2574 static uint8_t leg_adv_type_get(uint8_t evt_prop)
2575 {
2576 /* We take advantage of the fact that 2 LS bits
2577 * of evt_prop can be used in a lookup to return
2578 * PDU type value in the pdu_adv_type[] lookup.
2579 */
2580 uint8_t const leg_adv_type[] = {
2581 0x03, /* index of PDU_ADV_TYPE_NONCONN_IND in pdu_adv_type[] */
2582 0x04, /* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2583 0x02, /* index of PDU_ADV_TYPE_SCAN_IND in pdu_adv_type[] */
2584 0x00 /* index of PDU_ADV_TYPE_ADV_IND in pdu_adv_type[] */
2585 };
2586
2587 /* if high duty cycle directed */
2588 if (evt_prop & BT_HCI_LE_ADV_PROP_HI_DC_CONN) {
2589 /* index of PDU_ADV_TYPE_DIRECT_IND in pdu_adv_type[] */
2590 return 0x01;
2591 }
2592
2593 return leg_adv_type[evt_prop & 0x03];
2594 }
2595
2596 static void adv_max_events_duration_set(struct ll_adv_set *adv,
2597 uint16_t duration,
2598 uint8_t max_ext_adv_evts)
2599 {
2600 adv->event_counter = 0;
2601 adv->max_events = max_ext_adv_evts;
2602 adv->remain_duration_us = (uint32_t)duration * 10U * USEC_PER_MSEC;
2603 }
2604
2605 static void ticker_stop_aux_op_cb(uint32_t status, void *param)
2606 {
2607 static memq_link_t link;
2608 static struct mayfly mfy = {0, 0, &link, NULL, aux_disable};
2609 uint32_t ret;
2610
2611 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
2612
2613 /* Check if any pending LLL events that need to be aborted */
2614 mfy.param = param;
2615 ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2616 TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2617 LL_ASSERT(!ret);
2618 }
2619
2620 static void aux_disable(void *param)
2621 {
2622 struct lll_adv_aux *lll_aux;
2623 struct ll_adv_aux_set *aux;
2624 struct ll_adv_set *adv;
2625 struct ull_hdr *hdr;
2626
2627 adv = param;
2628 lll_aux = adv->lll.aux;
2629 aux = HDR_LLL2ULL(lll_aux);
2630 hdr = &aux->ull;
2631 if (ull_ref_get(hdr)) {
2632 LL_ASSERT(!hdr->disabled_cb);
2633 hdr->disabled_param = adv;
2634 hdr->disabled_cb = aux_disabled_cb;
2635 } else {
2636 aux_disabled_cb(param);
2637 }
2638 }
2639
2640 static void aux_disabled_cb(void *param)
2641 {
2642 uint8_t handle;
2643 uint32_t ret;
2644
2645 handle = ull_adv_handle_get(param);
2646 ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2647 TICKER_USER_ID_ULL_HIGH,
2648 (TICKER_ID_ADV_BASE + handle),
2649 ticker_stop_ext_op_cb, param);
2650 LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
2651 (ret == TICKER_STATUS_BUSY));
2652 }
2653
2654 static void ticker_stop_ext_op_cb(uint32_t status, void *param)
2655 {
2656 static memq_link_t link;
2657 static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
2658 uint32_t ret;
2659
2660 /* Ignore if race between thread and ULL */
2661 if (status != TICKER_STATUS_SUCCESS) {
2662 /* TODO: detect race */
2663
2664 return;
2665 }
2666
2667 /* Check if any pending LLL events that need to be aborted */
2668 mfy.param = param;
2669 ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2670 TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2671 LL_ASSERT(!ret);
2672 }
2673
2674 static void ext_disable(void *param)
2675 {
2676 struct ll_adv_set *adv;
2677 struct ull_hdr *hdr;
2678
2679 /* Check ref count to determine if any pending LLL events in pipeline */
2680 adv = param;
2681 hdr = &adv->ull;
2682 if (ull_ref_get(hdr)) {
2683 static memq_link_t link;
2684 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2685 uint32_t ret;
2686
2687 mfy.param = &adv->lll;
2688
2689 /* Setup disabled callback to be called when ref count
2690 * returns to zero.
2691 */
2692 LL_ASSERT(!hdr->disabled_cb);
2693 hdr->disabled_param = mfy.param;
2694 hdr->disabled_cb = ext_disabled_cb;
2695
2696 /* Trigger LLL disable */
2697 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2698 TICKER_USER_ID_LLL, 0, &mfy);
2699 LL_ASSERT(!ret);
2700 } else {
2701 /* No pending LLL events */
2702 ext_disabled_cb(&adv->lll);
2703 }
2704 }
2705
2706 static void ext_disabled_cb(void *param)
2707 {
2708 struct lll_adv *lll = (void *)param;
2709 struct node_rx_hdr *rx_hdr = (void *)lll->node_rx_adv_term;
2710
2711 /* Under race condition, if a connection has been established then
2712 * node_rx is already utilized to send terminate event on connection
2713 */
2714 if (!rx_hdr) {
2715 return;
2716 }
2717
2718 /* NOTE: parameters are already populated on disable, just enqueue here
2719 */
2720 ll_rx_put_sched(rx_hdr->link, rx_hdr);
2721 }
2722 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2723
/* Synchronously disable the advertising set @p handle.
 *
 * Stops the set's ticker(s), waits for any in-flight LLL event to
 * finish, stops an associated aux set, and releases the reserved
 * connection context and terminate rx node.
 *
 * @return 0 on success, otherwise a BT_HCI_ERR_* error code.
 */
static inline uint8_t disable(uint8_t handle)
{
	uint32_t volatile ret_cb;
	struct ll_adv_set *adv;
	uint32_t ret;
	void *mark;
	int err;

	adv = ull_adv_is_enabled_get(handle);
	if (!adv) {
		/* Bluetooth Specification v5.0 Vol 2 Part E Section 7.8.9
		 * Disabling advertising when it is already disabled has no
		 * effect.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_ENABLE_STRICT)) {
			return 0;
		}

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* Indicate to LLL that a cancellation is requested */
		adv->lll.conn->periph.cancelled = 1U;
		/* Full barrier so LLL observes 'cancelled' before we read
		 * 'initiated' below.
		 */
		cpu_dmb();

		/* Check if a connection was initiated (connection
		 * establishment race between LLL and ULL).
		 */
		if (unlikely(adv->lll.conn->periph.initiated)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Mark the set as being disabled to serialize against concurrent
	 * operations; unmarked again on every exit path below.
	 */
	mark = ull_disable_mark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.is_hdcd) {
		/* High duty cycle directed advertising also has a one-shot
		 * stop ticker that must be cancelled.
		 */
		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_THREAD, TICKER_ID_ADV_STOP,
				  ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret) {
			mark = ull_disable_unmark(adv);
			LL_ASSERT(mark == adv);

			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* Stop the periodic advertising event ticker */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_ADV_BASE + handle,
			  ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		mark = ull_disable_unmark(adv);
		LL_ASSERT(mark == adv);

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Wait for any in-flight LLL event of this set to complete */
	err = ull_disable(&adv->lll);
	LL_ASSERT(!err || (err == -EALREADY));

	mark = ull_disable_unmark(adv);
	LL_ASSERT(mark == adv);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		/* Stop the associated auxiliary advertising instance */
		err = ull_adv_aux_stop(aux);
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT && (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */

#if defined(CONFIG_BT_PERIPHERAL)
	if (adv->lll.conn) {
		/* Release the connection context reserved for this set */
		conn_release(adv);
	}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv *lll = &adv->lll;

	if (lll->node_rx_adv_term) {
		/* Return the unused terminate event rx node and link */
		struct node_rx_pdu *node_rx_adv_term =
			(void *)lll->node_rx_adv_term;

		lll->node_rx_adv_term = NULL;

		ll_rx_link_release(node_rx_adv_term->hdr.link);
		ll_rx_release(node_rx_adv_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	adv->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled_get(0)) {
		/* Notify the filter module that adv/scan activity ceased */
		ull_filter_adv_scan_state_cb(0);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}
2842
/* On advertising enable, populate AdvA (and TargetA/scan response
 * addresses) in the PDU that actually carries AdvA: the legacy PDU, the
 * extended primary PDU, or the auxiliary PDU pointed to by aux_ptr.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if a scannable
 *         extended set has no scan response data, or
 *         BT_HCI_ERR_INVALID_PARAM if no valid random address is set.
 */
static uint8_t adv_scan_pdu_addr_update(struct ll_adv_set *adv,
					struct pdu_adv *pdu,
					struct pdu_adv *pdu_scan)
{
	struct pdu_adv *pdu_adv_to_update;
	struct lll_adv *lll;

	pdu_adv_to_update = NULL;
	lll = &adv->lll;

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Read the extended header flags; treat a zero-length
		 * extended header as "no flags set".
		 */
		pri_com_hdr = (void *)&pdu->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		if (pri_com_hdr->adv_mode & BT_HCI_LE_ADV_PROP_SCAN) {
			struct pdu_adv *sr = lll_adv_scan_rsp_peek(lll);

			/* Scannable extended advertising requires scan
			 * response data to be present.
			 */
			if (!sr->len) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}
		}

		/* AdvA, fill here at enable */
		if (pri_hdr_flags.adv_addr) {
			pdu_adv_to_update = pdu;
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			/* AdvA may instead live in the auxiliary PDU;
			 * inspect its extended header flags.
			 */
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			sec_pdu = lll_adv_aux_data_peek(lll->aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu_adv_to_update = sec_pdu;
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	} else {
		/* Legacy PDUs always carry AdvA */
		pdu_adv_to_update = pdu;
	}

	if (pdu_adv_to_update) {
		const uint8_t *adv_addr;

		adv_addr = ull_adv_pdu_update_addrs(adv, pdu_adv_to_update);

		/* In case the local IRK was not set or no match was
		 * found the fallback address was used instead, check
		 * that a valid address has been set.
		 */
		if (pdu_adv_to_update->tx_addr &&
		    !mem_nz((void *)adv_addr, BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Do not update scan response for extended non-scannable since
		 * there may be no scan response set.
		 */
		if ((pdu->type != PDU_ADV_TYPE_EXT_IND) ||
		    (pdu->adv_ext_ind.adv_mode & BT_HCI_LE_ADV_PROP_SCAN)) {
#else
		if (1) {
#endif
			ull_adv_pdu_update_addrs(adv, pdu_scan);
		}

	}

	return 0;
}
2936
2937 static inline uint8_t *adv_pdu_adva_get(struct pdu_adv *pdu)
2938 {
2939 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2940 struct pdu_adv_com_ext_adv *com_hdr = (void *)&pdu->adv_ext_ind;
2941 struct pdu_adv_ext_hdr *hdr = (void *)com_hdr->ext_hdr_adv_data;
2942 struct pdu_adv_ext_hdr hdr_flags;
2943
2944 if (com_hdr->ext_hdr_len) {
2945 hdr_flags = *hdr;
2946 } else {
2947 *(uint8_t *)&hdr_flags = 0U;
2948 }
2949
2950 /* All extended PDUs have AdvA at the same offset in common header */
2951 if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
2952 LL_ASSERT(hdr_flags.adv_addr);
2953
2954 return &com_hdr->ext_hdr_adv_data[1];
2955 }
2956 #endif
2957
2958 /* All legacy PDUs have AdvA at the same offset */
2959 return pdu->adv_ind.addr;
2960 }
2961
/* Write the advertiser address (RPA if resolvable privacy yields one,
 * otherwise the identity/random address) into the PDU's AdvA field.
 *
 * @return Pointer to the AdvA field inside @p pdu.
 */
static const uint8_t *adva_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rpa = ull_filter_adva_get(adv->lll.rl_idx);
#else
	const uint8_t *rpa = NULL;
#endif
	const uint8_t *own_id_addr;
	const uint8_t *tx_addr;
	uint8_t *adv_addr;

	/* NOTE: own_id_addr is assigned only inside this condition; it is
	 * read later only when rpa == NULL, or in the memcpy below which
	 * is compiled only when CHECK_SAME_PEER_CONN is enabled — in which
	 * case IS_ENABLED() forces this condition true. So every read is
	 * preceded by an assignment.
	 */
	if (!rpa || IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)) {
		if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		} else if (ll_adv_cmds_is_ext() && pdu->tx_addr) {
			/* Extended advertising with random address type */
			own_id_addr = adv->rnd_addr;
#endif
		} else {
			own_id_addr = ll_addr_get(pdu->tx_addr);
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	/* Keep a copy of the identity address used, for same-peer checks */
	(void)memcpy(adv->own_id_addr, own_id_addr, BDADDR_SIZE);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

	if (rpa) {
		/* Using an RPA implies a random address type in the PDU */
		pdu->tx_addr = 1;
		tx_addr = rpa;
	} else {
		tx_addr = own_id_addr;
	}

	adv_addr = adv_pdu_adva_get(pdu);
	memcpy(adv_addr, tx_addr, BDADDR_SIZE);

	return adv_addr;
}
3000
/* With LL Privacy, overwrite the PDU's TargetA with the peer RPA from
 * the resolving list, if one is available.
 */
static void tgta_update(struct ll_adv_set *adv, struct pdu_adv *pdu)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	const uint8_t *rx_addr;

	rx_addr = ull_filter_tgta_get(adv->lll.rl_idx);
	if (rx_addr) {
		uint8_t *tgt_addr;

		/* An RPA implies random address type for TargetA */
		pdu->rx_addr = 1;

		/* TargetA always follows AdvA in all PDUs */
		tgt_addr = adv_pdu_adva_get(pdu) + BDADDR_SIZE;
		memcpy(tgt_addr, rx_addr, BDADDR_SIZE);
	}
#endif

	/* NOTE: identity TargetA is set when configuring advertising set, no
	 * need to update if LL Privacy is not supported.
	 */
}
3021
3022 static void init_pdu(struct pdu_adv *pdu, uint8_t pdu_type)
3023 {
3024 /* TODO: Add support for extended advertising PDU if needed */
3025 pdu->type = pdu_type;
3026 pdu->rfu = 0;
3027 pdu->chan_sel = 0;
3028 pdu->tx_addr = 0;
3029 pdu->rx_addr = 0;
3030 pdu->len = BDADDR_SIZE;
3031 }
3032
3033 static void init_set(struct ll_adv_set *adv)
3034 {
3035 adv->interval = BT_LE_ADV_INTERVAL_DEFAULT;
3036 #if defined(CONFIG_BT_CTLR_PRIVACY)
3037 adv->own_addr_type = BT_ADDR_LE_PUBLIC;
3038 #endif /* CONFIG_BT_CTLR_PRIVACY */
3039 adv->lll.chan_map = BT_LE_ADV_CHAN_MAP_ALL;
3040 adv->lll.filter_policy = BT_LE_ADV_FP_NO_FILTER;
3041 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
3042 adv->delay = 0U;
3043 #endif /* ONFIG_BT_CTLR_JIT_SCHEDULING */
3044
3045 init_pdu(lll_adv_data_peek(&ll_adv[0].lll), PDU_ADV_TYPE_ADV_IND);
3046
3047 #if !defined(CONFIG_BT_CTLR_ADV_EXT)
3048 init_pdu(lll_adv_scan_rsp_peek(&ll_adv[0].lll), PDU_ADV_TYPE_SCAN_RSP);
3049 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
3050 }
3051