/*
 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <soc.h>
#include <bluetooth/hci.h>
#include <sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ticker.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_adv_sync.h"
#include "lll/lll_df_types.h"
#include "lll_chan.h"

#include "ull_adv_types.h"

#include "ull_internal.h"
#include "ull_chan_internal.h"
#include "ull_adv_internal.h"

#include "ll.h"

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_adv_sync
#include "common/log.h"
#include "hal/debug.h"

static int init_reset(void);
static inline struct ll_adv_sync_set *sync_acquire(void);
static inline void sync_release(struct ll_adv_sync_set *sync);
static inline uint16_t sync_handle_get(struct ll_adv_sync_set *sync);
static inline uint8_t sync_remove(struct ll_adv_sync_set *sync,
				  struct ll_adv_set *adv, uint8_t enable);
static uint8_t sync_chm_update(uint8_t handle);
static uint16_t sync_time_get(struct ll_adv_sync_set *sync,
			      struct pdu_adv *pdu);

static void mfy_sync_offset_get(void *param);
static inline struct pdu_adv_sync_info *sync_info_get(struct pdu_adv *pdu);
static inline void sync_info_offset_fill(struct pdu_adv_sync_info *si,
					 uint32_t ticks_offset,
					 uint32_t start_us);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);

static struct ll_adv_sync_set ll_adv_sync_pool[CONFIG_BT_CTLR_ADV_SYNC_SET];
static void *adv_sync_free;

void ull_adv_sync_pdu_init(struct pdu_adv *pdu, uint8_t ext_hdr_flags)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *ext_hdr;
	uint8_t *dptr;
	uint8_t len;

	pdu->type = PDU_ADV_TYPE_AUX_SYNC_IND;
	pdu->rfu = 0U;
	pdu->chan_sel = 0U;

	pdu->tx_addr = 0U;
	pdu->rx_addr = 0U;

	com_hdr = &pdu->adv_ext_ind;
	/* Non-connectable and Non-scannable adv mode */
	com_hdr->adv_mode = 0U;

	ext_hdr = &com_hdr->ext_hdr;
	*(uint8_t *)ext_hdr = ext_hdr_flags;
	dptr = ext_hdr->data;

	LL_ASSERT(!(ext_hdr_flags & (ULL_ADV_PDU_HDR_FIELD_ADVA |
				     ULL_ADV_PDU_HDR_FIELD_TARGETA |
				     ULL_ADV_PDU_HDR_FIELD_ADI |
				     ULL_ADV_PDU_HDR_FIELD_SYNC_INFO)));

	if (ext_hdr_flags & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) {
		dptr += sizeof(struct pdu_cte_info);
	}
	if (ext_hdr_flags & ULL_ADV_PDU_HDR_FIELD_AUX_PTR) {
		dptr += sizeof(struct pdu_adv_aux_ptr);
	}
	if (ext_hdr_flags & ULL_ADV_PDU_HDR_FIELD_TX_POWER) {
		dptr += sizeof(uint8_t);
	}

	/* Calc tertiary PDU len */
	len = ull_adv_aux_hdr_len_calc(com_hdr, &dptr);
	ull_adv_aux_hdr_len_fill(com_hdr, len);

	pdu->len = len;
}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
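/* Initialize a new tertiary PDU from a previous one: merge the previous
 * extended header flags with the fields requested to be added or removed,
 * then copy over CTEInfo, TxPower, ACAD and AD data as applicable. Returns
 * zero, or an HCI error code if the AD data does not fit in the new PDU.
 */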
static uint8_t adv_sync_pdu_init_from_prev_pdu(struct pdu_adv *pdu,
					       struct pdu_adv *pdu_prev,
					       uint16_t ext_hdr_flags_add,
					       uint16_t ext_hdr_flags_rem)
{
	struct pdu_adv_com_ext_adv *com_hdr_prev;
	struct pdu_adv_ext_hdr *ext_hdr_prev;
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *ext_hdr;
	uint8_t ext_hdr_flags_prev;
	uint8_t ext_hdr_flags;
	uint8_t *dptr_prev;
	uint8_t len_prev;
	uint8_t *dptr;
	uint8_t len;

	/* Copy complete header, assume it was set properly in old PDU */
	*(uint8_t *)pdu = *(uint8_t *)pdu_prev;

	com_hdr_prev = &pdu_prev->adv_ext_ind;
	com_hdr = &pdu->adv_ext_ind;

	com_hdr->adv_mode = 0U;

	ext_hdr_prev = &com_hdr_prev->ext_hdr;
	ext_hdr = &com_hdr->ext_hdr;

	if (com_hdr_prev->ext_hdr_len) {
		ext_hdr_flags_prev = *(uint8_t *)ext_hdr_prev;
	} else {
		ext_hdr_flags_prev = 0;
	}
	ext_hdr_flags = ext_hdr_flags_prev |
			(ext_hdr_flags_add & (~ext_hdr_flags_rem));

	*(uint8_t *)ext_hdr = ext_hdr_flags;

	LL_ASSERT(!ext_hdr->adv_addr);
	LL_ASSERT(!ext_hdr->tgt_addr);
	LL_ASSERT(!ext_hdr->adi);
	LL_ASSERT(!ext_hdr->sync_info);

	dptr = ext_hdr->data;
	dptr_prev = ext_hdr_prev->data;

	/* Note: skip length verification of ext header writes as we assume
	 * that all PDUs are large enough to store at least the complete ext
	 * header.
	 */

	/* Copy CTEInfo, if applicable */
	if (ext_hdr->cte_info) {
		if (ext_hdr_prev->cte_info) {
			memcpy(dptr, dptr_prev, sizeof(struct pdu_cte_info));
		}
		dptr += sizeof(struct pdu_cte_info);
	}
	if (ext_hdr_prev->cte_info) {
		dptr_prev += sizeof(struct pdu_cte_info);
	}

	/* Add AuxPtr, if applicable. Do not copy since it will be updated
	 * later anyway.
	 */
	if (ext_hdr->aux_ptr) {
		dptr += sizeof(struct pdu_adv_aux_ptr);
	}
	if (ext_hdr_prev->aux_ptr) {
		dptr_prev += sizeof(struct pdu_adv_aux_ptr);
	}

	/* Copy TxPower, if applicable */
	if (ext_hdr->tx_pwr) {
		if (ext_hdr_prev->tx_pwr) {
			memcpy(dptr, dptr_prev, sizeof(uint8_t));
		}
		dptr += sizeof(uint8_t);
	}
	if (ext_hdr_prev->tx_pwr) {
		dptr_prev += sizeof(uint8_t);
	}

	/* Copy ACAD */
	len = com_hdr_prev->ext_hdr_len - (dptr_prev - (uint8_t *)ext_hdr_prev);
	memcpy(dptr, dptr_prev, len);
	dptr += len;

	/* Check populated ext header length excluding the length field itself.
	 * If 0, then neither a field nor ACAD was populated and we skip the
	 * ext header entirely.
	 */
	len = dptr - ext_hdr->data;
	if (len == 0) {
		com_hdr->ext_hdr_len = 0;
	} else {
		com_hdr->ext_hdr_len = len +
				       offsetof(struct pdu_adv_ext_hdr, data);
	}

	/* Both PDUs now have their ext header length calculated properly,
	 * reset pointers to the start of AD.
	 */
	dptr = &com_hdr->ext_hdr_adv_data[com_hdr->ext_hdr_len];
	dptr_prev = &com_hdr_prev->ext_hdr_adv_data[com_hdr_prev->ext_hdr_len];

	/* Calculate length of AD to copy and AD length available in new PDU */
	len_prev = pdu_prev->len - (dptr_prev - pdu_prev->payload);
	len = PDU_AC_PAYLOAD_SIZE_MAX - (dptr - pdu->payload);

	/* TODO: we should allow partial copy and let caller refragment data */
	if (len < len_prev) {
		return BT_HCI_ERR_PACKET_TOO_LONG;
	}

	/* Copy AD */
	if (!(ext_hdr_flags_rem & ULL_ADV_PDU_HDR_FIELD_AD_DATA)) {
		len = MIN(len, len_prev);
		memcpy(dptr, dptr_prev, len);
		dptr += len;
	}

	/* Finalize PDU */
	pdu->len = dptr - pdu->payload;

	return 0;
}

/* Note: This function is made global because it is temporarily unused and, as
 * a static function, would cause a compilation warning. It will be used once
 * fragmentation of periodic advertising PDUs is implemented.
 */
uint8_t adv_sync_pdu_ad_data_set(struct pdu_adv *pdu, const uint8_t *data, uint8_t len)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	uint8_t len_max;
	uint8_t *dptr;

	com_hdr = &pdu->adv_ext_ind;

	dptr = &com_hdr->ext_hdr_adv_data[com_hdr->ext_hdr_len];

	len_max = PDU_AC_PAYLOAD_SIZE_MAX - (dptr - pdu->payload);
	/* TODO: we should allow partial copy and let caller refragment data */
	if (len > len_max) {
		return BT_HCI_ERR_PACKET_TOO_LONG;
	}

	memcpy(dptr, data, len);
	dptr += len;

	pdu->len = dptr - pdu->payload;

	return 0;
}

uint8_t ull_adv_sync_pdu_cte_info_set(struct pdu_adv *pdu, const struct pdu_cte_info *cte_info)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *ext_hdr;
	uint8_t *dptr;

	com_hdr = &pdu->adv_ext_ind;
	ext_hdr = &com_hdr->ext_hdr;
	dptr = ext_hdr->data;

	/* Periodic adv PDUs do not have AdvA/TargetA */
	LL_ASSERT(!ext_hdr->adv_addr);
	LL_ASSERT(!ext_hdr->tgt_addr);

	if (ext_hdr->cte_info) {
		memcpy(dptr, cte_info, sizeof(*cte_info));
	}

	return 0;
}

static struct pdu_adv *adv_sync_pdu_duplicate_chain(struct pdu_adv *pdu)
{
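	/* Walk the linked chain of PDUs, duplicating each element with
	 * adv_sync_pdu_init_from_prev_pdu() and appending it to the copy, so
	 * that the original chain is left untouched.
	 */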
	struct pdu_adv *pdu_dup = NULL;
	uint8_t err;

	while (pdu) {
		struct pdu_adv *pdu_new;

		pdu_new = lll_adv_pdu_alloc_pdu_adv();

		/* We make an exact copy of the old PDU; there is really
		 * nothing that can go wrong here assuming the original PDU
		 * was created properly.
		 */
		err = adv_sync_pdu_init_from_prev_pdu(pdu_new, pdu, 0, 0);
		LL_ASSERT(err == 0);

		if (pdu_dup) {
			lll_adv_pdu_linked_append_end(pdu_new, pdu_dup);
		} else {
			pdu_dup = pdu_new;
		}

		pdu = lll_adv_pdu_linked_next_get(pdu);
	}

	return pdu_dup;
}
#endif /* CONFIG_BT_CTLR_ADV_PDU_LINK */

uint8_t ll_adv_sync_param_set(uint8_t handle, uint16_t interval, uint16_t flags)
{
	void *extra_data_prev, *extra_data;
	struct pdu_adv *pdu_prev, *pdu;
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	struct ll_adv_set *adv;
	uint8_t err, ter_idx;

	adv = ull_adv_is_created_get(handle);
	if (!adv) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	lll_sync = adv->lll.sync;
	if (!lll_sync) {
		struct pdu_adv *ter_pdu;
		struct lll_adv *lll;
		uint8_t chm_last;
		int err;

		sync = sync_acquire();
		if (!sync) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}

		lll = &adv->lll;
		lll_sync = &sync->lll;
		lll->sync = lll_sync;
		lll_sync->adv = lll;

		lll_adv_data_reset(&lll_sync->data);
		err = lll_adv_data_init(&lll_sync->data);
		if (err) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}

		/* NOTE: ull_hdr_init(&sync->ull); is done on start */
		lll_hdr_init(lll_sync, sync);

		err = util_aa_le32(lll_sync->access_addr);
		LL_ASSERT(!err);

		lll_sync->data_chan_id = lll_chan_id(lll_sync->access_addr);
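		/* Initialize the channel map double buffer: first and last
		 * indices start out equal, i.e. no channel map update is in
		 * progress.
		 */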
		chm_last = lll_sync->chm_first;
		lll_sync->chm_last = chm_last;
		lll_sync->chm[chm_last].data_chan_count =
			ull_chan_map_get(lll_sync->chm[chm_last].data_chan_map);

		lll_csrand_get(lll_sync->crc_init, sizeof(lll_sync->crc_init));

		lll_sync->latency_prepare = 0;
		lll_sync->latency_event = 0;
		lll_sync->event_counter = 0;

		sync->is_enabled = 0U;
		sync->is_started = 0U;

		ter_pdu = lll_adv_sync_data_peek(lll_sync, NULL);
		ull_adv_sync_pdu_init(ter_pdu, 0);
	} else {
		sync = HDR_LLL2ULL(lll_sync);
	}

	/* Periodic Advertising is already started */
	if (sync->is_started) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync->interval = interval;

	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST, &pdu_prev, &pdu,
				     &extra_data_prev, &extra_data, &ter_idx);
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	if (extra_data) {
		ull_adv_sync_extra_data_set_clear(extra_data_prev, extra_data,
						  0, 0, NULL);
	}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu, 0, 0, NULL);
	if (err) {
		return err;
	}

	lll_adv_sync_data_enqueue(lll_sync, ter_idx);

	return 0;
}

uint8_t ll_adv_sync_ad_data_set(uint8_t handle, uint8_t op, uint8_t len,
				uint8_t const *const data)
{
	uint8_t hdr_data[ULL_ADV_HDR_DATA_LEN_SIZE +
			 ULL_ADV_HDR_DATA_DATA_PTR_SIZE];
	void *extra_data_prev, *extra_data;
	struct pdu_adv *pdu_prev, *pdu;
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	struct ll_adv_set *adv;
	uint8_t ter_idx;
	uint8_t err;

	/* TODO: handle other op values */
	if (op != BT_HCI_LE_EXT_ADV_OP_COMPLETE_DATA &&
	    op != BT_HCI_LE_EXT_ADV_OP_UNCHANGED_DATA) {
		/* FIXME: error code */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	adv = ull_adv_is_created_get(handle);
	if (!adv) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	lll_sync = adv->lll.sync;
	if (!lll_sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = len;
	(void)memcpy((void *)&hdr_data[ULL_ADV_HDR_DATA_DATA_PTR_OFFSET], &data,
		     ULL_ADV_HDR_DATA_DATA_PTR_SIZE);

	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST, &pdu_prev, &pdu,
				     &extra_data_prev, &extra_data, &ter_idx);
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	if (extra_data) {
		ull_adv_sync_extra_data_set_clear(extra_data_prev, extra_data,
						  ULL_ADV_PDU_HDR_FIELD_AD_DATA, 0, NULL);
	}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu, ULL_ADV_PDU_HDR_FIELD_AD_DATA, 0,
					 hdr_data);
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
	/* alloc() will return the same PDU as peek() in case there was a PDU
	 * queued but not yet switched to current before alloc() - no need to
	 * deal with the chain as it is already there. Otherwise we need to
	 * duplicate the chain from the current PDU and append it to the new
	 * PDU.
	 */
	if (pdu != pdu_prev) {
		struct pdu_adv *next, *next_dup;

		LL_ASSERT(lll_adv_pdu_linked_next_get(pdu) == NULL);

		next = lll_adv_pdu_linked_next_get(pdu_prev);
		next_dup = adv_sync_pdu_duplicate_chain(next);

		lll_adv_pdu_linked_append(next_dup, pdu);
	}
#endif /* CONFIG_BT_CTLR_ADV_PDU_LINK */

	sync = HDR_LLL2ULL(lll_sync);
	if (sync->is_started) {
		err = ull_adv_sync_time_update(sync, pdu);
		if (err) {
			return err;
		}
	}

	lll_adv_sync_data_enqueue(lll_sync, ter_idx);

	return err;
}

uint8_t ll_adv_sync_enable(uint8_t handle, uint8_t enable)
{
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	uint8_t sync_got_enabled;
	struct ll_adv_set *adv;
	uint8_t pri_idx;
	uint8_t err;

	adv = ull_adv_is_created_get(handle);
	if (!adv) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	lll_sync = adv->lll.sync;
	if (!lll_sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* TODO: Add Periodic Advertising ADI Support feature */
	if (enable > 1U) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	sync = HDR_LLL2ULL(lll_sync);

	if (!enable) {
		if (!sync->is_enabled) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		if (!sync->is_started) {
			sync->is_enabled = 0U;

			return 0;
		}

		err = sync_remove(sync, adv, 0U);
		return err;
	}

	/* TODO: Check for periodic data being complete */

	/* TODO: Check packet too long */

	sync_got_enabled = 0U;
	if (sync->is_enabled) {
		/* TODO: Enabling an already enabled advertising set changes
		 * its random address.
		 */
	} else {
		sync_got_enabled = 1U;
	}

	if (adv->is_enabled && !sync->is_started) {
		struct pdu_adv_sync_info *sync_info;
		uint8_t value[1 + sizeof(sync_info)];
		uint32_t ticks_slot_overhead_aux;
		struct lll_adv_aux *lll_aux;
		struct ll_adv_aux_set *aux;
		uint32_t ticks_anchor_sync;
		uint32_t ticks_anchor_aux;
		uint32_t ret;

		lll_aux = adv->lll.aux;
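		/* NOTE: lll_aux is captured before the sync_info field is
		 * added below; if no auxiliary set exists yet, it is expected
		 * to be created by ull_adv_aux_hdr_set_clear() and is then
		 * re-read from adv->lll.aux in the else branch further down.
		 */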

		/* Add sync_info into auxiliary PDU */
		err = ull_adv_aux_hdr_set_clear(adv,
						ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
						0, value, NULL, &pri_idx);
		if (err) {
			return err;
		}

		/* First byte in the length-value encoded parameter is size of
		 * sync_info structure, followed by pointer to sync_info in the
		 * PDU.
		 */
		memcpy(&sync_info, &value[1], sizeof(sync_info));
		ull_adv_sync_info_fill(sync, sync_info);

		if (lll_aux) {
			/* FIXME: Find absolute ticks until after auxiliary PDU
			 * on air to place the periodic advertising PDU.
			 */
			ticks_anchor_aux = 0U; /* unused in this path */
			ticks_slot_overhead_aux = 0U; /* unused in this path */
			ticks_anchor_sync = ticker_ticks_now_get();
			aux = NULL;
		} else {
			lll_aux = adv->lll.aux;
			aux = HDR_LLL2ULL(lll_aux);
			ticks_anchor_aux = ticker_ticks_now_get();
			ticks_slot_overhead_aux = ull_adv_aux_evt_init(aux);
			ticks_anchor_sync =
				ticks_anchor_aux + ticks_slot_overhead_aux +
				aux->ull.ticks_slot +
				HAL_TICKER_US_TO_TICKS(EVENT_MAFS_US);
		}

		ret = ull_adv_sync_start(adv, sync, ticks_anchor_sync);
		if (ret) {
			sync_remove(sync, adv, 1U);

			return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
		}

		sync->is_started = 1U;

		lll_adv_data_enqueue(&adv->lll, pri_idx);

		if (aux) {
			/* Keep aux interval equal or higher than primary PDU
			 * interval.
			 */
			aux->interval = adv->interval +
					(HAL_TICKER_TICKS_TO_US(
						ULL_ADV_RANDOM_DELAY) /
					 ADV_INT_UNIT_US);

			ret = ull_adv_aux_start(aux, ticks_anchor_aux,
						ticks_slot_overhead_aux);
			if (ret) {
				sync_remove(sync, adv, 1U);

				return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
			}

			aux->is_started = 1U;
		}
	}

	if (sync_got_enabled) {
		sync->is_enabled = sync_got_enabled;
	}

	return 0;
}

int ull_adv_sync_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_adv_sync_reset(void)
{
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	struct ll_adv_set *adv;
	uint8_t handle;
	int err;

	for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
		adv = ull_adv_is_created_get(handle);
		if (!adv) {
			continue;
		}

		lll_sync = adv->lll.sync;
		if (!lll_sync) {
			continue;
		}

		sync = HDR_LLL2ULL(lll_sync);

		if (!sync->is_started) {
			sync->is_enabled = 0U;

			continue;
		}

		err = sync_remove(sync, adv, 0U);
		if (err) {
			return err;
		}
	}

	return 0;
}

int ull_adv_sync_reset_finalize(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

uint16_t ull_adv_sync_lll_handle_get(struct lll_adv_sync *lll)
{
	return sync_handle_get((void *)lll->hdr.parent);
}

void ull_adv_sync_release(struct ll_adv_sync_set *sync)
{
	lll_adv_sync_data_release(&sync->lll);
	sync_release(sync);
}

uint32_t ull_adv_sync_start(struct ll_adv_set *adv,
			    struct ll_adv_sync_set *sync,
			    uint32_t ticks_anchor)
{
	struct lll_adv_sync *lll_sync;
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t volatile ret_cb;
	struct pdu_adv *ter_pdu;
	uint32_t interval_us;
	uint8_t sync_handle;
	uint32_t time_us;
	uint32_t ret;

	ull_hdr_init(&sync->ull);

	lll_sync = &sync->lll;
	ter_pdu = lll_adv_sync_data_peek(lll_sync, NULL);

	/* Calculate the PDU Tx Time and hence the radio event length */
	time_us = sync_time_get(sync, ter_pdu);

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS(time_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

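	/* NOTE: the periodic advertising interval has the same 1.25 ms unit
	 * as the connection interval, hence CONN_INT_UNIT_US is reused to
	 * convert it to microseconds.
	 */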
	interval_us = (uint32_t)sync->interval * CONN_INT_UNIT_US;

	sync_handle = sync_handle_get(sync);

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			   (TICKER_ID_ADV_SYNC_BASE + sync_handle),
			   (ticks_anchor - ticks_slot_offset), 0U,
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us), TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);

	return ret;
}

uint8_t ull_adv_sync_time_update(struct ll_adv_sync_set *sync,
				 struct pdu_adv *pdu)
{
	uint32_t volatile ret_cb;
	uint32_t ticks_minus;
	uint32_t ticks_plus;
	uint32_t time_ticks;
	uint16_t time_us;
	uint32_t ret;

	time_us = sync_time_get(sync, pdu);
	time_ticks = HAL_TICKER_US_TO_TICKS(time_us);
	if (sync->ull.ticks_slot > time_ticks) {
		ticks_minus = sync->ull.ticks_slot - time_ticks;
		ticks_plus = 0U;
	} else if (sync->ull.ticks_slot < time_ticks) {
		ticks_minus = 0U;
		ticks_plus = time_ticks - sync->ull.ticks_slot;
	} else {
		return BT_HCI_ERR_SUCCESS;
	}

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_ADV_SYNC_BASE + sync_handle_get(sync)),
			    0, 0, ticks_plus, ticks_minus, 0, 0,
			    ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync->ull.ticks_slot = time_ticks;

	return BT_HCI_ERR_SUCCESS;
}

uint8_t ull_adv_sync_chm_update(void)
{
	uint8_t handle;

	handle = CONFIG_BT_CTLR_ADV_SYNC_SET;
	while (handle--) {
		(void)sync_chm_update(handle);
	}

	/* TODO: Should failure due to Channel Map Update being already in
	 * progress be returned to caller?
	 */
	return 0;
}

void ull_adv_sync_chm_complete(struct node_rx_hdr *rx)
{
	uint8_t hdr_data[ULL_ADV_HDR_DATA_LEN_SIZE +
			 ULL_ADV_HDR_DATA_ACAD_PTR_SIZE];
	struct lll_adv_sync *lll_sync;
	struct pdu_adv *pdu_prev;
	struct ll_adv_set *adv;
	struct pdu_adv *pdu;
	uint8_t others_len;
	uint8_t acad_len;
	uint8_t *others;
	uint8_t ter_idx;
	uint8_t ad_len;
	uint8_t *acad;
	uint8_t *ad;
	uint8_t len;
	uint8_t err;

	/* Allocate next Sync PDU */
	pdu_prev = NULL;
	pdu = NULL;
	lll_sync = rx->rx_ftr.param;
	adv = HDR_LLL2ULL(lll_sync->adv);
	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
	LL_ASSERT(!err);

	/* Get the size of the current ACAD; the first octet returns the old
	 * length, followed by a pointer to the previous ACAD offset in the
	 * PDU.
	 */
	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = 0U;
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	LL_ASSERT(!err);

	/* Dev assert if ACAD empty */
	LL_ASSERT(hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET]);

	/* Get the pointer, prev content and size of current ACAD */
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	LL_ASSERT(!err);

	/* Find the Channel Map Update Indication */
	acad_len = hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET];
	len = acad_len;
	(void)memcpy(&acad, &hdr_data[ULL_ADV_HDR_DATA_ACAD_PTR_OFFSET],
		     sizeof(acad));
	ad = acad;
	do {
		ad_len = ad[0];
		if (ad_len && (ad[1] == BT_DATA_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

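		/* Add one octet for the AD structure length field itself when
		 * stepping over this AD structure.
		 */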
		ad_len += 1U;

		LL_ASSERT(ad_len < len);

		ad += ad_len;
		len -= ad_len;
	} while (len);

	/* Remove Channel Map Update Indication by moving other AD types that
	 * are after it.
	 */
	ad_len += 1U;
	others = ad + ad_len;
	acad_len -= ad_len;
	others_len = acad_len - (ad - acad);
	(void)memmove(ad, others, others_len);

	/* Adjust the next PDU for the new ACAD length; this is done by using
	 * the next PDU to copy the ACAD into the same next PDU.
	 */
	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = acad_len;
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	LL_ASSERT(!err);

	lll_adv_sync_data_enqueue(lll_sync, ter_idx);
}

void ull_adv_sync_info_fill(struct ll_adv_sync_set *sync,
			    struct pdu_adv_sync_info *si)
{
	struct lll_adv_sync *lll_sync;

	/* NOTE: sync offset and offset unit are filled by secondary prepare.
	 *
	 * When sync_info is part of an advertising PDU the offs_adjust field
	 * is always set to 0.
	 */
	si->offs_units = OFFS_UNIT_VALUE_30_US;
	si->offs_adjust = 0U;
	si->offs = 0U;

	/* Fill the interval, access address and CRC init */
	si->interval = sys_cpu_to_le16(sync->interval);
	lll_sync = &sync->lll;
	memcpy(&si->aa, lll_sync->access_addr, sizeof(si->aa));
	memcpy(si->crc_init, lll_sync->crc_init, sizeof(si->crc_init));

	/* NOTE: Filled by secondary prepare */
	si->evt_cntr = 0U;
}

void ull_adv_sync_offset_get(struct ll_adv_set *adv)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_sync_offset_get};
	uint32_t ret;

	mfy.param = adv;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
			     &mfy);
	LL_ASSERT(!ret);
}

uint8_t ull_adv_sync_pdu_alloc(struct ll_adv_set *adv,
			       enum ull_adv_pdu_extra_data_flag extra_data_flag,
			       struct pdu_adv **ter_pdu_prev, struct pdu_adv **ter_pdu_new,
			       void **extra_data_prev, void **extra_data_new, uint8_t *ter_idx)
{
	struct pdu_adv *pdu_prev, *pdu_new;
	struct lll_adv_sync *lll_sync;
	void *ed_prev;
#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
	void *ed_new;
#endif

	lll_sync = adv->lll.sync;
	if (!lll_sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Get reference to previous periodic advertising PDU data */
	pdu_prev = lll_adv_sync_data_peek(lll_sync, &ed_prev);

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	/* Get reference to new periodic advertising PDU data buffer */
	if (extra_data_flag == ULL_ADV_PDU_EXTRA_DATA_ALLOC_ALWAYS ||
	    (extra_data_flag == ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST && ed_prev)) {
		/* If there was extra data in the past PDU data, or it is
		 * required by the hdr_add_fields, then allocate memory for it.
		 */
		pdu_new = lll_adv_sync_data_alloc(lll_sync, &ed_new,
						  ter_idx);
		if (!pdu_new) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}
	} else {
		ed_new = NULL;
#else
	{
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
		pdu_new = lll_adv_sync_data_alloc(lll_sync, NULL, ter_idx);
		if (!pdu_new) {
			return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
	if (extra_data_prev) {
		*extra_data_prev = ed_prev;
	}
	if (extra_data_new) {
		*extra_data_new = ed_new;
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

	*ter_pdu_prev = pdu_prev;
	*ter_pdu_new = pdu_new;

	return 0;
}

/* @brief Set or clear fields in the extended advertising header and store
 * extra_data if requested.
 *
 * @param[in] lll_sync       Reference to periodic advertising sync.
 * @param[in] ter_pdu_prev   Pointer to previous PDU.
 * @param[in] ter_pdu        Pointer to PDU whose fields are filled.
 * @param[in] hdr_add_fields Flags indicating which fields to add.
 * @param[in] hdr_rem_fields Flags indicating which fields to remove.
 * @param[in] hdr_data       Pointer to data to be added to the header.
 *                           Content depends on the value of
 *                           @p hdr_add_fields.
 *
 * @note
 * @p hdr_data content depends on the flag provided by @p hdr_add_fields:
 * - ULL_ADV_PDU_HDR_FIELD_CTE_INFO:
 *   # @p hdr_data points to a single byte with the CTEInfo field
 * - ULL_ADV_PDU_HDR_FIELD_AD_DATA:
 *   # @p hdr_data points to memory where the first byte is the size of the
 *     advertising data, followed by a pointer to the actual advertising data.
 * - ULL_ADV_PDU_HDR_FIELD_AUX_PTR:
 *   # @p hdr_data parameter is not used
 * - ULL_ADV_PDU_HDR_FIELD_ACAD:
 *   # @p hdr_data points to memory where the first byte is the size of the
 *     ACAD, followed by memory used to return the offset to the ACAD field.
 *   # @p hdr_data memory returns the previous ACAD length back in the first
 *     byte and the offset to the new ACAD in the next PDU.
 *
 * @return Zero in case of success, other value in case of failure.
 */
uint8_t ull_adv_sync_pdu_set_clear(struct lll_adv_sync *lll_sync,
				   struct pdu_adv *ter_pdu_prev,
				   struct pdu_adv *ter_pdu,
				   uint16_t hdr_add_fields,
				   uint16_t hdr_rem_fields,
				   void *hdr_data)
{
	struct pdu_adv_com_ext_adv *ter_com_hdr, *ter_com_hdr_prev;
	struct pdu_adv_ext_hdr ter_hdr = { 0 }, ter_hdr_prev = { 0 };
	uint8_t *ter_dptr, *ter_dptr_prev;
	uint8_t acad_len_prev;
	uint8_t ter_len_prev;
	uint8_t hdr_buf_len;
	uint16_t ter_len;
	uint8_t *ad_data;
	uint8_t acad_len;
#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	uint8_t cte_info;
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */
	uint8_t ad_len;

	/* Get common pointers from reference to previous tertiary PDU data */
	ter_com_hdr_prev = (void *)&ter_pdu_prev->adv_ext_ind;
	if (ter_com_hdr_prev->ext_hdr_len != 0) {
		ter_hdr_prev = ter_com_hdr_prev->ext_hdr;
	}
	ter_dptr_prev = ter_com_hdr_prev->ext_hdr.data;

	/* Set common fields in reference to new tertiary PDU data buffer */
	ter_pdu->type = ter_pdu_prev->type;
	ter_pdu->rfu = 0U;
	ter_pdu->chan_sel = 0U;

	ter_pdu->tx_addr = ter_pdu_prev->tx_addr;
	ter_pdu->rx_addr = ter_pdu_prev->rx_addr;

	/* Get common pointers from current tertiary PDU data.
	 * It is possible that the current tertiary PDU is the same as the
	 * previous one. This may happen when the periodic advertising chain
	 * is updated in place.
	 */
	ter_com_hdr = (void *)&ter_pdu->adv_ext_ind;
	ter_com_hdr->adv_mode = ter_com_hdr_prev->adv_mode;
	ter_dptr = ter_com_hdr->ext_hdr.data;

	/* No AdvA in AUX_SYNC_IND */
	/* No TargetA in AUX_SYNC_IND */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	/* If requested add or update CTEInfo */
	if (hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) {
		ter_hdr.cte_info = 1;
		cte_info = *(uint8_t *)hdr_data;
		hdr_data = (uint8_t *)hdr_data + 1;
		ter_dptr += sizeof(struct pdu_cte_info);
	/* If CTEInfo exists in prev and is not requested to be removed */
	} else if (!(hdr_rem_fields & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) &&
		   ter_hdr_prev.cte_info) {
		ter_hdr.cte_info = 1;
		ter_dptr += sizeof(struct pdu_cte_info);
	}

	/* If CTEInfo exists in prev PDU */
	if (ter_hdr_prev.cte_info) {
		ter_dptr_prev += sizeof(struct pdu_cte_info);
	}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

	/* No ADI in AUX_SYNC_IND */

	/* AuxPtr - will be added if AUX_CHAIN_IND is required */
	if ((hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_AUX_PTR) ||
	    (!(hdr_rem_fields & ULL_ADV_PDU_HDR_FIELD_AUX_PTR) &&
	     ter_hdr_prev.aux_ptr)) {
		ter_hdr.aux_ptr = 1;
	}
	if (ter_hdr.aux_ptr) {
		ter_dptr += sizeof(struct pdu_adv_aux_ptr);
	}
	if (ter_hdr_prev.aux_ptr) {
		ter_dptr_prev += sizeof(struct pdu_adv_aux_ptr);
	}

	/* No SyncInfo in AUX_SYNC_IND */

	/* Tx Power flag */
	if (ter_hdr_prev.tx_pwr) {
		ter_dptr_prev++;

		ter_hdr.tx_pwr = 1;
		ter_dptr++;
	}

	/* Calc previous ACAD len and update PDU len */
	ter_len_prev = ter_dptr_prev - (uint8_t *)ter_com_hdr_prev;
	hdr_buf_len = ter_com_hdr_prev->ext_hdr_len +
		      PDU_AC_EXT_HEADER_SIZE_MIN;
	if (ter_len_prev <= hdr_buf_len) {
		/* There is some data other than ACAD in the extended header
		 * when ter_len_prev equals hdr_buf_len. There is ACAD when
		 * ter_len_prev is smaller than hdr_buf_len.
		 */
		acad_len_prev = hdr_buf_len - ter_len_prev;
		ter_len_prev += acad_len_prev;
		ter_dptr_prev += acad_len_prev;
	} else {
		/* There is no data in the extended header, all flags are
		 * zero.
		 */
		acad_len_prev = 0;
		/* NOTE: If no flags are set then the extended header length
		 *       will be zero. Under this condition the current
		 *       ter_len_prev value will be greater than the extended
		 *       header length, hence set ter_len_prev to the size of
		 *       the length/mode field.
		 */
		ter_len_prev = PDU_AC_EXT_HEADER_SIZE_MIN;
		ter_dptr_prev = (uint8_t *)ter_com_hdr_prev + ter_len_prev;
	}

	/* Did we parse beyond PDU length? */
	if (ter_len_prev > ter_pdu_prev->len) {
		/* we should not encounter invalid length */
		return BT_HCI_ERR_UNSPECIFIED;
	}

	/* Add/Retain/Remove ACAD */
	if (hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_ACAD) {
		acad_len = *(uint8_t *)hdr_data;
		/* return prev ACAD length */
		*(uint8_t *)hdr_data = acad_len_prev;
		hdr_data = (uint8_t *)hdr_data + 1;
		/* return the pointer to ACAD offset */
		memcpy(hdr_data, &ter_dptr, sizeof(ter_dptr));
		hdr_data = (uint8_t *)hdr_data + sizeof(ter_dptr);
		ter_dptr += acad_len;
	} else if (!(hdr_rem_fields & ULL_ADV_PDU_HDR_FIELD_ACAD)) {
		acad_len = acad_len_prev;
		ter_dptr += acad_len_prev;
	} else {
		acad_len = 0U;
	}

	/* Calc current tertiary PDU len */
	ter_len = ull_adv_aux_hdr_len_calc(ter_com_hdr, &ter_dptr);
	ull_adv_aux_hdr_len_fill(ter_com_hdr, ter_len);

	/* Get Adv data from function parameters */
	if (hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_AD_DATA) {
		ad_data = hdr_data;
		ad_len = *ad_data;
		++ad_data;

		ad_data = (void *)sys_get_le32(ad_data);
	} else if (!(hdr_rem_fields & ULL_ADV_PDU_HDR_FIELD_AD_DATA)) {
		ad_len = ter_pdu_prev->len - ter_len_prev;
		ad_data = ter_dptr_prev;
	} else {
		ad_len = 0;
		ad_data = NULL;
	}

	/* Add AD len to tertiary PDU length */
	ter_len += ad_len;

	/* Check AdvData overflow */
	if (ter_len > PDU_AC_PAYLOAD_SIZE_MAX) {
		return BT_HCI_ERR_PACKET_TOO_LONG;
	}

	/* set the tertiary PDU len */
	ter_pdu->len = ter_len;

	/* Start filling tertiary PDU payload based on flags from here
	 * ==============================================================
	 */
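
	/* NOTE: the fields below are written back to front, decrementing
	 * ter_dptr and ter_dptr_prev from the end of the populated headers,
	 * so that an in-place update (ter_pdu == ter_pdu_prev) does not
	 * overwrite source bytes before they have been copied.
	 */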

	/* Fill AdvData in tertiary PDU */
	memmove(ter_dptr, ad_data, ad_len);

	/* Early exit if no flags set */
	if (!ter_com_hdr->ext_hdr_len) {
		return 0;
	}

	/* Retain ACAD in tertiary PDU */
	ter_dptr_prev -= acad_len_prev;
	if (acad_len) {
		ter_dptr -= acad_len;
		memmove(ter_dptr, ter_dptr_prev, acad_len_prev);
	}

	/* Tx Power */
	if (ter_hdr.tx_pwr) {
		*--ter_dptr = *--ter_dptr_prev;
	}

	/* No SyncInfo in AUX_SYNC_IND */

	/* AuxPtr */
	if (ter_hdr.aux_ptr) {
		/* ToDo Update setup of aux_ptr - check documentation */
		if (ter_hdr_prev.aux_ptr) {
			ter_dptr_prev -= sizeof(struct pdu_adv_aux_ptr);
			ter_dptr -= sizeof(struct pdu_adv_aux_ptr);
			memmove(ter_dptr, ter_dptr_prev,
				sizeof(struct pdu_adv_aux_ptr));
		} else {
			ull_adv_aux_ptr_fill(&ter_dptr, lll_sync->adv->phy_s);
		}
	}

	/* No ADI in AUX_SYNC_IND */

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	if (ter_hdr.cte_info) {
		if (hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) {
			*--ter_dptr = cte_info;
		} else {
			*--ter_dptr = *--ter_dptr_prev;
		}
	}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

	/* No TargetA in AUX_SYNC_IND */
	/* No AdvA in AUX_SYNC_IND */

	if (ter_com_hdr->ext_hdr_len != 0) {
		ter_com_hdr->ext_hdr = ter_hdr;
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
/* @brief Store new extra_data or retain the previous extra_data, based on the
 * extended advertising header fields being added or removed.
 *
 * @param[in] extra_data_prev Pointer to previous content of extra_data.
 * @param[in] extra_data_new  Pointer to new extra_data memory to fill.
 * @param[in] hdr_add_fields  Flags indicating which fields to add.
 * @param[in] hdr_rem_fields  Flags indicating which fields to remove.
 * @param[in] data            Pointer to data to be stored in extra_data.
 *                            Content depends on @p hdr_add_fields.
 *
 * @note
 * @p data depends on the flag provided by @p hdr_add_fields.
 * Information about the content of the value may be found in the description
 * of @ref ull_adv_sync_pdu_set_clear.
 *
 * @return Zero in case of success, other value in case of failure.
 */
void ull_adv_sync_extra_data_set_clear(void *extra_data_prev,
				       void *extra_data_new,
				       uint16_t hdr_add_fields,
				       uint16_t hdr_rem_fields,
				       void *data)
{
	/* Currently only CTE enable requires extra_data. Due to that fact,
	 * the CTE additional data is simply copied into the extra_data
	 * memory.
	 */
	if (hdr_add_fields & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) {
		memcpy(extra_data_new, data, sizeof(struct lll_df_adv_cfg));
	} else if (!(hdr_rem_fields & ULL_ADV_PDU_HDR_FIELD_CTE_INFO) ||
		   extra_data_prev) {
		memmove(extra_data_new, extra_data_prev,
			sizeof(struct lll_df_adv_cfg));
	}
}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

static int init_reset(void)
{
	/* Initialize adv sync pool. */
	mem_init(ll_adv_sync_pool, sizeof(struct ll_adv_sync_set),
		 sizeof(ll_adv_sync_pool) / sizeof(struct ll_adv_sync_set),
		 &adv_sync_free);

	return 0;
}

static inline struct ll_adv_sync_set *sync_acquire(void)
{
	return mem_acquire(&adv_sync_free);
}

static inline void sync_release(struct ll_adv_sync_set *sync)
{
	mem_release(sync, &adv_sync_free);
}

static inline uint16_t sync_handle_get(struct ll_adv_sync_set *sync)
{
	return mem_index_get(sync, ll_adv_sync_pool,
			     sizeof(struct ll_adv_sync_set));
}

static uint8_t sync_stop(struct ll_adv_sync_set *sync)
{
	uint8_t sync_handle;
	int err;

	sync_handle = sync_handle_get(sync);

	err = ull_ticker_stop_with_mark(TICKER_ID_ADV_SYNC_BASE + sync_handle,
					sync, &sync->lll);
	LL_ASSERT(err == 0 || err == -EALREADY);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	return 0;
}

static inline uint8_t sync_remove(struct ll_adv_sync_set *sync,
				  struct ll_adv_set *adv, uint8_t enable)
{
	uint8_t pri_idx;
	uint8_t err;

	/* Remove sync_info from auxiliary PDU */
	err = ull_adv_aux_hdr_set_clear(adv, 0,
					ULL_ADV_PDU_HDR_FIELD_SYNC_INFO,
					NULL, NULL, &pri_idx);
	if (err) {
		return err;
	}

	lll_adv_data_enqueue(&adv->lll, pri_idx);

	if (sync->is_started) {
		/* TODO: we removed sync info, but if sync_stop() fails, what
		 * do we do?
		 */
		err = sync_stop(sync);
		if (err) {
			return err;
		}

		sync->is_started = 0U;
	}

	if (!enable) {
		sync->is_enabled = 0U;
	}

	return 0U;
}

static uint8_t sync_chm_update(uint8_t handle)
{
	uint8_t hdr_data[ULL_ADV_HDR_DATA_LEN_SIZE +
			 ULL_ADV_HDR_DATA_ACAD_PTR_SIZE];
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct lll_adv_sync *lll_sync;
	struct pdu_adv *pdu_prev;
	struct ll_adv_set *adv;
	uint8_t acad_len_prev;
	struct pdu_adv *pdu;
	uint16_t instant;
	uint8_t chm_last;
	uint8_t ter_idx;
	uint8_t *acad;
	uint8_t err;

	/* Check for valid advertising instance */
	adv = ull_adv_is_created_get(handle);
	if (!adv) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Check for valid periodic advertising */
	lll_sync = adv->lll.sync;
	if (!lll_sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Fail if already in progress */
	if (lll_sync->chm_last != lll_sync->chm_first) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Allocate next Sync PDU */
	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
	if (err) {
		return err;
	}

	/* Try to allocate ACAD for the channel map update indication; the
	 * previous ACAD length will be returned back.
	 */
	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = sizeof(*chm_upd_ind) + 2U;
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	if (err) {
		return err;
	}

	/* Check if there is other ACAD data already present */
	acad_len_prev = hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET];
	if (acad_len_prev) {
		/* Append to end of other ACAD already present */
		hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = acad_len_prev +
							sizeof(*chm_upd_ind) +
							2U;

		err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
						 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
						 &hdr_data);
		if (err) {
			return err;
		}
	}

	/* Populate the AD data length and opcode */
	(void)memcpy(&acad, &hdr_data[ULL_ADV_HDR_DATA_ACAD_PTR_OFFSET],
		     sizeof(acad));
	acad += acad_len_prev;
	acad[0] = sizeof(*chm_upd_ind) + 1U;
	acad[1] = BT_DATA_CHANNEL_MAP_UPDATE_IND;

	/* Populate the Channel Map Indication structure */
	chm_upd_ind = (void *)&acad[2];
	(void)ull_chan_map_get(chm_upd_ind->chm);
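	/* The instant is set a few events (6) in the future, giving already
	 * synchronized scanners time to receive the indication before the
	 * new channel map applies.
	 */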
	instant = lll_sync->event_counter + 6U;
	chm_upd_ind->instant = sys_cpu_to_le16(instant);

	/* Update the LLL to reflect the Channel Map and Instant to use */
	chm_last = lll_sync->chm_last + 1;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}
	lll_sync->chm[chm_last].data_chan_count =
		ull_chan_map_get(lll_sync->chm[chm_last].data_chan_map);
	lll_sync->chm_instant = instant;

	/* Commit the Channel Map Indication in the ACAD field of Periodic
	 * Advertising
	 */
	lll_adv_sync_data_enqueue(lll_sync, ter_idx);

	/* Initiate the Channel Map Indication */
	lll_sync->chm_last = chm_last;

	return 0;
}

static uint16_t sync_time_get(struct ll_adv_sync_set *sync,
			      struct pdu_adv *pdu)
{
	struct lll_adv_sync *lll_sync;
	struct lll_adv *lll;
	uint32_t time_us;

	lll_sync = &sync->lll;
	lll = lll_sync->adv;
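	/* Radio event time: PDU air time on the secondary PHY plus the fixed
	 * event start and end overheads.
	 */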
	time_us = PDU_AC_US(pdu->len, lll->phy_s, lll->phy_flags) +
		  EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
	struct ll_adv_set *adv = HDR_LLL2ULL(lll);
	struct lll_df_adv_cfg *df_cfg = adv->df_cfg;

	if (df_cfg && df_cfg->is_enabled) {
		time_us += CTE_LEN_US(df_cfg->cte_length);
	}
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

	return time_us;
}

static void mfy_sync_offset_get(void *param)
{
	struct ll_adv_set *adv = param;
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	struct pdu_adv_sync_info *si;
	uint32_t ticks_to_expire;
	uint32_t ticks_current;
	struct pdu_adv *pdu;
	uint8_t chm_first;
	uint8_t ticker_id;
	uint16_t lazy;
	uint8_t retry;
	uint8_t id;

	lll_sync = adv->lll.sync;
	sync = HDR_LLL2ULL(lll_sync);
	ticker_id = TICKER_ID_ADV_SYNC_BASE + sync_handle_get(sync);

	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;
	retry = 4U;
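	/* Iterate over the active tickers until the periodic advertising
	 * ticker is found, to obtain the ticks remaining until its next
	 * expiry.
	 */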
	do {
		uint32_t volatile ret_cb;
		uint32_t ticks_previous;
		uint32_t ret;
		bool success;

		ticks_previous = ticks_current;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
					       TICKER_USER_ID_ULL_LOW,
					       &id, &ticks_current,
					       &ticks_to_expire, &lazy,
					       NULL, NULL,
					       ticker_op_cb, (void *)&ret_cb);
		if (ret == TICKER_STATUS_BUSY) {
			while (ret_cb == TICKER_STATUS_BUSY) {
				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_LOW);
			}
		}

		success = (ret_cb == TICKER_STATUS_SUCCESS);
		LL_ASSERT(success);

		LL_ASSERT((ticks_current == ticks_previous) || retry--);

		LL_ASSERT(id != TICKER_NULL);
	} while (id != ticker_id);

	/* NOTE: as the remainder is not used in scheduling of the primary
	 * PDU, the packet timer starts transmission 1 tick later, hence
	 * the +1.
	 */
	lll_sync->ticks_offset = ticks_to_expire + 1;

	pdu = lll_adv_aux_data_latest_peek(adv->lll.aux);
	si = sync_info_get(pdu);
	sync_info_offset_fill(si, ticks_to_expire, 0);
	si->evt_cntr = lll_sync->event_counter + lll_sync->latency_prepare +
		       lazy;

	/* Fill the correct channel map to use if at or past the instant */
	if (lll_sync->chm_first != lll_sync->chm_last) {
		uint16_t instant_latency;

		instant_latency = (si->evt_cntr - lll_sync->chm_instant) &
				  EVENT_INSTANT_MAX;
		if (instant_latency <= EVENT_INSTANT_LATENCY_MAX) {
			chm_first = lll_sync->chm_last;
		} else {
			chm_first = lll_sync->chm_first;
		}
	} else {
		chm_first = lll_sync->chm_first;
	}
	(void)memcpy(si->sca_chm, lll_sync->chm[chm_first].data_chan_map,
		     sizeof(si->sca_chm));
	si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] |=
		((lll_clock_sca_local_get() <<
		  PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS) &
		 PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK);
}

static inline struct pdu_adv_sync_info *sync_info_get(struct pdu_adv *pdu)
{
	struct pdu_adv_com_ext_adv *p;
	struct pdu_adv_ext_hdr *h;
	uint8_t *ptr;

	p = (void *)&pdu->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = h->data;

	if (h->adv_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->adi) {
		ptr += sizeof(struct pdu_adv_adi);
	}

	if (h->aux_ptr) {
		ptr += sizeof(struct pdu_adv_aux_ptr);
	}

	return (void *)ptr;
}

static inline void sync_info_offset_fill(struct pdu_adv_sync_info *si,
					 uint32_t ticks_offset,
					 uint32_t start_us)
{
	uint32_t offs;

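	/* Convert the ticker offset to 30 us units; fall back to 300 us units
	 * when the value does not fit the 13-bit offset field.
	 */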
	offs = HAL_TICKER_TICKS_TO_US(ticks_offset) - start_us;
	offs = offs / OFFS_UNIT_30_US;
	if (!!(offs >> 13)) {
		si->offs = offs / (OFFS_UNIT_300_US / OFFS_UNIT_30_US);
		si->offs_units = OFFS_UNIT_VALUE_300_US;
	} else {
		si->offs = offs;
		si->offs_units = OFFS_UNIT_VALUE_30_US;
	}
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_adv_sync_prepare};
	static struct lll_prepare_param p;
	struct ll_adv_sync_set *sync = param;
	struct lll_adv_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	lll = &sync->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
			     TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_A(1);
}

static void ticker_op_cb(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;
}