1 /*
2 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/sys/byteorder.h>
12
13 #include "hal/cpu.h"
14 #include "hal/ecb.h"
15 #include "hal/ccm.h"
16 #include "hal/ticker.h"
17
18 #include "util/util.h"
19 #include "util/mem.h"
20 #include "util/memq.h"
21 #include "util/mfifo.h"
22 #include "util/mayfly.h"
23 #include "util/dbuf.h"
24
25 #include "ticker/ticker.h"
26
27 #include "pdu_df.h"
28 #include "lll/pdu_vendor.h"
29 #include "pdu.h"
30
31 #include "lll.h"
32 #include "lll_clock.h"
33 #include "lll/lll_df_types.h"
34 #include "lll_conn.h"
35 #include "lll_conn_iso.h"
36 #include "lll/lll_vendor.h"
37
38 #include "ll_sw/ull_tx_queue.h"
39
40 #include "isoal.h"
41 #include "ull_iso_types.h"
42 #include "ull_conn_types.h"
43 #include "ull_conn_iso_types.h"
44
45 #if defined(CONFIG_BT_CTLR_USER_EXT)
46 #include "ull_vendor.h"
47 #endif /* CONFIG_BT_CTLR_USER_EXT */
48
49 #include "ull_internal.h"
50 #include "ull_llcp_internal.h"
51 #include "ull_sched_internal.h"
52 #include "ull_chan_internal.h"
53 #include "ull_conn_internal.h"
54 #include "ull_peripheral_internal.h"
55 #include "ull_central_internal.h"
56
57 #include "ull_iso_internal.h"
58 #include "ull_conn_iso_internal.h"
59 #include "ull_peripheral_iso_internal.h"
60 #include "lll/lll_adv_types.h"
61 #include "lll_adv.h"
62 #include "ull_adv_types.h"
63 #include "ull_adv_internal.h"
64 #include "lll_sync.h"
65 #include "lll_sync_iso.h"
66 #include "ull_sync_types.h"
67 #include "lll_scan.h"
68 #include "ull_scan_types.h"
69 #include "ull_sync_internal.h"
70
71 #include "ll.h"
72 #include "ll_feat.h"
73 #include "ll_settings.h"
74
75 #include "ll_sw/ull_llcp.h"
76 #include "ll_sw/ull_llcp_features.h"
77
78 #include "hal/debug.h"
79
80 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
81 #include <zephyr/logging/log.h>
82 LOG_MODULE_REGISTER(bt_ctlr_ull_conn);
83
84 static int init_reset(void);
85 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
86 static void tx_demux_sched(struct ll_conn *conn);
87 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
88 static void tx_demux(void *param);
89 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);
90
91 static void ticker_update_conn_op_cb(uint32_t status, void *param);
92 static void ticker_stop_conn_op_cb(uint32_t status, void *param);
93 static void ticker_start_conn_op_cb(uint32_t status, void *param);
94
95 static void conn_setup_adv_scan_disabled_cb(void *param);
96 static inline void disable(uint16_t handle);
97 static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
98 static void conn_cleanup_finalize(struct ll_conn *conn);
99 static void tx_ull_flush(struct ll_conn *conn);
100 static void ticker_stop_op_cb(uint32_t status, void *param);
101 static void conn_disable(void *param);
102 static void disabled_cb(void *param);
103 static void tx_lll_flush(void *param);
104
105 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
106 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
107 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
108
109 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
110 /* Connection context pointer used as CPR mutex to serialize connection
111 * parameter request procedures across simultaneous connections, so that
112 * offsets exchanged with the peer do not get changed.
113 */
114 struct ll_conn *conn_upd_curr;
115 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
116
117 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
118 static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
119 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
120
121 #if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
122 #define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
123 #endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */
124
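/* Size of one connection Tx buffer: the node_tx header up to its PDU
 * storage, the data channel PDU header up to the payload, the maximum
 * Tx payload length plus any vendor specific overhead, rounded up to
 * memory alignment by MROUND.
 */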
125 #define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
126 offsetof(struct pdu_data, lldata) + \
127 (LL_LENGTH_OCTETS_TX_MAX + \
128 BT_CTLR_USER_TX_BUFFER_OVERHEAD))
129
130 #define CONN_DATA_BUFFERS CONFIG_BT_BUF_ACL_TX_COUNT
131
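/* conn_tx carries host ACL Tx buffers from thread context towards the
 * ULL demux, while conn_ack carries transmitted node references back
 * from LLL for acknowledgment; the ack FIFO is sized to also hold the
 * LLCP control PDU buffers.
 */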
132 static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONN_DATA_BUFFERS);
133 static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
134 (CONN_DATA_BUFFERS +
135 LLCP_TX_CTRL_BUF_COUNT));
136
137 static struct {
138 void *free;
139 uint8_t pool[CONN_TX_BUF_SIZE * CONN_DATA_BUFFERS];
140 } mem_conn_tx;
141
142 static struct {
143 void *free;
144 uint8_t pool[sizeof(memq_link_t) *
145 (CONN_DATA_BUFFERS +
146 LLCP_TX_CTRL_BUF_COUNT)];
147 } mem_link_tx;
148
149 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
150 static uint16_t default_tx_octets;
151 static uint16_t default_tx_time;
152 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
153
154 #if defined(CONFIG_BT_CTLR_PHY)
155 static uint8_t default_phy_tx;
156 static uint8_t default_phy_rx;
157 #endif /* CONFIG_BT_CTLR_PHY */
158
159 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
160 static struct past_params default_past_params;
161 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
162
163 static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
164 static void *conn_free;
165
166 struct ll_conn *ll_conn_acquire(void)
167 {
168 return mem_acquire(&conn_free);
169 }
170
171 void ll_conn_release(struct ll_conn *conn)
172 {
173 mem_release(conn, &conn_free);
174 }
175
176 uint16_t ll_conn_handle_get(struct ll_conn *conn)
177 {
178 return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
179 }
180
181 struct ll_conn *ll_conn_get(uint16_t handle)
182 {
183 return mem_get(conn_pool, sizeof(struct ll_conn), handle);
184 }
185
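/* Return the connection context only for an established connection;
 * unlike ll_conn_get(), a context whose LLL handle has been
 * invalidated (i.e. not connected) yields NULL.
 */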
186 struct ll_conn *ll_connected_get(uint16_t handle)
187 {
188 struct ll_conn *conn;
189
190 if (handle >= CONFIG_BT_MAX_CONN) {
191 return NULL;
192 }
193
194 conn = ll_conn_get(handle);
195 if (conn->lll.handle != handle) {
196 return NULL;
197 }
198
199 return conn;
200 }
201
202 uint16_t ll_conn_free_count_get(void)
203 {
204 return mem_free_count_get(conn_free);
205 }
206
207 void *ll_tx_mem_acquire(void)
208 {
209 return mem_acquire(&mem_conn_tx.free);
210 }
211
212 void ll_tx_mem_release(void *tx)
213 {
214 mem_release(tx, &mem_conn_tx.free);
215 }
216
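/* Enqueue an ACL Tx buffer, previously acquired via ll_tx_mem_acquire(),
 * into the conn_tx FIFO for the given connection handle. When not using
 * the low latency ULL variant and the connection event is currently
 * active (ULL reference held), a demux mayfly is scheduled immediately.
 */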
217 int ll_tx_mem_enqueue(uint16_t handle, void *tx)
218 {
219 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
220 #define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
221 static uint32_t tx_rate;
222 static uint32_t tx_cnt;
223 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
224 struct lll_tx *lll_tx;
225 struct ll_conn *conn;
226 uint8_t idx;
227
228 conn = ll_connected_get(handle);
229 if (!conn) {
230 return -EINVAL;
231 }
232
233 idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
234 if (!lll_tx) {
235 return -ENOBUFS;
236 }
237
238 lll_tx->handle = handle;
239 lll_tx->node = tx;
240
241 MFIFO_ENQUEUE(conn_tx, idx);
242
243 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
244 if (ull_ref_get(&conn->ull)) {
245 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
246 if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
247 uint8_t previous, force_md_cnt;
248
249 force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
250 previous = lll_conn_force_md_cnt_set(force_md_cnt);
251 if (previous != force_md_cnt) {
252 LOG_INF("force_md_cnt: old= %u, new= %u.", previous, force_md_cnt);
253 }
254 }
255 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
256
257 tx_demux_sched(conn);
258
259 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
260 } else {
261 lll_conn_force_md_cnt_set(0U);
262 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
263 }
264 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
265
266 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
267 ull_periph_latency_cancel(conn, handle);
268 }
269
270 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
271 static uint32_t last_cycle_stamp;
272 static uint32_t tx_len;
273 struct pdu_data *pdu;
274 uint32_t cycle_stamp;
275 uint64_t delta;
276
277 cycle_stamp = k_cycle_get_32();
278 delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
279 if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
280 LOG_INF("incoming Tx: count= %u, len= %u, rate= %u bps.", tx_cnt, tx_len, tx_rate);
281
282 last_cycle_stamp = cycle_stamp;
283 tx_cnt = 0U;
284 tx_len = 0U;
285 }
286
287 pdu = (void *)((struct node_tx *)tx)->pdu;
288 tx_len += pdu->len;
289 if (delta == 0) { /* Avoid a division by zero if we happen to have a really fast HCI interface */
290 delta = 1;
291 }
292 tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
293 tx_cnt++;
294 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
295
296 return 0;
297 }
298
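/* HCI path for connection updates: cmd 0 initiates a connection update
 * (or connection parameter request) procedure, cmd 2 replies to a remote
 * connection parameter request, accepting it when status is 0 or
 * rejecting it with the given status otherwise.
 */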
299 uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
300 uint16_t interval_max, uint16_t latency, uint16_t timeout, uint16_t *offset)
301 {
302 struct ll_conn *conn;
303
304 conn = ll_connected_get(handle);
305 if (!conn) {
306 return BT_HCI_ERR_UNKNOWN_CONN_ID;
307 }
308
309 if (cmd == 0U) {
310 uint8_t err;
311
312 err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout,
313 offset);
314 if (err) {
315 return err;
316 }
317
318 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
319 conn->lll.role) {
320 ull_periph_latency_cancel(conn, handle);
321 }
322 } else if (cmd == 2U) {
323 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
324 if (status == 0U) {
325 ull_cp_conn_param_req_reply(conn);
326 } else {
327 ull_cp_conn_param_req_neg_reply(conn, status);
328 }
329 return BT_HCI_ERR_SUCCESS;
330 #else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
331 /* CPR feature not supported */
332 return BT_HCI_ERR_CMD_DISALLOWED;
333 #endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
334 } else {
335 return BT_HCI_ERR_UNKNOWN_CMD;
336 }
337
338 return 0;
339 }
340
341 uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
342 {
343 struct ll_conn *conn;
344
345 conn = ll_connected_get(handle);
346 if (!conn) {
347 return BT_HCI_ERR_UNKNOWN_CONN_ID;
348 }
349
350 /*
351 * Core Spec 5.2 Vol4: 7.8.20:
352 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
353 * for the specified Connection_Handle. The returned value indicates the state of
354 * the Channel_Map specified by the last transmitted or received Channel_Map
355 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
356 * Connection_Handle, regardless of whether the Central has received an
357 * acknowledgment
358 */
359 const uint8_t *pending_chm;
360
361 pending_chm = ull_cp_chan_map_update_pending(conn);
362 if (pending_chm) {
363 memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
364 } else {
365 memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
366 }
367
368 return 0;
369 }
370
371 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
372 uint8_t ll_req_peer_sca(uint16_t handle)
373 {
374 struct ll_conn *conn;
375
376 conn = ll_connected_get(handle);
377 if (!conn) {
378 return BT_HCI_ERR_UNKNOWN_CONN_ID;
379 }
380
381 return ull_cp_req_peer_sca(conn);
382 }
383 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
384
385 static bool is_valid_disconnect_reason(uint8_t reason)
386 {
387 switch (reason) {
388 case BT_HCI_ERR_AUTH_FAIL:
389 case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
390 case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
391 case BT_HCI_ERR_REMOTE_POWER_OFF:
392 case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
393 case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
394 case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
395 return true;
396 default:
397 return false;
398 }
399 }
400
401 uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
402 {
403 struct ll_conn *conn;
404 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
405 struct ll_conn_iso_stream *cis;
406 #endif
407
408 if (IS_ACL_HANDLE(handle)) {
409 conn = ll_connected_get(handle);
410
411 /* Is conn still connected? */
412 if (!conn) {
413 return BT_HCI_ERR_CMD_DISALLOWED;
414 }
415
416 if (!is_valid_disconnect_reason(reason)) {
417 return BT_HCI_ERR_INVALID_PARAM;
418 }
419
420 uint8_t err;
421
422 err = ull_cp_terminate(conn, reason);
423 if (err) {
424 return err;
425 }
426
427 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
428 ull_periph_latency_cancel(conn, handle);
429 }
430 return 0;
431 }
432 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
433 if (IS_CIS_HANDLE(handle)) {
434 cis = ll_iso_stream_connected_get(handle);
435 if (!cis) {
436 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
437 /* CIS is not connected - get the unconnected instance */
438 cis = ll_conn_iso_stream_get(handle);
439
440 /* Sanity-check instance to make sure it's created but not connected */
441 if (cis->group && cis->lll.handle == handle && !cis->established) {
442 if (cis->group->state == CIG_STATE_CONFIGURABLE) {
443 /* Disallow if CIG is still in configurable state */
444 return BT_HCI_ERR_CMD_DISALLOWED;
445
446 } else if (cis->group->state == CIG_STATE_INITIATING) {
447 conn = ll_connected_get(cis->lll.acl_handle);
448
449 /* CIS is not yet established - try to cancel procedure */
450 if (ull_cp_cc_cancel(conn)) {
451 /* Successfully canceled - complete disconnect */
452 struct node_rx_pdu *node_terminate;
453
454 node_terminate = ull_pdu_rx_alloc();
455 LL_ASSERT(node_terminate);
456
457 node_terminate->hdr.handle = handle;
458 node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
459 *((uint8_t *)node_terminate->pdu) =
460 BT_HCI_ERR_LOCALHOST_TERM_CONN;
461
462 ll_rx_put_sched(node_terminate->hdr.link,
463 node_terminate);
464
465 /* We're no longer initiating a connection */
466 cis->group->state = CIG_STATE_CONFIGURABLE;
467
468 /* This is now a successful disconnection */
469 return BT_HCI_ERR_SUCCESS;
470 }
471
472 /* Procedure could not be canceled in the current
473 * state - let it run its course and enqueue a
474 * terminate procedure.
475 */
476 return ull_cp_cis_terminate(conn, cis, reason);
477 }
478 }
479 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
480 /* Disallow if CIS is not connected */
481 return BT_HCI_ERR_CMD_DISALLOWED;
482 }
483
484 conn = ll_connected_get(cis->lll.acl_handle);
485 /* Disallow if ACL has disconnected */
486 if (!conn) {
487 return BT_HCI_ERR_CMD_DISALLOWED;
488 }
489
490 return ull_cp_cis_terminate(conn, cis, reason);
491 }
492 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
493
494 return BT_HCI_ERR_UNKNOWN_CONN_ID;
495 }
496
497 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
498 uint8_t ll_feature_req_send(uint16_t handle)
499 {
500 struct ll_conn *conn;
501
502 conn = ll_connected_get(handle);
503 if (!conn) {
504 return BT_HCI_ERR_UNKNOWN_CONN_ID;
505 }
506
507 uint8_t err;
508
509 err = ull_cp_feature_exchange(conn, 1U);
510 if (err) {
511 return err;
512 }
513
514 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
515 IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
516 conn->lll.role) {
517 ull_periph_latency_cancel(conn, handle);
518 }
519
520 return 0;
521 }
522 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
523
524 uint8_t ll_version_ind_send(uint16_t handle)
525 {
526 struct ll_conn *conn;
527
528 conn = ll_connected_get(handle);
529 if (!conn) {
530 return BT_HCI_ERR_UNKNOWN_CONN_ID;
531 }
532
533 uint8_t err;
534
535 err = ull_cp_version_exchange(conn);
536 if (err) {
537 return err;
538 }
539
540 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
541 ull_periph_latency_cancel(conn, handle);
542 }
543
544 return 0;
545 }
546
547 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
548 static bool ll_len_validate(uint16_t tx_octets, uint16_t tx_time)
549 {
550 /* validate if within HCI allowed range */
551 if (!IN_RANGE(tx_octets, PDU_DC_PAYLOAD_SIZE_MIN,
552 PDU_DC_PAYLOAD_SIZE_MAX)) {
553 return false;
554 }
555
556 /* validate if within HCI allowed range */
557 if (!IN_RANGE(tx_time, PDU_DC_PAYLOAD_TIME_MIN,
558 PDU_DC_PAYLOAD_TIME_MAX_CODED)) {
559 return false;
560 }
561
562 return true;
563 }
564
565 uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
566 uint16_t tx_time)
567 {
568 struct ll_conn *conn;
569
570 if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
571 !ll_len_validate(tx_octets, tx_time)) {
572 return BT_HCI_ERR_INVALID_PARAM;
573 }
574
575 conn = ll_connected_get(handle);
576 if (!conn) {
577 return BT_HCI_ERR_UNKNOWN_CONN_ID;
578 }
579
580 if (!feature_dle(conn)) {
581 return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
582 }
583
584 uint8_t err;
585
586 err = ull_cp_data_length_update(conn, tx_octets, tx_time);
587 if (err) {
588 return err;
589 }
590
591 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
592 ull_periph_latency_cancel(conn, handle);
593 }
594
595 return 0;
596 }
597
598 void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
599 {
600 *max_tx_octets = default_tx_octets;
601 *max_tx_time = default_tx_time;
602 }
603
604 uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
605 {
606 if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
607 !ll_len_validate(max_tx_octets, max_tx_time)) {
608 return BT_HCI_ERR_INVALID_PARAM;
609 }
610
611 default_tx_octets = max_tx_octets;
612 default_tx_time = max_tx_time;
613
614 return 0;
615 }
616
617 void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
618 uint16_t *max_rx_octets, uint16_t *max_rx_time)
619 {
620 #if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
621 #define PHY (PHY_CODED)
622 #else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
623 #define PHY (PHY_1M)
624 #endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
625 *max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
626 *max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
627 *max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
628 *max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
629 #undef PHY
630 }
631 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
632
633 #if defined(CONFIG_BT_CTLR_PHY)
634 uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
635 {
636 struct ll_conn *conn;
637
638 conn = ll_connected_get(handle);
639 if (!conn) {
640 return BT_HCI_ERR_UNKNOWN_CONN_ID;
641 }
642
643 /* TODO: context safe read */
644 *tx = conn->lll.phy_tx;
645 *rx = conn->lll.phy_rx;
646
647 return 0;
648 }
649
650 uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
651 {
652 /* TODO: validate against supported phy */
653
654 default_phy_tx = tx;
655 default_phy_rx = rx;
656
657 return 0;
658 }
659
660 uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
661 {
662 struct ll_conn *conn;
663
664 conn = ll_connected_get(handle);
665 if (!conn) {
666 return BT_HCI_ERR_UNKNOWN_CONN_ID;
667 }
668
669 if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
670 return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
671 }
672
673 uint8_t err;
674
675 err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
676 if (err) {
677 return err;
678 }
679
680 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
681 ull_periph_latency_cancel(conn, handle);
682 }
683
684 return 0;
685 }
686 #endif /* CONFIG_BT_CTLR_PHY */
687
688 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
689 uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
690 {
691 struct ll_conn *conn;
692
693 conn = ll_connected_get(handle);
694 if (!conn) {
695 return BT_HCI_ERR_UNKNOWN_CONN_ID;
696 }
697
698 *rssi = conn->lll.rssi_latest;
699
700 return 0;
701 }
702 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
703
704 #if defined(CONFIG_BT_CTLR_LE_PING)
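/* The authenticated payload timeout is stored as a count of connection
 * events (apto_reload); the helpers below convert to and from the HCI
 * unit of 10 ms using the connection interval. For example, with a
 * 100 ms interval (interval = 80, in 1.25 ms units) an apto_reload of
 * 300 events reads back as 3000, i.e. 30 s.
 */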
705 uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
706 {
707 struct ll_conn *conn;
708
709 conn = ll_connected_get(handle);
710 if (!conn) {
711 return BT_HCI_ERR_UNKNOWN_CONN_ID;
712 }
713
714 if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
715 *apto = conn->apto_reload * conn->lll.interval *
716 CONN_INT_UNIT_US / (10U * USEC_PER_MSEC);
717 } else {
718 *apto = conn->apto_reload * (conn->lll.interval + 1U) *
719 CONN_LOW_LAT_INT_UNIT_US / (10U * USEC_PER_MSEC);
720 }
721
722 return 0;
723 }
724
725 uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
726 {
727 struct ll_conn *conn;
728
729 conn = ll_connected_get(handle);
730 if (!conn) {
731 return BT_HCI_ERR_UNKNOWN_CONN_ID;
732 }
733
734 if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
735 conn->apto_reload =
736 RADIO_CONN_EVENTS(apto * 10U * USEC_PER_MSEC,
737 conn->lll.interval *
738 CONN_INT_UNIT_US);
739 } else {
740 conn->apto_reload =
741 RADIO_CONN_EVENTS(apto * 10U * USEC_PER_MSEC,
742 (conn->lll.interval + 1U) *
743 CONN_LOW_LAT_INT_UNIT_US);
744 }
745
746 return 0;
747 }
748 #endif /* CONFIG_BT_CTLR_LE_PING */
749
750 int ull_conn_init(void)
751 {
752 int err;
753
754 err = init_reset();
755 if (err) {
756 return err;
757 }
758
759 return 0;
760 }
761
762 int ull_conn_reset(void)
763 {
764 uint16_t handle;
765 int err;
766
767 #if defined(CONFIG_BT_CENTRAL)
768 /* Reset initiator */
769 (void)ull_central_reset();
770 #endif /* CONFIG_BT_CENTRAL */
771
772 for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
773 disable(handle);
774 }
775
776 /* Re-initialize the Tx mfifo */
777 MFIFO_INIT(conn_tx);
778
779 /* Re-initialize the Tx Ack mfifo */
780 MFIFO_INIT(conn_ack);
781
782 err = init_reset();
783 if (err) {
784 return err;
785 }
786
787 return 0;
788 }
789
790 struct lll_conn *ull_conn_lll_get(uint16_t handle)
791 {
792 struct ll_conn *conn;
793
794 conn = ll_conn_get(handle);
795
796 return &conn->lll;
797 }
798
799 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
800 uint16_t ull_conn_default_tx_octets_get(void)
801 {
802 return default_tx_octets;
803 }
804
805 #if defined(CONFIG_BT_CTLR_PHY)
806 uint16_t ull_conn_default_tx_time_get(void)
807 {
808 return default_tx_time;
809 }
810 #endif /* CONFIG_BT_CTLR_PHY */
811 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
812
813 #if defined(CONFIG_BT_CTLR_PHY)
814 uint8_t ull_conn_default_phy_tx_get(void)
815 {
816 return default_phy_tx;
817 }
818
819 uint8_t ull_conn_default_phy_rx_get(void)
820 {
821 return default_phy_rx;
822 }
823 #endif /* CONFIG_BT_CTLR_PHY */
824
825 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
826 void ull_conn_default_past_param_set(uint8_t mode, uint16_t skip, uint16_t timeout,
827 uint8_t cte_type)
828 {
829 default_past_params.mode = mode;
830 default_past_params.skip = skip;
831 default_past_params.timeout = timeout;
832 default_past_params.cte_type = cte_type;
833 }
834
835 struct past_params ull_conn_default_past_param_get(void)
836 {
837 return default_past_params;
838 }
839 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
840
841 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
842 bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
843 uint8_t const *const own_id_addr,
844 uint8_t const peer_id_addr_type,
845 uint8_t const *const peer_id_addr)
846 {
847 uint16_t handle;
848
849 for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
850 struct ll_conn *conn = ll_connected_get(handle);
851
852 if (conn &&
853 conn->peer_id_addr_type == peer_id_addr_type &&
854 !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
855 conn->own_id_addr_type == own_id_addr_type &&
856 !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
857 return true;
858 }
859 }
860
861 return false;
862 }
863 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
864
865 void ull_conn_setup(memq_link_t *rx_link, struct node_rx_pdu *rx)
866 {
867 struct node_rx_ftr *ftr;
868 struct ull_hdr *hdr;
869
870 /* Store the link in the node rx so that when done event is
871 * processed it can be used to enqueue node rx towards LL context
872 */
873 rx->hdr.link = rx_link;
874
875 /* NOTE: LLL conn context SHALL be after lll_hdr in
876 * struct lll_adv and struct lll_scan.
877 */
878 ftr = &(rx->rx_ftr);
879
880 /* Check the reference count and decide whether to set up the
881 * connection here or when the done event arrives.
882 */
883 hdr = HDR_LLL2ULL(ftr->param);
884 if (ull_ref_get(hdr)) {
885 /* Setup connection in ULL disabled callback,
886 * pass the node rx as disabled callback parameter.
887 */
888 LL_ASSERT(!hdr->disabled_cb);
889 hdr->disabled_param = rx;
890 hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
891 } else {
892 conn_setup_adv_scan_disabled_cb(rx);
893 }
894 }
895
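/* Demultiplex a received data channel PDU in ULL context: LL Control
 * PDUs are handed to the LLCP state machines and the node marked for
 * release; data PDUs received while Rx is paused for encryption setup
 * trigger a MIC failure termination; reserved LLIDs are dropped.
 */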
896 void ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
897 {
898 struct pdu_data *pdu_rx;
899 struct ll_conn *conn;
900
901 conn = ll_connected_get((*rx)->hdr.handle);
902 if (!conn) {
903 /* Mark buffer for release */
904 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
905
906 return;
907 }
908
909 ull_cp_tx_ntf(conn);
910
911 pdu_rx = (void *)(*rx)->pdu;
912
913 switch (pdu_rx->ll_id) {
914 case PDU_DATA_LLID_CTRL:
915 {
916 /* Mark buffer for release */
917 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
918
919 ull_cp_rx(conn, link, *rx);
920
921 return;
922 }
923
924 case PDU_DATA_LLID_DATA_CONTINUE:
925 case PDU_DATA_LLID_DATA_START:
926 #if defined(CONFIG_BT_CTLR_LE_ENC)
927 if (conn->pause_rx_data) {
928 conn->llcp_terminate.reason_final =
929 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
930
931 /* Mark buffer for release */
932 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
933 }
934 #endif /* CONFIG_BT_CTLR_LE_ENC */
935 break;
936
937 case PDU_DATA_LLID_RESV:
938 default:
939 #if defined(CONFIG_BT_CTLR_LE_ENC)
940 if (conn->pause_rx_data) {
941 conn->llcp_terminate.reason_final =
942 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
943 }
944 #endif /* CONFIG_BT_CTLR_LE_ENC */
945
946 /* Invalid LL id, drop it. */
947
948 /* Mark buffer for release */
949 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
950
951 break;
952 }
953 }
954
955 int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire,
956 uint32_t remainder, uint16_t lazy)
957 {
958 LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);
959
960 conn->llcp.prep.ticks_at_expire = ticks_at_expire;
961 conn->llcp.prep.remainder = remainder;
962 conn->llcp.prep.lazy = lazy;
963
964 ull_cp_run(conn);
965
966 if (conn->cancel_prepare) {
967 /* Reset signal */
968 conn->cancel_prepare = 0U;
969
970 /* Cancel prepare */
971 return -ECANCELED;
972 }
973
974 /* Continue prepare */
975 return 0;
976 }
977
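/* Connection event done processing in ULL context: checks the MIC state
 * and (pre) authenticated payload timeouts, computes peripheral drift
 * compensation and new peripheral latency, runs the supervision,
 * connection establishment and procedure response timeouts, and finally
 * issues a ticker_update when drift, slot reservation, latency or force
 * changed.
 */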
978 void ull_conn_done(struct node_rx_event_done *done)
979 {
980 uint32_t ticks_drift_minus;
981 uint32_t ticks_drift_plus;
982 uint32_t ticks_slot_minus;
983 uint32_t ticks_slot_plus;
984 uint16_t latency_event;
985 uint16_t elapsed_event;
986 struct lll_conn *lll;
987 struct ll_conn *conn;
988 uint8_t reason_final;
989 uint8_t force_lll;
990 uint16_t lazy;
991 uint8_t force;
992
993 /* Get reference to ULL context */
994 conn = CONTAINER_OF(done->param, struct ll_conn, ull);
995 lll = &conn->lll;
996
997 /* Skip if connection terminated by local host */
998 if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
999 return;
1000 }
1001
1002 ull_cp_tx_ntf(conn);
1003
1004 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
1005 ull_lp_past_conn_evt_done(conn, done);
1006 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
1007
1008 #if defined(CONFIG_BT_CTLR_LE_ENC)
1009 /* Check authenticated payload expiry or MIC failure */
1010 switch (done->extra.mic_state) {
1011 case LLL_CONN_MIC_NONE:
1012 #if defined(CONFIG_BT_CTLR_LE_PING)
1013 if (lll->enc_rx && lll->enc_tx) {
1014 uint16_t appto_reload_new;
1015
1016 /* check for change in apto */
1017 appto_reload_new = (conn->apto_reload >
1018 (lll->latency + 6)) ?
1019 (conn->apto_reload -
1020 (lll->latency + 6)) :
1021 conn->apto_reload;
1022 if (conn->appto_reload != appto_reload_new) {
1023 conn->appto_reload = appto_reload_new;
1024 conn->apto_expire = 0U;
1025 }
1026
1027 /* start authenticated payload (pre) timeout */
1028 if (conn->apto_expire == 0U) {
1029 conn->appto_expire = conn->appto_reload;
1030 conn->apto_expire = conn->apto_reload;
1031 }
1032 }
1033 #endif /* CONFIG_BT_CTLR_LE_PING */
1034 break;
1035
1036 case LLL_CONN_MIC_PASS:
1037 #if defined(CONFIG_BT_CTLR_LE_PING)
1038 conn->appto_expire = conn->apto_expire = 0U;
1039 #endif /* CONFIG_BT_CTLR_LE_PING */
1040 break;
1041
1042 case LLL_CONN_MIC_FAIL:
1043 conn->llcp_terminate.reason_final =
1044 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
1045 break;
1046 }
1047 #endif /* CONFIG_BT_CTLR_LE_ENC */
1048
1049 reason_final = conn->llcp_terminate.reason_final;
1050 if (reason_final) {
1051 conn_cleanup(conn, reason_final);
1052
1053 return;
1054 }
1055
1056 /* Events elapsed used in timeout checks below */
1057 #if defined(CONFIG_BT_CTLR_CONN_META)
1058 /* If event has shallow expiry do not add latency, but rely on
1059 * accumulated lazy count.
1060 */
1061 latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
1062 #else
1063 latency_event = lll->latency_event;
1064 #endif
1065
1066 /* Peripheral drift compensation calc and new latency or
1067 * central terminate acked
1068 */
1069 ticks_drift_plus = 0U;
1070 ticks_drift_minus = 0U;
1071 ticks_slot_plus = 0U;
1072 ticks_slot_minus = 0U;
1073
1074 if (done->extra.trx_cnt) {
1075 if (0) {
1076 #if defined(CONFIG_BT_PERIPHERAL)
1077 } else if (lll->role) {
1078 if (!conn->periph.drift_skip) {
1079 ull_drift_ticks_get(done, &ticks_drift_plus,
1080 &ticks_drift_minus);
1081
1082 if (ticks_drift_plus || ticks_drift_minus) {
1083 conn->periph.drift_skip =
1084 ull_ref_get(&conn->ull);
1085 }
1086 } else {
1087 conn->periph.drift_skip--;
1088 }
1089
1090 if (!ull_tx_q_peek(&conn->tx_q)) {
1091 ull_conn_tx_demux(UINT8_MAX);
1092 }
1093
1094 if (ull_tx_q_peek(&conn->tx_q) ||
1095 memq_peek(lll->memq_tx.head,
1096 lll->memq_tx.tail, NULL)) {
1097 lll->latency_event = 0U;
1098 } else if (lll->periph.latency_enabled) {
1099 lll->latency_event = lll->latency;
1100 }
1101 #endif /* CONFIG_BT_PERIPHERAL */
1102 }
1103
1104 /* Reset connection failed to establish countdown */
1105 conn->connect_expire = 0U;
1106 }
1107
1108 elapsed_event = latency_event + lll->lazy_prepare + 1U;
1109
1110 /* Reset supervision countdown */
1111 if (done->extra.crc_valid && !done->extra.is_aborted) {
1112 conn->supervision_expire = 0U;
1113 }
1114
1115 /* check connection failed to establish */
1116 else if (conn->connect_expire) {
1117 if (conn->connect_expire > elapsed_event) {
1118 conn->connect_expire -= elapsed_event;
1119 } else {
1120 conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
1121
1122 return;
1123 }
1124 }
1125
1126 /* if anchor point not sync-ed, start supervision timeout, and break
1127 * latency if any.
1128 */
1129 else {
1130 /* Start supervision timeout, if not started already */
1131 if (!conn->supervision_expire) {
1132 uint32_t conn_interval_us;
1133
1134 if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
1135 conn_interval_us = conn->lll.interval *
1136 CONN_INT_UNIT_US;
1137 } else {
1138 conn_interval_us = (conn->lll.interval + 1U) *
1139 CONN_LOW_LAT_INT_UNIT_US;
1140 }
1141
1142 conn->supervision_expire = RADIO_CONN_EVENTS(
1143 (conn->supervision_timeout * 10U * USEC_PER_MSEC),
1144 conn_interval_us);
1145 }
1146 }
1147
1148 /* check supervision timeout */
1149 force = 0U;
1150 force_lll = 0U;
1151 if (conn->supervision_expire) {
1152 if (conn->supervision_expire > elapsed_event) {
1153 conn->supervision_expire -= elapsed_event;
1154
1155 /* break latency */
1156 lll->latency_event = 0U;
1157
1158 /* Force both central and peripheral when close to
1159 * supervision timeout.
1160 */
1161 if (conn->supervision_expire <= 6U) {
1162 force_lll = 1U;
1163
1164 force = 1U;
1165 }
1166 #if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
1167 /* use randomness to force peripheral role when anchor
1168 * points are being missed.
1169 */
1170 else if (lll->role) {
1171 if (latency_event) {
1172 force = 1U;
1173 } else {
1174 force = conn->periph.force & 0x01;
1175
1176 /* rotate force bits */
1177 conn->periph.force >>= 1U;
1178 if (force) {
1179 conn->periph.force |= BIT(31);
1180 }
1181 }
1182 }
1183 #endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
1184 } else {
1185 conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);
1186
1187 return;
1188 }
1189 }
1190
1191 lll->forced = force_lll;
1192
1193 /* check procedure timeout */
1194 uint8_t error_code;
1195
1196 if (-ETIMEDOUT == ull_cp_prt_elapse(conn, elapsed_event, &error_code)) {
1197 conn_cleanup(conn, error_code);
1198
1199 return;
1200 }
1201
1202 #if defined(CONFIG_BT_CTLR_LE_PING)
1203 /* check apto */
1204 if (conn->apto_expire != 0U) {
1205 if (conn->apto_expire > elapsed_event) {
1206 conn->apto_expire -= elapsed_event;
1207 } else {
1208 struct node_rx_hdr *rx;
1209
1210 rx = ll_pdu_rx_alloc();
1211 if (rx) {
1212 conn->apto_expire = 0U;
1213
1214 rx->handle = lll->handle;
1215 rx->type = NODE_RX_TYPE_APTO;
1216
1217 /* enqueue apto event into rx queue */
1218 ll_rx_put_sched(rx->link, rx);
1219 } else {
1220 conn->apto_expire = 1U;
1221 }
1222 }
1223 }
1224
1225 /* check appto */
1226 if (conn->appto_expire != 0U) {
1227 if (conn->appto_expire > elapsed_event) {
1228 conn->appto_expire -= elapsed_event;
1229 } else {
1230 conn->appto_expire = 0U;
1231
1232 /* Initiate LE_PING procedure */
1233 ull_cp_le_ping(conn);
1234 }
1235 }
1236 #endif /* CONFIG_BT_CTLR_LE_PING */
1237
1238 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1239 /* Check if the CTE_REQ procedure is periodic and the counter has been started.
1240 * req_expire is set when a new CTE_REQ is started, and again after completion of the last periodic run.
1241 */
1242 if (conn->llcp.cte_req.req_interval != 0U && conn->llcp.cte_req.req_expire != 0U) {
1243 if (conn->llcp.cte_req.req_expire > elapsed_event) {
1244 conn->llcp.cte_req.req_expire -= elapsed_event;
1245 } else {
1246 uint8_t err;
1247
1248 /* Set req_expire to zero to mark that new periodic CTE_REQ was started.
1249 * The counter is re-started after completion of this run.
1250 */
1251 conn->llcp.cte_req.req_expire = 0U;
1252
1253 err = ull_cp_cte_req(conn, conn->llcp.cte_req.min_cte_len,
1254 conn->llcp.cte_req.cte_type);
1255
1256 if (err == BT_HCI_ERR_CMD_DISALLOWED) {
1257 /* Conditions have changed, e.g. PHY was changed to CODED.
1258 * New CTE REQ is not possible. Disable the periodic requests.
1259 */
1260 ull_cp_cte_req_set_disable(conn);
1261 }
1262 }
1263 }
1264 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1265
1266 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1267 /* generate RSSI event */
1268 if (lll->rssi_sample_count == 0U) {
1269 struct node_rx_pdu *rx;
1270 struct pdu_data *pdu_data_rx;
1271
1272 rx = ll_pdu_rx_alloc();
1273 if (rx) {
1274 lll->rssi_reported = lll->rssi_latest;
1275 lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
1276
1277 /* Prepare the rx packet structure */
1278 rx->hdr.handle = lll->handle;
1279 rx->hdr.type = NODE_RX_TYPE_RSSI;
1280
1281 /* prepare connection RSSI structure */
1282 pdu_data_rx = (void *)rx->pdu;
1283 pdu_data_rx->rssi = lll->rssi_reported;
1284
1285 /* enqueue connection RSSI structure into queue */
1286 ll_rx_put_sched(rx->hdr.link, rx);
1287 }
1288 }
1289 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1290
1291 /* check if latency needs update */
1292 lazy = 0U;
1293 if ((force) || (latency_event != lll->latency_event)) {
1294 lazy = lll->latency_event + 1U;
1295 }
1296
1297 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
1298 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) || defined(CONFIG_BT_CTLR_PHY)
1299 if (lll->evt_len_upd) {
1300 uint32_t ready_delay, rx_time, tx_time, ticks_slot, slot_us;
1301
1302 lll->evt_len_upd = 0;
1303
1304 #if defined(CONFIG_BT_CTLR_PHY)
1305 ready_delay = (lll->role) ?
1306 lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8) :
1307 lll_radio_tx_ready_delay_get(lll->phy_tx, lll->phy_flags);
1308
1309 #if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
1310 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1311 tx_time = lll->dle.eff.max_tx_time;
1312 rx_time = lll->dle.eff.max_rx_time;
1313
1314 #else /* CONFIG_BT_CTLR_DATA_LENGTH */
1315 tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1316 PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
1317 rx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1318 PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
1319 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1320
1321 #else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
1322 tx_time = PDU_MAX_US(0U, 0U, lll->phy_tx);
1323 rx_time = PDU_MAX_US(0U, 0U, lll->phy_rx);
1324 #endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
1325
1326 #else /* CONFIG_BT_CTLR_PHY */
1327 ready_delay = (lll->role) ?
1328 lll_radio_rx_ready_delay_get(0, 0) :
1329 lll_radio_tx_ready_delay_get(0, 0);
1330 #if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
1331 tx_time = PDU_DC_MAX_US(lll->dle.eff.max_tx_octets, 0);
1332 rx_time = PDU_DC_MAX_US(lll->dle.eff.max_rx_octets, 0);
1333
1334 #else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
1335 tx_time = PDU_MAX_US(0U, 0U, PHY_1M);
1336 rx_time = PDU_MAX_US(0U, 0U, PHY_1M);
1337 #endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
1338 #endif /* CONFIG_BT_CTLR_PHY */
1339
1340 /* Calculate event time reservation */
1341 slot_us = tx_time + rx_time;
1342 slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
1343 slot_us += ready_delay;
1344
1345 if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
1346 !conn->lll.role) {
1347 slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1348 }
1349
1350 ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1351 if (ticks_slot > conn->ull.ticks_slot) {
1352 ticks_slot_plus = ticks_slot - conn->ull.ticks_slot;
1353 } else {
1354 ticks_slot_minus = conn->ull.ticks_slot - ticks_slot;
1355 }
1356 conn->ull.ticks_slot = ticks_slot;
1357 }
1358 #endif /* CONFIG_BT_CTLR_DATA_LENGTH || CONFIG_BT_CTLR_PHY */
1359 #else /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1360 ticks_slot_plus = 0;
1361 ticks_slot_minus = 0;
1362 #endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1363
1364 /* update conn ticker */
1365 if (ticks_drift_plus || ticks_drift_minus ||
1366 ticks_slot_plus || ticks_slot_minus ||
1367 lazy || force) {
1368 uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
1369 struct ll_conn *conn_ll = lll->hdr.parent;
1370 uint32_t ticker_status;
1371
1372 /* Call to ticker_update can fail under the race
1373 * condition wherein the peripheral role is being stopped but
1374 * at the same time it is preempted by peripheral event that
1375 * gets into close state. Accept failure when peripheral role
1376 * is being stopped.
1377 */
1378 ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
1379 TICKER_USER_ID_ULL_HIGH,
1380 ticker_id,
1381 ticks_drift_plus, ticks_drift_minus,
1382 ticks_slot_plus, ticks_slot_minus,
1383 lazy, force,
1384 ticker_update_conn_op_cb,
1385 conn_ll);
1386 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1387 (ticker_status == TICKER_STATUS_BUSY) ||
1388 ((void *)conn_ll == ull_disable_mark_get()));
1389 }
1390 }
1391
1392 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1393 void ull_conn_lll_tx_demux_sched(struct lll_conn *lll)
1394 {
1395 static memq_link_t link;
1396 static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1397
1398 mfy.param = HDR_LLL2ULL(lll);
1399
1400 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1U, &mfy);
1401 }
1402 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1403
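/* Move up to 'count' buffers from the conn_tx FIFO into the respective
 * connection's ULL Tx queue. Buffers destined for a handle that is no
 * longer connected are marked with the reserved LLID and acked back so
 * that the host buffer gets released.
 */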
1404 void ull_conn_tx_demux(uint8_t count)
1405 {
1406 do {
1407 struct lll_tx *lll_tx;
1408 struct ll_conn *conn;
1409
1410 lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
1411 if (!lll_tx) {
1412 break;
1413 }
1414
1415 conn = ll_connected_get(lll_tx->handle);
1416 if (conn) {
1417 struct node_tx *tx = lll_tx->node;
1418
1419 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1420 if (empty_data_start_release(conn, tx)) {
1421 goto ull_conn_tx_demux_release;
1422 }
1423 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1424
1425 ull_tx_q_enqueue_data(&conn->tx_q, tx);
1426 } else {
1427 struct node_tx *tx = lll_tx->node;
1428 struct pdu_data *p = (void *)tx->pdu;
1429
1430 p->ll_id = PDU_DATA_LLID_RESV;
1431 ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
1432 }
1433
1434 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1435 ull_conn_tx_demux_release:
1436 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1437
1438 MFIFO_DEQUEUE(conn_tx);
1439 } while (--count);
1440 }
1441
1442 void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
1443 {
1444 while (count--) {
1445 struct node_tx *tx;
1446 memq_link_t *link;
1447
1448 tx = tx_ull_dequeue(conn, NULL);
1449 if (!tx) {
1450 /* No more tx nodes available */
1451 break;
1452 }
1453
1454 link = mem_acquire(&mem_link_tx.free);
1455 LL_ASSERT(link);
1456
1457 /* Enqueue towards LLL */
1458 memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1459 }
1460 }
1461
1462 void ull_conn_link_tx_release(void *link)
1463 {
1464 mem_release(link, &mem_link_tx.free);
1465 }
1466
1467 uint8_t ull_conn_ack_last_idx_get(void)
1468 {
1469 return mfifo_fifo_conn_ack.l;
1470 }
1471
1472 memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
1473 struct node_tx **tx)
1474 {
1475 struct lll_tx *lll_tx;
1476
1477 lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
1478 if (!lll_tx) {
1479 return NULL;
1480 }
1481
1482 *ack_last = mfifo_fifo_conn_ack.l;
1483
1484 *handle = lll_tx->handle;
1485 *tx = lll_tx->node;
1486
1487 return (*tx)->link;
1488 }
1489
1490 memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
1491 struct node_tx **tx)
1492 {
1493 struct lll_tx *lll_tx;
1494
1495 lll_tx = mfifo_dequeue_get(mfifo_fifo_conn_ack.m, mfifo_conn_ack.s,
1496 mfifo_fifo_conn_ack.f, last);
1497 if (!lll_tx) {
1498 return NULL;
1499 }
1500
1501 *handle = lll_tx->handle;
1502 *tx = lll_tx->node;
1503
1504 return (*tx)->link;
1505 }
1506
1507 void *ull_conn_ack_dequeue(void)
1508 {
1509 return MFIFO_DEQUEUE(conn_ack);
1510 }
1511
1512 void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
1513 {
1514 struct lll_tx *lll_tx;
1515 uint8_t idx;
1516
1517 idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
1518 LL_ASSERT(lll_tx);
1519
1520 lll_tx->handle = handle;
1521 lll_tx->node = tx;
1522
1523 MFIFO_ENQUEUE(conn_ack, idx);
1524 }
1525
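/* Process a transmitted node acknowledgment: LL Control PDUs are
 * reported to the LLCP and, when the memq link points back at the node
 * (control buffer pool origin), released there; data PDUs, or PDUs
 * acked after the handle got invalidated, are put to the host Tx ack
 * queue.
 */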
1526 void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
1527 {
1528 struct pdu_data *pdu_tx;
1529
1530 pdu_tx = (void *)tx->pdu;
1531 LL_ASSERT(pdu_tx->len);
1532
1533 if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1534 if (handle != LLL_HANDLE_INVALID) {
1535 struct ll_conn *conn = ll_conn_get(handle);
1536
1537 ull_cp_tx_ack(conn, tx);
1538 }
1539
1540 /* release ctrl mem if the link points back to the node itself */
1541 if (link->next == (void *)tx) {
1542 LL_ASSERT(link->next);
1543
1544 struct ll_conn *conn = ll_connected_get(handle);
1545
1546 ull_cp_release_tx(conn, tx);
1547 return;
1548 } else if (!tx) {
1549 /* Tx Node re-used to enqueue new ctrl PDU */
1550 return;
1551 }
1552 LL_ASSERT(!link->next);
1553 } else if (handle == LLL_HANDLE_INVALID) {
1554 pdu_tx->ll_id = PDU_DATA_LLID_RESV;
1555 } else {
1556 LL_ASSERT(handle != LLL_HANDLE_INVALID);
1557 }
1558
1559 ll_tx_ack_put(handle, tx);
1560 }
1561
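/* Derive the maximum Tx payload octets that fit within the effective
 * maximum Tx time on the current Tx PHY. For example, on the 1M PHY an
 * effective max_tx_time of 2120 us yields (2120 / 8) - 10 = 255 octets,
 * reduced by 4 octets for the MIC when the link is encrypted, and
 * finally clamped to the effective maximum Tx octets.
 */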
1562 uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
1563 {
1564 uint16_t max_tx_octets;
1565
1566 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1567 #if defined(CONFIG_BT_CTLR_PHY)
1568 switch (lll->phy_tx_time) {
1569 default:
1570 case PHY_1M:
1571 /* 1M PHY, 1us = 1 bit, hence divide by 8.
1572 * Deduct 10 bytes for preamble (1), access address (4),
1573 * header (2), and CRC (3).
1574 */
1575 max_tx_octets = (lll->dle.eff.max_tx_time >> 3) - 10;
1576 break;
1577
1578 case PHY_2M:
1579 /* 2M PHY, 1us = 2 bits, hence divide by 4.
1580 * Deduct 11 bytes for preamble (2), access address (4),
1581 * header (2), and CRC (3).
1582 */
1583 max_tx_octets = (lll->dle.eff.max_tx_time >> 2) - 11;
1584 break;
1585
1586 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1587 case PHY_CODED:
1588 if (lll->phy_flags & 0x01) {
1589 /* S8 Coded PHY, 8us = 1 bit, hence divide by
1590 * 64.
1591 * Subtract time for preamble (80), AA (256),
1592 * CI (16), TERM1 (24), CRC (192) and
1593 * TERM2 (24), total 592 us.
1594 * Subtract 2 bytes for header.
1595 */
1596 max_tx_octets = ((lll->dle.eff.max_tx_time - 592) >>
1597 6) - 2;
1598 } else {
1599 /* S2 Coded PHY, 2us = 1 bit, hence divide by
1600 * 16.
1601 * Subtract time for preamble (80), AA (256),
1602 * CI (16), TERM1 (24), CRC (48) and
1603 * TERM2 (6), total 430 us.
1604 * Subtract 2 bytes for header.
1605 */
1606 max_tx_octets = ((lll->dle.eff.max_tx_time - 430) >>
1607 4) - 2;
1608 }
1609 break;
1610 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1611 }
1612
1613 #if defined(CONFIG_BT_CTLR_LE_ENC)
1614 if (lll->enc_tx) {
1615 /* deduct the MIC */
1616 max_tx_octets -= 4U;
1617 }
1618 #endif /* CONFIG_BT_CTLR_LE_ENC */
1619
1620 if (max_tx_octets > lll->dle.eff.max_tx_octets) {
1621 max_tx_octets = lll->dle.eff.max_tx_octets;
1622 }
1623
1624 #else /* !CONFIG_BT_CTLR_PHY */
1625 max_tx_octets = lll->dle.eff.max_tx_octets;
1626 #endif /* !CONFIG_BT_CTLR_PHY */
1627 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
1628 max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1629 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
1630 return max_tx_octets;
1631 }
1632
1633 /**
1634 * @brief Initialize pdu_data members that are read only in lower link layer.
1635 *
1636 * @param pdu Pointer to pdu_data object to be initialized
1637 */
1638 void ull_pdu_data_init(struct pdu_data *pdu)
1639 {
1640 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1641 pdu->cp = 0U;
1642 pdu->octet3.resv[0] = 0U;
1643 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1644 }
1645
1646 static int init_reset(void)
1647 {
1648 /* Initialize conn pool. */
1649 mem_init(conn_pool, sizeof(struct ll_conn),
1650 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
1651
1652 /* Initialize tx pool. */
1653 mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONN_DATA_BUFFERS,
1654 &mem_conn_tx.free);
1655
1656 /* Initialize tx link pool. */
1657 mem_init(mem_link_tx.pool, sizeof(memq_link_t),
1658 (CONN_DATA_BUFFERS +
1659 LLCP_TX_CTRL_BUF_COUNT),
1660 &mem_link_tx.free);
1661
1662 /* Initialize control procedure system. */
1663 ull_cp_init();
1664
1665 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1666 /* Reset CPR mutex */
1667 cpr_active_reset();
1668 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1669
1670 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1671 /* Initialize the DLE defaults */
1672 default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1673 default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
1674 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1675
1676 #if defined(CONFIG_BT_CTLR_PHY)
1677 /* Initialize the PHY defaults */
1678 default_phy_tx = PHY_1M;
1679 default_phy_rx = PHY_1M;
1680
1681 #if defined(CONFIG_BT_CTLR_PHY_2M)
1682 default_phy_tx |= PHY_2M;
1683 default_phy_rx |= PHY_2M;
1684 #endif /* CONFIG_BT_CTLR_PHY_2M */
1685
1686 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1687 default_phy_tx |= PHY_CODED;
1688 default_phy_rx |= PHY_CODED;
1689 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1690 #endif /* CONFIG_BT_CTLR_PHY */
1691
1692 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1693 memset(&default_past_params, 0, sizeof(struct past_params));
1694 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1695
1696 return 0;
1697 }
1698
1699 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1700 static void tx_demux_sched(struct ll_conn *conn)
1701 {
1702 static memq_link_t link;
1703 static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1704
1705 mfy.param = conn;
1706
1707 mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1708 }
1709 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
1710
1711 static void tx_demux(void *param)
1712 {
1713 ull_conn_tx_demux(1);
1714
1715 ull_conn_tx_lll_enqueue(param, 1);
1716 }
1717
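/* Dequeue the next Tx node from the connection's ULL Tx queue and tag
 * its origin: a self-referencing 'next' pointer marks an LLCP control
 * pool node, NULL marks a data pool node. The marker is later carried
 * on the memq link so the ack path can release the node to the correct
 * pool.
 */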
1718 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *unused)
1719 {
1720 struct node_tx *tx = NULL;
1721
1722 tx = ull_tx_q_dequeue(&conn->tx_q);
1723 if (tx) {
1724 struct pdu_data *pdu_tx;
1725
1726 pdu_tx = (void *)tx->pdu;
1727 if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1728 /* Mark the tx node as belonging to the ctrl pool */
1729 tx->next = tx;
1730 } else {
1731 /* Mark the tx node as belonging to the data pool */
1732 tx->next = NULL;
1733 }
1734 }
1735 return tx;
1736 }
1737
1738 static void ticker_update_conn_op_cb(uint32_t status, void *param)
1739 {
1740 /* Peripheral drift compensation succeeds, or it fails in a race condition
1741 * when disconnecting or during a connection update (race between ticker_update
1742 * and ticker_stop calls).
1743 */
1744 LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1745 param == ull_update_mark_get() ||
1746 param == ull_disable_mark_get());
1747 }
1748
1749 static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1750 {
1751 void *p;
1752
1753 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1754
1755 p = ull_update_mark(param);
1756 LL_ASSERT(p == param);
1757 }
1758
1759 static void ticker_start_conn_op_cb(uint32_t status, void *param)
1760 {
1761 void *p;
1762
1763 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1764
1765 p = ull_update_unmark(param);
1766 LL_ASSERT(p == param);
1767 }
1768
1769 static void conn_setup_adv_scan_disabled_cb(void *param)
1770 {
1771 struct node_rx_ftr *ftr;
1772 struct node_rx_pdu *rx;
1773 struct lll_conn *lll;
1774
1775 /* NOTE: LLL conn context SHALL be after lll_hdr in
1776 * struct lll_adv and struct lll_scan.
1777 */
1778 rx = param;
1779 ftr = &(rx->rx_ftr);
1780 lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1781 sizeof(struct lll_hdr)));
1782
1783 if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1784 struct ull_hdr *hdr;
1785
1786 /* Prevent fast ADV re-scheduling from re-triggering */
1787 hdr = HDR_LLL2ULL(ftr->param);
1788 hdr->disabled_cb = NULL;
1789 }
1790
1791 switch (lll->role) {
1792 #if defined(CONFIG_BT_CENTRAL)
1793 case 0:
1794 ull_central_setup(rx, ftr, lll);
1795 break;
1796 #endif /* CONFIG_BT_CENTRAL */
1797
1798 #if defined(CONFIG_BT_PERIPHERAL)
1799 case 1:
1800 ull_periph_setup(rx, ftr, lll);
1801 break;
1802 #endif /* CONFIG_BT_PERIPHERAL */
1803
1804 default:
1805 LL_ASSERT(0);
1806 break;
1807 }
1808 }
1809
1810 static inline void disable(uint16_t handle)
1811 {
1812 struct ll_conn *conn;
1813 int err;
1814
1815 conn = ll_conn_get(handle);
1816
1817 err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1818 conn, &conn->lll);
1819 LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
1820
1821 conn->lll.handle = LLL_HANDLE_INVALID;
1822 conn->lll.link_tx_free = NULL;
1823 }
1824
1825 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1826 static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1827 {
1828 struct ll_conn_iso_stream *cis;
1829
1830 cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1831 if (cis) {
1832 struct node_rx_pdu *rx;
1833 uint8_t reason;
1834
1835 /* More associated CISes - stop next */
1836 rx = (void *)&conn->llcp_terminate.node_rx;
1837 reason = *(uint8_t *)rx->pdu;
1838
1839 ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1840 reason);
1841 } else {
1842 /* No more CISes associated with conn - finalize */
1843 conn_cleanup_finalize(conn);
1844 }
1845 }
1846 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1847
1848 static void conn_cleanup_finalize(struct ll_conn *conn)
1849 {
1850 struct lll_conn *lll = &conn->lll;
1851 uint32_t ticker_status;
1852
1853 ull_cp_state_set(conn, ULL_CP_DISCONNECTED);
1854
1855 /* Update tx buffer queue handling */
1856 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
1857 ull_cp_update_tx_buffer_queue(conn);
1858 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
1859 ull_cp_release_nodes(conn);
1860
1861 /* flush demux-ed Tx buffer still in ULL context */
1862 tx_ull_flush(conn);
1863
1864 /* Stop Central or Peripheral role ticker */
1865 ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1866 TICKER_USER_ID_ULL_HIGH,
1867 TICKER_ID_CONN_BASE + lll->handle,
1868 ticker_stop_op_cb, conn);
1869 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1870 (ticker_status == TICKER_STATUS_BUSY));
1871
1872 /* Invalidate the connection context */
1873 lll->handle = LLL_HANDLE_INVALID;
1874
1875 /* Demux and flush Tx PDUs that remain enqueued in thread context */
1876 ull_conn_tx_demux(UINT8_MAX);
1877 }
1878
1879 static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
1880 {
1881 struct node_rx_pdu *rx;
1882
1883 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1884 struct ll_conn_iso_stream *cis;
1885 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1886
1887 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1888 /* Reset CPR mutex */
1889 cpr_active_check_and_reset(conn);
1890 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1891
1892 /* Only the termination structure is populated here in the ULL
1893 * context; the actual enqueue happens in the LLL context in
1894 * tx_lll_flush. This avoids passing the reason value and
1895 * handle through the mayfly scheduling of the
1896 * tx_lll_flush function.
1897 */
1898 rx = (void *)&conn->llcp_terminate.node_rx.rx;
1899 rx->hdr.handle = conn->lll.handle;
1900 rx->hdr.type = NODE_RX_TYPE_TERMINATE;
1901 *((uint8_t *)rx->pdu) = reason;
1902
1903 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1904 cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1905 if (cis) {
1906 /* Stop CIS and defer cleanup to after teardown. */
1907 ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1908 reason);
1909 return;
1910 }
1911 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1912
1913 conn_cleanup_finalize(conn);
1914 }
1915
1916 static void tx_ull_flush(struct ll_conn *conn)
1917 {
1918 struct node_tx *tx;
1919
1920 ull_tx_q_resume_data(&conn->tx_q);
1921
1922 tx = tx_ull_dequeue(conn, NULL);
1923 while (tx) {
1924 memq_link_t *link;
1925
1926 link = mem_acquire(&mem_link_tx.free);
1927 LL_ASSERT(link);
1928
1929 /* Enqueue towards LLL */
1930 memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1931
1932 tx = tx_ull_dequeue(conn, NULL);
1933 }
1934 }
1935
1936 static void ticker_stop_op_cb(uint32_t status, void *param)
1937 {
1938 static memq_link_t link;
1939 static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
1940 uint32_t ret;
1941
1942 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1943
1944 	/* Check for any pending LLL events that need to be aborted */
1945 mfy.param = param;
1946 ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1947 TICKER_USER_ID_ULL_HIGH, 0, &mfy);
1948 LL_ASSERT(!ret);
1949 }
1950
1951 static void conn_disable(void *param)
1952 {
1953 struct ll_conn *conn;
1954 struct ull_hdr *hdr;
1955
1956 	/* Check ref count to determine if there are any pending LLL events in the pipeline */
1957 conn = param;
1958 hdr = &conn->ull;
1959 if (ull_ref_get(hdr)) {
1960 static memq_link_t link;
1961 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1962 uint32_t ret;
1963
1964 mfy.param = &conn->lll;
1965
1966 /* Setup disabled callback to be called when ref count
1967 * returns to zero.
1968 */
1969 LL_ASSERT(!hdr->disabled_cb);
1970 hdr->disabled_param = mfy.param;
1971 hdr->disabled_cb = disabled_cb;
1972
1973 /* Trigger LLL disable */
1974 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1975 TICKER_USER_ID_LLL, 0, &mfy);
1976 LL_ASSERT(!ret);
1977 } else {
1978 /* No pending LLL events */
1979 disabled_cb(&conn->lll);
1980 }
1981 }
1982
1983 static void disabled_cb(void *param)
1984 {
1985 static memq_link_t link;
1986 static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
1987 uint32_t ret;
1988
1989 mfy.param = param;
1990 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1991 TICKER_USER_ID_LLL, 0, &mfy);
1992 LL_ASSERT(!ret);
1993 }
1994
1995 static void tx_lll_flush(void *param)
1996 {
1997 struct node_rx_pdu *rx;
1998 struct lll_conn *lll;
1999 struct ll_conn *conn;
2000 struct node_tx *tx;
2001 memq_link_t *link;
2002 uint16_t handle;
2003
2004 /* Get reference to ULL context */
2005 lll = param;
2006 conn = HDR_LLL2ULL(lll);
2007 handle = ll_conn_handle_get(conn);
2008
2009 lll_conn_flush(handle, lll);
2010
2011 link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
2012 (void **)&tx);
2013 while (link) {
2014 uint8_t idx;
2015 struct lll_tx *tx_buf;
2016
2017 idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx_buf);
2018 LL_ASSERT(tx_buf);
2019
2020 tx_buf->handle = LLL_HANDLE_INVALID;
2021 tx_buf->node = tx;
2022
2023 /* TX node UPSTREAM, i.e. Tx node ack path */
2024 link->next = tx->next; /* Indicates ctrl pool or data pool */
2025 tx->next = link;
2026
2027 MFIFO_ENQUEUE(conn_ack, idx);
2028
2029 link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
2030 (void **)&tx);
2031 }
2032
2033 /* Get the terminate structure reserved in the connection context.
2034 * The terminate reason and connection handle should already be
2035 * populated before this mayfly function was scheduled.
2036 */
2037 rx = (void *)&conn->llcp_terminate.node_rx;
2038 LL_ASSERT(rx->hdr.link);
2039 link = rx->hdr.link;
2040 rx->hdr.link = NULL;
2041
2042 /* Enqueue the terminate towards ULL context */
2043 ull_rx_put_sched(link, rx);
2044 }
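
/* Note on the flush above: each Tx node still queued towards LLL is pushed
 * onto the conn_ack MFIFO with its handle set to LLL_HANDLE_INVALID; as a
 * sketch of the intent, this lets the generic Tx ack path release the node
 * and its memq link while marking it as flushed rather than as transmitted
 * on a valid connection handle.
 */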
2045
2046 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
2047 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
2048 {
2049 struct pdu_data *p = (void *)tx->pdu;
2050
2051 if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
2052 conn->start_empty = 1U;
2053
2054 ll_tx_ack_put(conn->lll.handle, tx);
2055
2056 return -EINVAL;
2057 } else if (p->len && conn->start_empty) {
2058 conn->start_empty = 0U;
2059
2060 if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
2061 p->ll_id = PDU_DATA_LLID_DATA_START;
2062 }
2063 }
2064
2065 return 0;
2066 }
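
/* Illustrative sequence, assuming a host that enqueues its fragments this
 * way: an empty <DATA_START, len 0> is acked straight back to the host (the
 * -EINVAL return drops it from the Tx path) and start_empty is latched; the
 * next non-empty fragment, if tagged DATA_CONTINUE, is promoted to DATA_START
 * so that the peer still receives a well-formed start fragment.
 */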
2067 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
2068
2069 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
2070 static uint8_t force_md_cnt_calc(struct lll_conn *lll_connection, uint32_t tx_rate)
2071 {
2072 uint32_t time_incoming, time_outgoing;
2073 uint8_t force_md_cnt;
2074 uint8_t phy_flags;
2075 uint8_t mic_size;
2076 uint8_t phy;
2077
2078 #if defined(CONFIG_BT_CTLR_PHY)
2079 phy = lll_connection->phy_tx;
2080 phy_flags = lll_connection->phy_flags;
2081 #else /* !CONFIG_BT_CTLR_PHY */
2082 phy = PHY_1M;
2083 phy_flags = 0U;
2084 #endif /* !CONFIG_BT_CTLR_PHY */
2085
2086 #if defined(CONFIG_BT_CTLR_LE_ENC)
2087 mic_size = PDU_MIC_SIZE * lll_connection->enc_tx;
2088 #else /* !CONFIG_BT_CTLR_LE_ENC */
2089 mic_size = 0U;
2090 #endif /* !CONFIG_BT_CTLR_LE_ENC */
2091
2092 time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
2093 1000000UL / tx_rate;
2094 time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
2095 phy_flags) +
2096 PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
2097 (EVENT_IFS_US << 1);
2098
2099 force_md_cnt = 0U;
2100 if (time_incoming > time_outgoing) {
2101 uint32_t delta;
2102 uint32_t time_keep_alive;
2103
2104 delta = (time_incoming << 1) - time_outgoing;
2105 time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
2106 EVENT_IFS_US) << 1;
2107 force_md_cnt = (delta + (time_keep_alive - 1)) /
2108 time_keep_alive;
2109 LOG_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
2110 "keepalive= %u, force_md_cnt = %u.",
2111 time_incoming, time_outgoing, delta, time_keep_alive,
2112 force_md_cnt);
2113 }
2114
2115 return force_md_cnt;
2116 }
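
/* Rough worked example, assuming an unencrypted 1M PHY link, a 251 octet
 * LL_LENGTH_OCTETS_RX_MAX and an incoming tx_rate of 500 kbit/s:
 *   time_incoming   = 251 * 8 * 1000000 / 500000             = 4016 us
 *   time_outgoing   = 2088 us (full PDU) + 80 us (empty PDU) + 2 * 150 us IFS
 *                   = 2468 us
 *   delta           = (2 * 4016) - 2468                       = 5564 us
 *   time_keep_alive = (80 us + 150 us) * 2                    = 460 us
 *   force_md_cnt    = ceil(5564 / 460)                        = 13
 * i.e. the MD bit would be forced for about 13 consecutive PDUs to keep the
 * connection event open while data arrives faster than it is drained.
 */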
2117 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
2118
2119 #if defined(CONFIG_BT_CTLR_LE_ENC)
2120 /**
2121 * @brief Pause the data path of a rx queue.
2122 */
2123 void ull_conn_pause_rx_data(struct ll_conn *conn)
2124 {
2125 conn->pause_rx_data = 1U;
2126 }
2127
2128 /**
2129 * @brief Resume the data path of a rx queue.
2130 */
2131 void ull_conn_resume_rx_data(struct ll_conn *conn)
2132 {
2133 conn->pause_rx_data = 0U;
2134 }
2135 #endif /* CONFIG_BT_CTLR_LE_ENC */
2136
2137 uint16_t ull_conn_event_counter(struct ll_conn *conn)
2138 {
2139 struct lll_conn *lll;
2140 uint16_t event_counter;
2141
2142 lll = &conn->lll;
2143
2144 /* Calculate current event counter. If refcount is non-zero, we have called
2145 * prepare and the LLL implementation has calculated and incremented the event
2146 * counter (RX path). In this case we need to subtract one from the current
2147 * event counter.
2148 * Otherwise we are in the TX path, and we calculate the current event counter
2149 * similar to LLL by taking the expected event counter value plus accumulated
2150 * latency.
2151 */
2152 if (ull_ref_get(&conn->ull)) {
2153 /* We are in post-prepare (RX path). Event counter is already
2154 * calculated and incremented by 1 for next event.
2155 */
2156 event_counter = lll->event_counter - 1;
2157 } else {
2158 event_counter = lll->event_counter + lll->latency_prepare +
2159 conn->llcp.prep.lazy;
2160 }
2161
2162 return event_counter;
2163 }
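
/* Rough example with assumed values: if lll->event_counter is 100 and an LLL
 * prepare is in flight (ull_ref_get() != 0), the counter for the current
 * event is 100 - 1 = 99, since LLL has already advanced it for the next
 * event. Without a prepare in flight, with lll->latency_prepare = 2 and
 * conn->llcp.prep.lazy = 1, the upcoming event counter is 100 + 2 + 1 = 103.
 */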
2164 static void ull_conn_update_ticker(struct ll_conn *conn,
2165 uint32_t ticks_win_offset,
2166 uint32_t ticks_slot_overhead,
2167 uint32_t periodic_us,
2168 uint32_t ticks_at_expire)
2169 {
2170 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2171 	/* Disable the ticker job, in order to chain the stop and start
2172 	 * operations and avoid the RTC being stopped if no tickers are active.
2173 	 */
2174 uint32_t mayfly_was_enabled =
2175 mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW);
2176
2177 mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0U);
2178 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2179
2180 /* start periph/central with new timings */
2181 uint8_t ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
2182 uint32_t ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2183 ticker_id_conn, ticker_stop_conn_op_cb, (void *)conn);
2184 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2185 (ticker_status == TICKER_STATUS_BUSY));
2186 ticker_status = ticker_start(
2187 TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, ticker_id_conn, ticks_at_expire,
2188 ticks_win_offset, HAL_TICKER_US_TO_TICKS(periodic_us),
2189 HAL_TICKER_REMAINDER(periodic_us),
2190 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2191 TICKER_NULL_LAZY,
2192 #else /* !CONFIG_BT_TICKER_LOW_LAT */
2193 TICKER_LAZY_MUST_EXPIRE_KEEP,
2194 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2195 (ticks_slot_overhead + conn->ull.ticks_slot),
2196 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
2197 conn->lll.role == BT_HCI_ROLE_PERIPHERAL ?
2198 ull_periph_ticker_cb : ull_central_ticker_cb,
2199 #elif defined(CONFIG_BT_PERIPHERAL)
2200 ull_periph_ticker_cb,
2201 #else
2202 ull_central_ticker_cb,
2203 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CENTRAL */
2204 conn, ticker_start_conn_op_cb, (void *)conn);
2205 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2206 (ticker_status == TICKER_STATUS_BUSY));
2207
2208 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2209 /* enable ticker job, if disabled in this function */
2210 if (mayfly_was_enabled) {
2211 mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U);
2212 }
2213 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2214 }
2215
2216 void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
2217 uint32_t win_offset_us, uint16_t interval, uint16_t latency,
2218 uint16_t timeout, uint16_t instant)
2219 {
2220 uint16_t conn_interval_unit_old;
2221 uint16_t conn_interval_unit_new;
2222 uint32_t ticks_win_offset = 0U;
2223 	uint32_t conn_interval_old_us;
2224 	uint32_t conn_interval_new_us;
2225 uint32_t ticks_slot_overhead;
2226 uint16_t conn_interval_old;
2227 uint16_t conn_interval_new;
2228 uint32_t conn_interval_us;
2229 uint32_t ticks_at_expire;
2230 uint16_t instant_latency;
2231 uint32_t ready_delay_us;
2232 uint16_t event_counter;
2233 uint32_t periodic_us;
2234 uint16_t latency_upd;
2235 struct lll_conn *lll;
2236
2237 lll = &conn->lll;
2238
2239 /* Calculate current event counter */
2240 event_counter = ull_conn_event_counter(conn);
2241
2242 instant_latency = (event_counter - instant) & 0xFFFF;
2243
2244
2245 ticks_at_expire = conn->llcp.prep.ticks_at_expire;
2246
2247 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
2248 /* restore to normal prepare */
2249 if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
2250 uint32_t ticks_prepare_to_start =
2251 MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_preempt_to_start);
2252
2253 conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;
2254
2255 ticks_at_expire -= (conn->ull.ticks_prepare_to_start - ticks_prepare_to_start);
2256 }
2257 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
2258
2259 #if defined(CONFIG_BT_CTLR_PHY)
2260 ready_delay_us = lll_radio_tx_ready_delay_get(lll->phy_tx,
2261 lll->phy_flags);
2262 #else
2263 ready_delay_us = lll_radio_tx_ready_delay_get(0U, 0U);
2264 #endif
2265
2266 /* compensate for instant_latency due to laziness */
2267 if (lll->interval >= BT_HCI_LE_INTERVAL_MIN) {
2268 conn_interval_old = instant_latency * lll->interval;
2269 conn_interval_unit_old = CONN_INT_UNIT_US;
2270 } else {
2271 conn_interval_old = instant_latency * (lll->interval + 1U);
2272 conn_interval_unit_old = CONN_LOW_LAT_INT_UNIT_US;
2273 }
2274
2275 if (interval >= BT_HCI_LE_INTERVAL_MIN) {
2276 uint16_t max_tx_time;
2277 uint16_t max_rx_time;
2278 uint32_t slot_us;
2279
2280 conn_interval_new = interval;
2281 conn_interval_unit_new = CONN_INT_UNIT_US;
2282 lll->tifs_tx_us = EVENT_IFS_DEFAULT_US;
2283 lll->tifs_rx_us = EVENT_IFS_DEFAULT_US;
2284 lll->tifs_hcto_us = EVENT_IFS_DEFAULT_US;
2285
2286 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && \
2287 defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2288 max_tx_time = lll->dle.eff.max_tx_time;
2289 max_rx_time = lll->dle.eff.max_rx_time;
2290
2291 #else /* !CONFIG_BT_CTLR_DATA_LENGTH ||
2292 * !CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE
2293 */
2294 max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
2295 max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
2296 #if defined(CONFIG_BT_CTLR_PHY)
2297 max_tx_time = MAX(max_tx_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
2298 max_rx_time = MAX(max_rx_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
2299 #endif /* CONFIG_BT_CTLR_PHY */
2300 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH ||
2301 * !CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE
2302 */
2303
2304 /* Calculate event time reservation */
2305 slot_us = max_tx_time + max_rx_time;
2306 slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
2307 slot_us += ready_delay_us;
2308
2309 if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
2310 (lll->role == BT_HCI_ROLE_CENTRAL)) {
2311 slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
2312 }
2313
2314 conn->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
2315
2316 } else {
2317 conn_interval_new = interval + 1U;
2318 conn_interval_unit_new = CONN_LOW_LAT_INT_UNIT_US;
2319 lll->tifs_tx_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
2320 lll->tifs_rx_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
2321 lll->tifs_hcto_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
2322 		/* Reserve only the processing overhead; on overlap, the
2323 		 * is_abort_cb mechanism will ensure the event continues so
2324 		 * as not to lose anchor point sync.
2325 		 */
2326 conn->ull.ticks_slot =
2327 HAL_TICKER_US_TO_TICKS_CEIL(EVENT_OVERHEAD_START_US);
2328 }
2329
2330 conn_interval_us = conn_interval_new * conn_interval_unit_new;
2331 periodic_us = conn_interval_us;
2332
2333 conn_interval_old_us = conn_interval_old * conn_interval_unit_old;
2334 latency_upd = conn_interval_old_us / conn_interval_us;
2335 conn_interval_new_us = latency_upd * conn_interval_us;
2336 if (conn_interval_new_us > conn_interval_old_us) {
2337 ticks_at_expire += HAL_TICKER_US_TO_TICKS(
2338 conn_interval_new_us - conn_interval_old_us);
2339 } else {
2340 ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
2341 conn_interval_old_us - conn_interval_new_us);
2342 }
2343
2344 lll->latency_prepare += conn->llcp.prep.lazy;
2345 lll->latency_prepare -= (instant_latency - latency_upd);
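
	/* Rough worked example of the compensation above, with assumed values
	 * (CONN_INT_UNIT_US = 1250 us): the update is applied instant_latency
	 * = 1 old interval late, the old interval is 40 units (50000 us) and
	 * the new interval is 16 units (20000 us). Then conn_interval_old_us
	 * = 50000, latency_upd = 50000 / 20000 = 2 and conn_interval_new_us =
	 * 40000, so ticks_at_expire is pulled back by 10000 us worth of ticks
	 * and latency_prepare absorbs the (instant_latency - latency_upd)
	 * difference.
	 */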
2346
2347 /* calculate the offset */
2348 if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
2349 ticks_slot_overhead =
2350 MAX(conn->ull.ticks_active_to_start,
2351 conn->ull.ticks_prepare_to_start);
2352
2353 } else {
2354 ticks_slot_overhead = 0U;
2355 }
2356
2357 /* calculate the window widening and interval */
2358 switch (lll->role) {
2359 #if defined(CONFIG_BT_PERIPHERAL)
2360 case BT_HCI_ROLE_PERIPHERAL:
2361 lll->periph.window_widening_prepare_us -=
2362 lll->periph.window_widening_periodic_us * instant_latency;
2363
2364 lll->periph.window_widening_periodic_us =
2365 DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2366 lll_clock_ppm_get(conn->periph.sca)) *
2367 conn_interval_us), 1000000U);
2368 lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
2369 lll->periph.window_size_prepare_us = win_size * CONN_INT_UNIT_US;
2370
2371 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2372 conn->periph.ticks_to_offset = 0U;
2373 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2374
2375 lll->periph.window_widening_prepare_us +=
2376 lll->periph.window_widening_periodic_us * latency_upd;
2377 if (lll->periph.window_widening_prepare_us > lll->periph.window_widening_max_us) {
2378 lll->periph.window_widening_prepare_us = lll->periph.window_widening_max_us;
2379 }
2380
2381 ticks_at_expire -= HAL_TICKER_US_TO_TICKS(lll->periph.window_widening_periodic_us *
2382 latency_upd);
2383 ticks_win_offset = HAL_TICKER_US_TO_TICKS((win_offset_us / CONN_INT_UNIT_US) *
2384 CONN_INT_UNIT_US);
2385 periodic_us -= lll->periph.window_widening_periodic_us;
2386 break;
2387 #endif /* CONFIG_BT_PERIPHERAL */
2388 #if defined(CONFIG_BT_CENTRAL)
2389 case BT_HCI_ROLE_CENTRAL:
2390 ticks_win_offset = HAL_TICKER_US_TO_TICKS(win_offset_us);
2391
2392 		/* Workaround: since the ticker_start function lacks a remainder
2393 		 * parameter for the first interval, add a tick so as to use the
2394 		 * ceiled value.
2395 		 */
2396 ticks_win_offset += 1U;
2397 break;
2398 #endif /*CONFIG_BT_CENTRAL */
2399 default:
2400 LL_ASSERT(0);
2401 break;
2402 }
2403
2404 lll->interval = interval;
2405 lll->latency = latency;
2406
2407 conn->supervision_timeout = timeout;
2408 ull_cp_prt_reload_set(conn, conn_interval_us);
2409
2410 #if defined(CONFIG_BT_CTLR_LE_PING)
2411 /* APTO in no. of connection events */
2412 conn->apto_reload = RADIO_CONN_EVENTS((30U * 1000U * 1000U), conn_interval_us);
2413 /* Dispatch LE Ping PDU 6 connection events (that peer would
2414 * listen to) before 30s timeout
2415 * TODO: "peer listens to" is greater than 30s due to latency
2416 */
2417 conn->appto_reload = (conn->apto_reload > (lll->latency + 6U)) ?
2418 (conn->apto_reload - (lll->latency + 6U)) :
2419 conn->apto_reload;
2420 #endif /* CONFIG_BT_CTLR_LE_PING */
2421
2422 if (is_cu_proc) {
2423 conn->supervision_expire = 0U;
2424 }
2425
2426 /* Update ACL ticker */
2427 ull_conn_update_ticker(conn, ticks_win_offset, ticks_slot_overhead, periodic_us,
2428 ticks_at_expire);
2429 /* Signal that the prepare needs to be canceled */
2430 conn->cancel_prepare = 1U;
2431 }
2432
2433 #if defined(CONFIG_BT_PERIPHERAL)
2434 void ull_conn_update_peer_sca(struct ll_conn *conn)
2435 {
2436 struct lll_conn *lll;
2437
2438 uint32_t conn_interval_us;
2439 uint32_t periodic_us;
2440
2441 lll = &conn->lll;
2442
2443 /* calculate the window widening and interval */
2444 if (lll->interval >= BT_HCI_LE_INTERVAL_MIN) {
2445 conn_interval_us = lll->interval *
2446 CONN_INT_UNIT_US;
2447 } else {
2448 conn_interval_us = (lll->interval + 1U) *
2449 CONN_LOW_LAT_INT_UNIT_US;
2450 }
2451 periodic_us = conn_interval_us;
2452
2453 lll->periph.window_widening_periodic_us =
2454 DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2455 lll_clock_ppm_get(conn->periph.sca)) *
2456 conn_interval_us), 1000000U);
2457
2458 periodic_us -= lll->periph.window_widening_periodic_us;
2459
2460 /* Update ACL ticker */
2461 ull_conn_update_ticker(conn, HAL_TICKER_US_TO_TICKS(periodic_us), 0, periodic_us,
2462 conn->llcp.prep.ticks_at_expire);
2463
2464 }
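
/* Rough numeric example of the widening term above, with assumed clock
 * accuracies: a local sleep clock of 50 ppm, a peer SCA mapping to 250 ppm
 * and a 50 ms connection interval give
 *   window_widening_periodic_us = ceil((50 + 250) * 50000 / 1000000) = 15 us
 * per connection event, which is subtracted from periodic_us so that the
 * ticker expires early enough to cover the mutual clock drift.
 */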
2465 #endif /* CONFIG_BT_PERIPHERAL */
2466
2467 void ull_conn_chan_map_set(struct ll_conn *conn, const uint8_t chm[5])
2468 {
2469 struct lll_conn *lll = &conn->lll;
2470
2471 memcpy(lll->data_chan_map, chm, sizeof(lll->data_chan_map));
2472 lll->data_chan_count = util_ones_count_get(lll->data_chan_map, sizeof(lll->data_chan_map));
2473 }
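
/* Usage sketch with a hypothetical channel map, for illustration only:
 * masking out data channels 0..7 leaves 29 of the 37 data channels in use.
 *
 *   const uint8_t chm[5] = { 0x00, 0xFF, 0xFF, 0xFF, 0x1F };
 *
 *   ull_conn_chan_map_set(conn, chm);
 *   (lll->data_chan_count is then 29)
 */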
2474
2475 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2476 static inline void dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2477 uint16_t *max_tx_time)
2478 {
2479 uint8_t phy_select = PHY_1M;
2480 uint16_t rx_time = 0U;
2481 uint16_t tx_time = 0U;
2482
2483 #if defined(CONFIG_BT_CTLR_PHY)
2484 if (conn->llcp.fex.valid && feature_phy_coded(conn)) {
2485 /* If coded PHY is supported on the connection
2486 * this will define the max times
2487 */
2488 phy_select = PHY_CODED;
2489 /* If not, max times should be defined by 1M timing */
2490 }
2491 #endif
2492
2493 rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select);
2494
2495 #if defined(CONFIG_BT_CTLR_PHY)
2496 tx_time = MIN(conn->lll.dle.default_tx_time,
2497 PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select));
2498 #else /* !CONFIG_BT_CTLR_PHY */
2499 tx_time = PDU_DC_MAX_US(conn->lll.dle.default_tx_octets, phy_select);
2500 #endif /* !CONFIG_BT_CTLR_PHY */
2501
2502 	/*
2503 	 * See Core Spec Vol 6, Part B, Section 4.5.10:
2504 	 * the minimum value for time is 328 us.
2505 	 */
2506 rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
2507 tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);
2508
2509 *max_rx_time = rx_time;
2510 *max_tx_time = tx_time;
2511 }
2512
2513 void ull_dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2514 uint16_t *max_tx_time)
2515 {
2516 dle_max_time_get(conn, max_rx_time, max_tx_time);
2517 }
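
/* For orientation: when Coded PHY is supported on the connection, the rx time
 * above is derived from PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_CODED),
 * which for an assumed 251 octet maximum is in the region of the spec maximum
 * of 17040 us; on 1M only, the corresponding figure is around 2120 us. In all
 * cases the MAX() against PDU_DC_PAYLOAD_TIME_MIN enforces the 328 us floor
 * from Vol 6, Part B, Section 4.5.10.
 */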
2518
2519 /*
2520 * TODO: this probably can be optimised for ex. by creating a macro for the
2521 * ull_dle_update_eff function
2522 */
2523 uint8_t ull_dle_update_eff(struct ll_conn *conn)
2524 {
2525 uint8_t dle_changed = 0U;
2526
2527 	/* Note that we must use bitwise OR, not logical OR, so that both update calls are evaluated */
2528 dle_changed = ull_dle_update_eff_rx(conn);
2529 dle_changed |= ull_dle_update_eff_tx(conn);
2530 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2531 if (dle_changed) {
2532 conn->lll.evt_len_upd = 1U;
2533 }
2534 #endif
2535
2536
2537 return dle_changed;
2538 }
2539
2540 uint8_t ull_dle_update_eff_rx(struct ll_conn *conn)
2541 {
2542 uint8_t dle_changed = 0U;
2543
2544 const uint16_t eff_rx_octets =
2545 MAX(MIN(conn->lll.dle.local.max_rx_octets, conn->lll.dle.remote.max_tx_octets),
2546 PDU_DC_PAYLOAD_SIZE_MIN);
2547
2548 #if defined(CONFIG_BT_CTLR_PHY)
2549 unsigned int min_eff_rx_time = (conn->lll.phy_rx == PHY_CODED) ?
2550 PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2551
2552 const uint16_t eff_rx_time =
2553 MAX(MIN(conn->lll.dle.local.max_rx_time, conn->lll.dle.remote.max_tx_time),
2554 min_eff_rx_time);
2555
2556 if (eff_rx_time != conn->lll.dle.eff.max_rx_time) {
2557 conn->lll.dle.eff.max_rx_time = eff_rx_time;
2558 dle_changed = 1U;
2559 }
2560 #else
2561 conn->lll.dle.eff.max_rx_time = PDU_DC_MAX_US(eff_rx_octets, PHY_1M);
2562 #endif
2563
2564 if (eff_rx_octets != conn->lll.dle.eff.max_rx_octets) {
2565 conn->lll.dle.eff.max_rx_octets = eff_rx_octets;
2566 dle_changed = 1U;
2567 }
2568 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2569 	/* We delay the update of the event length until after the DLE procedure has finished */
2570 if (dle_changed) {
2571 conn->lll.evt_len_upd_delayed = 1;
2572 }
2573 #endif
2574
2575 return dle_changed;
2576 }
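
/* Illustrative negotiation outcome, with assumed values: local.max_rx_octets
 * = 251 and remote.max_tx_octets = 100 give eff.max_rx_octets = MIN(251, 100)
 * = 100; should the peer report fewer than 27 octets, the MAX() clamp pulls
 * the effective value back up to PDU_DC_PAYLOAD_SIZE_MIN.
 */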
2577
2578 uint8_t ull_dle_update_eff_tx(struct ll_conn *conn)
2579
2580 {
2581 uint8_t dle_changed = 0U;
2582
2583 const uint16_t eff_tx_octets =
2584 MAX(MIN(conn->lll.dle.local.max_tx_octets, conn->lll.dle.remote.max_rx_octets),
2585 PDU_DC_PAYLOAD_SIZE_MIN);
2586
2587 #if defined(CONFIG_BT_CTLR_PHY)
2588 unsigned int min_eff_tx_time = (conn->lll.phy_tx == PHY_CODED) ?
2589 PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2590
2591 const uint16_t eff_tx_time =
2592 MAX(MIN(conn->lll.dle.local.max_tx_time, conn->lll.dle.remote.max_rx_time),
2593 min_eff_tx_time);
2594
2595 if (eff_tx_time != conn->lll.dle.eff.max_tx_time) {
2596 conn->lll.dle.eff.max_tx_time = eff_tx_time;
2597 dle_changed = 1U;
2598 }
2599 #else
2600 conn->lll.dle.eff.max_tx_time = PDU_DC_MAX_US(eff_tx_octets, PHY_1M);
2601 #endif
2602
2603 if (eff_tx_octets != conn->lll.dle.eff.max_tx_octets) {
2604 conn->lll.dle.eff.max_tx_octets = eff_tx_octets;
2605 dle_changed = 1U;
2606 }
2607
2608 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2609 if (dle_changed) {
2610 conn->lll.evt_len_upd = 1U;
2611 }
2612 conn->lll.evt_len_upd |= conn->lll.evt_len_upd_delayed;
2613 conn->lll.evt_len_upd_delayed = 0;
2614 #endif
2615
2616 return dle_changed;
2617 }
2618
2619 static void ull_len_data_length_trim(uint16_t *tx_octets, uint16_t *tx_time)
2620 {
2621 #if defined(CONFIG_BT_CTLR_PHY_CODED)
2622 uint16_t tx_time_max =
2623 PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
2624 #else /* !CONFIG_BT_CTLR_PHY_CODED */
2625 uint16_t tx_time_max =
2626 PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
2627 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
2628
2629 /* trim to supported values */
2630 if (*tx_octets > LL_LENGTH_OCTETS_TX_MAX) {
2631 *tx_octets = LL_LENGTH_OCTETS_TX_MAX;
2632 }
2633
2634 if (*tx_time > tx_time_max) {
2635 *tx_time = tx_time_max;
2636 }
2637 }
2638
2639 void ull_dle_local_tx_update(struct ll_conn *conn, uint16_t tx_octets, uint16_t tx_time)
2640 {
2641 /* Trim to supported values */
2642 ull_len_data_length_trim(&tx_octets, &tx_time);
2643
2644 conn->lll.dle.default_tx_octets = tx_octets;
2645
2646 #if defined(CONFIG_BT_CTLR_PHY)
2647 conn->lll.dle.default_tx_time = tx_time;
2648 #endif /* CONFIG_BT_CTLR_PHY */
2649
2650 dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time, &conn->lll.dle.local.max_tx_time);
2651 conn->lll.dle.local.max_tx_octets = conn->lll.dle.default_tx_octets;
2652 }
2653
2654 void ull_dle_init(struct ll_conn *conn, uint8_t phy)
2655 {
2656 #if defined(CONFIG_BT_CTLR_PHY)
2657 const uint16_t max_time_min = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy);
2658 const uint16_t max_time_max = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy);
2659 #endif /* CONFIG_BT_CTLR_PHY */
2660
2661 /* Clear DLE data set */
2662 memset(&conn->lll.dle, 0, sizeof(conn->lll.dle));
2663 	/* See Bluetooth Core Spec v5.2, Vol 6, Part B, Sect 4.5.10.
2664 	 * Default to the locally supported maximum rx/tx length/time.
2665 	 */
2666 ull_dle_local_tx_update(conn, default_tx_octets, default_tx_time);
2667
2668 conn->lll.dle.local.max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
2669 #if defined(CONFIG_BT_CTLR_PHY)
2670 conn->lll.dle.local.max_rx_time = max_time_max;
2671 #endif /* CONFIG_BT_CTLR_PHY */
2672
2673 /* Default to minimum rx/tx data length/time */
2674 conn->lll.dle.remote.max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2675 conn->lll.dle.remote.max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2676
2677 #if defined(CONFIG_BT_CTLR_PHY)
2678 conn->lll.dle.remote.max_tx_time = max_time_min;
2679 conn->lll.dle.remote.max_rx_time = max_time_min;
2680 #endif /* CONFIG_BT_CTLR_PHY */
2681
2682 	/*
2683 	 * Per Bluetooth Core Specification version 5.3, Vol 6,
2684 	 * Part B, Section 4.5.10, ull_dle_update_eff can be
2685 	 * called for initialisation.
2686 	 */
2687 (void)ull_dle_update_eff(conn);
2688
2689 /* Check whether the controller should perform a data length update after
2690 * connection is established
2691 */
2692 #if defined(CONFIG_BT_CTLR_PHY)
2693 if ((conn->lll.dle.local.max_rx_time != max_time_min ||
2694 conn->lll.dle.local.max_tx_time != max_time_min)) {
2695 conn->lll.dle.update = 1;
2696 } else
2697 #endif
2698 {
2699 if (conn->lll.dle.local.max_tx_octets != PDU_DC_PAYLOAD_SIZE_MIN ||
2700 conn->lll.dle.local.max_rx_octets != PDU_DC_PAYLOAD_SIZE_MIN) {
2701 conn->lll.dle.update = 1;
2702 }
2703 }
2704 }
2705
2706 void ull_conn_default_tx_octets_set(uint16_t tx_octets)
2707 {
2708 default_tx_octets = tx_octets;
2709 }
2710
2711 void ull_conn_default_tx_time_set(uint16_t tx_time)
2712 {
2713 default_tx_time = tx_time;
2714 }
2715 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2716
2717 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
2718 static bool ticker_op_id_match_func(uint8_t ticker_id, uint32_t ticks_slot,
2719 uint32_t ticks_to_expire, void *op_context)
2720 {
2721 ARG_UNUSED(ticks_slot);
2722 ARG_UNUSED(ticks_to_expire);
2723
2724 uint8_t match_id = *(uint8_t *)op_context;
2725
2726 return ticker_id == match_id;
2727 }
2728
2729 static void ticker_get_offset_op_cb(uint32_t status, void *param)
2730 {
2731 *((uint32_t volatile *)param) = status;
2732 }
2733
2734 static uint32_t get_ticker_offset(uint8_t ticker_id, uint16_t *lazy)
2735 {
2736 uint32_t volatile ret_cb;
2737 uint32_t ticks_to_expire;
2738 uint32_t ticks_current;
2739 uint32_t sync_remainder_us;
2740 uint32_t remainder;
2741 uint32_t start_us;
2742 uint32_t ret;
2743 uint8_t id;
2744
2745 id = TICKER_NULL;
2746 ticks_to_expire = 0U;
2747 ticks_current = 0U;
2748
2749 ret_cb = TICKER_STATUS_BUSY;
2750
2751 ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW,
2752 &id, &ticks_current, &ticks_to_expire, &remainder,
2753 lazy, ticker_op_id_match_func, &ticker_id,
2754 ticker_get_offset_op_cb, (void *)&ret_cb);
2755
2756 if (ret == TICKER_STATUS_BUSY) {
2757 while (ret_cb == TICKER_STATUS_BUSY) {
2758 ticker_job_sched(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW);
2759 }
2760 }
2761
2762 LL_ASSERT(ret_cb == TICKER_STATUS_SUCCESS);
2763
2764 	/* Reduce a tick for a negative remainder and return a positive
2765 	 * remainder value.
2766 	 */
2767 hal_ticker_remove_jitter(&ticks_to_expire, &remainder);
2768 sync_remainder_us = remainder;
2769
2770 /* Add a tick for negative remainder and return positive remainder
2771 * value.
2772 */
2773 hal_ticker_add_jitter(&ticks_to_expire, &remainder);
2774 start_us = remainder;
2775
2776 return ull_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_to_expire),
2777 (sync_remainder_us - start_us));
2778 }
2779
2780 static void mfy_past_sender_offset_get(void *param)
2781 {
2782 uint16_t last_pa_event_counter;
2783 uint32_t ticker_offset_us;
2784 uint16_t pa_event_counter;
2785 uint8_t adv_sync_handle;
2786 uint16_t sync_handle;
2787 struct ll_conn *conn;
2788 uint16_t lazy;
2789
2790 conn = param;
2791
2792 /* Get handle to look for */
2793 ull_lp_past_offset_get_calc_params(conn, &adv_sync_handle, &sync_handle);
2794
2795 if (adv_sync_handle == BT_HCI_ADV_HANDLE_INVALID &&
2796 sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
2797 /* Procedure must have been aborted, do nothing */
2798 return;
2799 }
2800
2801 if (adv_sync_handle != BT_HCI_ADV_HANDLE_INVALID) {
2802 const struct ll_adv_sync_set *adv_sync = ull_adv_sync_get(adv_sync_handle);
2803
2804 LL_ASSERT(adv_sync);
2805
2806 ticker_offset_us = get_ticker_offset(TICKER_ID_ADV_SYNC_BASE + adv_sync_handle,
2807 &lazy);
2808
2809 pa_event_counter = adv_sync->lll.event_counter;
2810 last_pa_event_counter = pa_event_counter - 1;
2811 } else {
2812 		const struct ll_sync_set *sync = ull_sync_is_enabled_get(sync_handle);
2813 		uint32_t window_widening_event_us;
2814 		uint32_t interval_us;
2815 		LL_ASSERT(sync);
2816 		interval_us = sync->interval * PERIODIC_INT_UNIT_US;
2817
2818 ticker_offset_us = get_ticker_offset(TICKER_ID_SCAN_SYNC_BASE + sync_handle,
2819 &lazy);
2820
2821 if (lazy && ticker_offset_us > interval_us) {
2822
2823 /* Figure out how many events we have actually skipped */
2824 lazy = lazy - (ticker_offset_us / interval_us);
2825
2826 /* Correct offset to point to next event */
2827 ticker_offset_us = ticker_offset_us % interval_us;
2828 }
2829
2830 /* Calculate window widening for next event */
2831 window_widening_event_us = sync->lll.window_widening_event_us +
2832 sync->lll.window_widening_periodic_us * (lazy + 1U);
2833
2834 /* Correct for window widening */
2835 ticker_offset_us += window_widening_event_us;
2836
2837 pa_event_counter = sync->lll.event_counter + lazy;
2838
2839 last_pa_event_counter = pa_event_counter - 1 - lazy;
2840
2841 /* Handle unsuccessful events */
2842 if (sync->timeout_expire) {
2843 last_pa_event_counter -= sync->timeout_reload - sync->timeout_expire;
2844 }
2845 }
2846
2847 ull_lp_past_offset_calc_reply(conn, ticker_offset_us, pa_event_counter,
2848 last_pa_event_counter);
2849 }
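
/* Rough example of the lazy correction above, with assumed values
 * (PERIODIC_INT_UNIT_US = 1250 us): a sync interval of 80 units (100000 us),
 * lazy = 3 and a raw ticker_offset_us of 250000 us mean two whole intervals
 * are already contained in the offset, so lazy becomes 3 - 2 = 1 and the
 * offset is reduced to 50000 us before the window widening for the next
 * event is added.
 */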
2850
2851 void ull_conn_past_sender_offset_request(struct ll_conn *conn)
2852 {
2853 static memq_link_t link;
2854 static struct mayfly mfy = {0, 0, &link, NULL, mfy_past_sender_offset_get};
2855 uint32_t ret;
2856
2857 mfy.param = conn;
2858 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
2859 &mfy);
2860 LL_ASSERT(!ret);
2861 }
2862 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
2863
2864 uint8_t ull_conn_lll_phy_active(struct ll_conn *conn, uint8_t phys)
2865 {
2866 #if defined(CONFIG_BT_CTLR_PHY)
2867 if (!(phys & (conn->lll.phy_tx | conn->lll.phy_rx))) {
2868 #else /* !CONFIG_BT_CTLR_PHY */
2869 if (!(phys & 0x01)) {
2870 #endif /* !CONFIG_BT_CTLR_PHY */
2871 return 0;
2872 }
2873 return 1;
2874 }
2875
2876 uint8_t ull_is_lll_tx_queue_empty(struct ll_conn *conn)
2877 {
2878 return (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail, NULL) == NULL);
2879 }
2880