1 /*
2 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/sys/byteorder.h>
12
13 #include "hal/cpu.h"
14 #include "hal/ecb.h"
15 #include "hal/ccm.h"
16 #include "hal/ticker.h"
17
18 #include "util/util.h"
19 #include "util/mem.h"
20 #include "util/memq.h"
21 #include "util/mfifo.h"
22 #include "util/mayfly.h"
23 #include "util/dbuf.h"
24
25 #include "ticker/ticker.h"
26
27 #include "pdu_df.h"
28 #include "lll/pdu_vendor.h"
29 #include "pdu.h"
30
31 #include "lll.h"
32 #include "lll_clock.h"
33 #include "lll/lll_df_types.h"
34 #include "lll_conn.h"
35 #include "lll_conn_iso.h"
36 #include "lll/lll_vendor.h"
37
38 #include "ll_sw/ull_tx_queue.h"
39
40 #include "isoal.h"
41 #include "ull_iso_types.h"
42 #include "ull_conn_types.h"
43 #include "ull_conn_iso_types.h"
44
45 #if defined(CONFIG_BT_CTLR_USER_EXT)
46 #include "ull_vendor.h"
47 #endif /* CONFIG_BT_CTLR_USER_EXT */
48
49 #include "ull_internal.h"
50 #include "ull_llcp_internal.h"
51 #include "ull_sched_internal.h"
52 #include "ull_chan_internal.h"
53 #include "ull_conn_internal.h"
54 #include "ull_peripheral_internal.h"
55 #include "ull_central_internal.h"
56
57 #include "ull_iso_internal.h"
58 #include "ull_conn_iso_internal.h"
59 #include "ull_peripheral_iso_internal.h"
60
61
62 #include "ll.h"
63 #include "ll_feat.h"
64 #include "ll_settings.h"
65
66 #include "ll_sw/ull_llcp.h"
67 #include "ll_sw/ull_llcp_features.h"
68
69 #include "hal/debug.h"
70
71 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
72 #include <zephyr/logging/log.h>
73 LOG_MODULE_REGISTER(bt_ctlr_ull_conn);
74
75 static int init_reset(void);
76 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
77 static void tx_demux_sched(struct ll_conn *conn);
78 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
79 static void tx_demux(void *param);
80 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);
81
82 static void ticker_update_conn_op_cb(uint32_t status, void *param);
83 static void ticker_stop_conn_op_cb(uint32_t status, void *param);
84 static void ticker_start_conn_op_cb(uint32_t status, void *param);
85
86 static void conn_setup_adv_scan_disabled_cb(void *param);
87 static inline void disable(uint16_t handle);
88 static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
89 static void conn_cleanup_finalize(struct ll_conn *conn);
90 static void tx_ull_flush(struct ll_conn *conn);
91 static void ticker_stop_op_cb(uint32_t status, void *param);
92 static void conn_disable(void *param);
93 static void disabled_cb(void *param);
94 static void tx_lll_flush(void *param);
95
96 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
97 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
98 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
99
100 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
101 /* Connection context pointer used as CPR mutex to serialize connection
102 * parameter request procedures across simultaneous connections so that
103 * offsets exchanged to the peer do not get changed.
104 */
105 struct ll_conn *conn_upd_curr;
106 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
107
108 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
109 static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
110 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
111
112 #if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
113 #define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
114 #endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */
115
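/* Size of a single connection Tx buffer: the node_tx header up to its PDU
 * area, plus the pdu_data header up to the payload (lldata), plus the
 * maximum Link Layer Tx payload and any vendor-specific overhead, rounded
 * up by MROUND for pool alignment.
 */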
116 #define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
117 offsetof(struct pdu_data, lldata) + \
118 (LL_LENGTH_OCTETS_TX_MAX + \
119 BT_CTLR_USER_TX_BUFFER_OVERHEAD))
120
121 #define CONN_DATA_BUFFERS CONFIG_BT_BUF_ACL_TX_COUNT
122
123 static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONN_DATA_BUFFERS);
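/* The ack FIFO must hold acknowledgments for every outstanding ACL data
 * buffer as well as every LLCP control PDU buffer.
 */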
124 static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
125 (CONN_DATA_BUFFERS +
126 LLCP_TX_CTRL_BUF_COUNT));
127
128 static struct {
129 void *free;
130 uint8_t pool[CONN_TX_BUF_SIZE * CONN_DATA_BUFFERS];
131 } mem_conn_tx;
132
133 static struct {
134 void *free;
135 uint8_t pool[sizeof(memq_link_t) *
136 (CONN_DATA_BUFFERS +
137 LLCP_TX_CTRL_BUF_COUNT)];
138 } mem_link_tx;
139
140 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
141 static uint16_t default_tx_octets;
142 static uint16_t default_tx_time;
143 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
144
145 #if defined(CONFIG_BT_CTLR_PHY)
146 static uint8_t default_phy_tx;
147 static uint8_t default_phy_rx;
148 #endif /* CONFIG_BT_CTLR_PHY */
149
150 static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
151 static void *conn_free;
152
153 struct ll_conn *ll_conn_acquire(void)
154 {
155 return mem_acquire(&conn_free);
156 }
157
158 void ll_conn_release(struct ll_conn *conn)
159 {
160 mem_release(conn, &conn_free);
161 }
162
163 uint16_t ll_conn_handle_get(struct ll_conn *conn)
164 {
165 return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
166 }
167
168 struct ll_conn *ll_conn_get(uint16_t handle)
169 {
170 return mem_get(conn_pool, sizeof(struct ll_conn), handle);
171 }
172
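/* Unlike ll_conn_get(), return the connection context only if the handle
 * refers to an established connection, i.e. the handle stored in the LLL
 * context matches the requested handle.
 */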
173 struct ll_conn *ll_connected_get(uint16_t handle)
174 {
175 struct ll_conn *conn;
176
177 if (handle >= CONFIG_BT_MAX_CONN) {
178 return NULL;
179 }
180
181 conn = ll_conn_get(handle);
182 if (conn->lll.handle != handle) {
183 return NULL;
184 }
185
186 return conn;
187 }
188
189 uint16_t ll_conn_free_count_get(void)
190 {
191 return mem_free_count_get(conn_free);
192 }
193
194 void *ll_tx_mem_acquire(void)
195 {
196 return mem_acquire(&mem_conn_tx.free);
197 }
198
199 void ll_tx_mem_release(void *tx)
200 {
201 mem_release(tx, &mem_conn_tx.free);
202 }
203
204 int ll_tx_mem_enqueue(uint16_t handle, void *tx)
205 {
206 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
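/* Throughput statistics are logged over a 1 second window, expressed in
 * nanoseconds to match the k_cyc_to_ns_floor64() delta below.
 */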
207 #define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
208 static uint32_t tx_rate;
209 static uint32_t tx_cnt;
210 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
211 struct lll_tx *lll_tx;
212 struct ll_conn *conn;
213 uint8_t idx;
214
215 conn = ll_connected_get(handle);
216 if (!conn) {
217 return -EINVAL;
218 }
219
220 idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
221 if (!lll_tx) {
222 return -ENOBUFS;
223 }
224
225 lll_tx->handle = handle;
226 lll_tx->node = tx;
227
228 MFIFO_ENQUEUE(conn_tx, idx);
229
230 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
231 if (ull_ref_get(&conn->ull)) {
232 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
233 if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
234 uint8_t previous, force_md_cnt;
235
236 force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
237 previous = lll_conn_force_md_cnt_set(force_md_cnt);
238 if (previous != force_md_cnt) {
239 LOG_INF("force_md_cnt: old= %u, new= %u.", previous, force_md_cnt);
240 }
241 }
242 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
243
244 tx_demux_sched(conn);
245
246 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
247 } else {
248 lll_conn_force_md_cnt_set(0U);
249 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
250 }
251 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
252
253 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
254 ull_periph_latency_cancel(conn, handle);
255 }
256
257 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
258 static uint32_t last_cycle_stamp;
259 static uint32_t tx_len;
260 struct pdu_data *pdu;
261 uint32_t cycle_stamp;
262 uint64_t delta;
263
264 cycle_stamp = k_cycle_get_32();
265 delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
266 if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
267 LOG_INF("incoming Tx: count= %u, len= %u, rate= %u bps.", tx_cnt, tx_len, tx_rate);
268
269 last_cycle_stamp = cycle_stamp;
270 tx_cnt = 0U;
271 tx_len = 0U;
272 }
273
274 pdu = (void *)((struct node_tx *)tx)->pdu;
275 tx_len += pdu->len;
276 tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
277 tx_cnt++;
278 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
279
280 return 0;
281 }
282
283 uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
284 uint16_t interval_max, uint16_t latency, uint16_t timeout, uint16_t *offset)
285 {
286 struct ll_conn *conn;
287
288 conn = ll_connected_get(handle);
289 if (!conn) {
290 return BT_HCI_ERR_UNKNOWN_CONN_ID;
291 }
292
293 if (cmd == 0U) {
294 uint8_t err;
295
296 err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout,
297 offset);
298 if (err) {
299 return err;
300 }
301
302 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
303 conn->lll.role) {
304 ull_periph_latency_cancel(conn, handle);
305 }
306 } else if (cmd == 2U) {
307 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
308 if (status == 0U) {
309 ull_cp_conn_param_req_reply(conn);
310 } else {
311 ull_cp_conn_param_req_neg_reply(conn, status);
312 }
313 return BT_HCI_ERR_SUCCESS;
314 #else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
315 /* CPR feature not supported */
316 return BT_HCI_ERR_CMD_DISALLOWED;
317 #endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
318 } else {
319 return BT_HCI_ERR_UNKNOWN_CMD;
320 }
321
322 return 0;
323 }
324
325 uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
326 {
327 struct ll_conn *conn;
328
329 conn = ll_connected_get(handle);
330 if (!conn) {
331 return BT_HCI_ERR_UNKNOWN_CONN_ID;
332 }
333
334 /*
335 * Core Spec 5.2 Vol4: 7.8.20:
336 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
337 * for the specified Connection_Handle. The returned value indicates the state of
338 * the Channel_Map specified by the last transmitted or received Channel_Map
339 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
340 * Connection_Handle, regardless of whether the Central has received an
341 * acknowledgment.
342 */
343 const uint8_t *pending_chm;
344
345 pending_chm = ull_cp_chan_map_update_pending(conn);
346 if (pending_chm) {
347 memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
348 } else {
349 memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
350 }
351
352 return 0;
353 }
354
355 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
356 uint8_t ll_req_peer_sca(uint16_t handle)
357 {
358 struct ll_conn *conn;
359
360 conn = ll_connected_get(handle);
361 if (!conn) {
362 return BT_HCI_ERR_UNKNOWN_CONN_ID;
363 }
364
365 return ull_cp_req_peer_sca(conn);
366 }
367 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
368
369 static bool is_valid_disconnect_reason(uint8_t reason)
370 {
371 switch (reason) {
372 case BT_HCI_ERR_AUTH_FAIL:
373 case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
374 case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
375 case BT_HCI_ERR_REMOTE_POWER_OFF:
376 case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
377 case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
378 case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
379 return true;
380 default:
381 return false;
382 }
383 }
384
385 uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
386 {
387 struct ll_conn *conn;
388 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
389 struct ll_conn_iso_stream *cis;
390 #endif
391
392 if (IS_ACL_HANDLE(handle)) {
393 conn = ll_connected_get(handle);
394
395 /* Is conn still connected? */
396 if (!conn) {
397 return BT_HCI_ERR_CMD_DISALLOWED;
398 }
399
400 if (!is_valid_disconnect_reason(reason)) {
401 return BT_HCI_ERR_INVALID_PARAM;
402 }
403
404 uint8_t err;
405
406 err = ull_cp_terminate(conn, reason);
407 if (err) {
408 return err;
409 }
410
411 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
412 ull_periph_latency_cancel(conn, handle);
413 }
414 return 0;
415 }
416 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
417 if (IS_CIS_HANDLE(handle)) {
418 cis = ll_iso_stream_connected_get(handle);
419 /* Disallow if CIS is not connected */
420 if (!cis) {
421 return BT_HCI_ERR_CMD_DISALLOWED;
422 }
423
424 conn = ll_connected_get(cis->lll.acl_handle);
425 /* Disallow if ACL has disconnected */
426 if (!conn) {
427 return BT_HCI_ERR_CMD_DISALLOWED;
428 }
429
430 return ull_cp_cis_terminate(conn, cis, reason);
431 }
432 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
433
434 return BT_HCI_ERR_UNKNOWN_CONN_ID;
435 }
436
437 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
438 uint8_t ll_feature_req_send(uint16_t handle)
439 {
440 struct ll_conn *conn;
441
442 conn = ll_connected_get(handle);
443 if (!conn) {
444 return BT_HCI_ERR_UNKNOWN_CONN_ID;
445 }
446
447 uint8_t err;
448
449 err = ull_cp_feature_exchange(conn, 1U);
450 if (err) {
451 return err;
452 }
453
454 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
455 IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
456 conn->lll.role) {
457 ull_periph_latency_cancel(conn, handle);
458 }
459
460 return 0;
461 }
462 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
463
464 uint8_t ll_version_ind_send(uint16_t handle)
465 {
466 struct ll_conn *conn;
467
468 conn = ll_connected_get(handle);
469 if (!conn) {
470 return BT_HCI_ERR_UNKNOWN_CONN_ID;
471 }
472
473 uint8_t err;
474
475 err = ull_cp_version_exchange(conn);
476 if (err) {
477 return err;
478 }
479
480 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
481 ull_periph_latency_cancel(conn, handle);
482 }
483
484 return 0;
485 }
486
487 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
488 static bool ll_len_validate(uint16_t tx_octets, uint16_t tx_time)
489 {
490 /* validate if within HCI allowed range */
491 if (!IN_RANGE(tx_octets, PDU_DC_PAYLOAD_SIZE_MIN,
492 PDU_DC_PAYLOAD_SIZE_MAX)) {
493 return false;
494 }
495
496 /* validate if within HCI allowed range */
497 if (!IN_RANGE(tx_time, PDU_DC_PAYLOAD_TIME_MIN,
498 PDU_DC_PAYLOAD_TIME_MAX_CODED)) {
499 return false;
500 }
501
502 return true;
503 }
504
505 uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
506 uint16_t tx_time)
507 {
508 struct ll_conn *conn;
509
510 if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
511 !ll_len_validate(tx_octets, tx_time)) {
512 return BT_HCI_ERR_INVALID_PARAM;
513 }
514
515 conn = ll_connected_get(handle);
516 if (!conn) {
517 return BT_HCI_ERR_UNKNOWN_CONN_ID;
518 }
519
520 if (!feature_dle(conn)) {
521 return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
522 }
523
524 uint8_t err;
525
526 err = ull_cp_data_length_update(conn, tx_octets, tx_time);
527 if (err) {
528 return err;
529 }
530
531 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
532 ull_periph_latency_cancel(conn, handle);
533 }
534
535 return 0;
536 }
537
538 void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
539 {
540 *max_tx_octets = default_tx_octets;
541 *max_tx_time = default_tx_time;
542 }
543
544 uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
545 {
546 if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
547 !ll_len_validate(max_tx_octets, max_tx_time)) {
548 return BT_HCI_ERR_INVALID_PARAM;
549 }
550
551 default_tx_octets = max_tx_octets;
552 default_tx_time = max_tx_time;
553
554 return 0;
555 }
556
557 void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
558 uint16_t *max_rx_octets, uint16_t *max_rx_time)
559 {
560 #if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
561 #define PHY (PHY_CODED)
562 #else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
563 #define PHY (PHY_1M)
564 #endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
565 *max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
566 *max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
567 *max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
568 *max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
569 #undef PHY
570 }
571 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
572
573 #if defined(CONFIG_BT_CTLR_PHY)
574 uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
575 {
576 struct ll_conn *conn;
577
578 conn = ll_connected_get(handle);
579 if (!conn) {
580 return BT_HCI_ERR_UNKNOWN_CONN_ID;
581 }
582
583 /* TODO: context safe read */
584 *tx = conn->lll.phy_tx;
585 *rx = conn->lll.phy_rx;
586
587 return 0;
588 }
589
590 uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
591 {
592 /* TODO: validate against supported phy */
593
594 default_phy_tx = tx;
595 default_phy_rx = rx;
596
597 return 0;
598 }
599
600 uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
601 {
602 struct ll_conn *conn;
603
604 conn = ll_connected_get(handle);
605 if (!conn) {
606 return BT_HCI_ERR_UNKNOWN_CONN_ID;
607 }
608
609 if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
610 return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
611 }
612
613 uint8_t err;
614
615 err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
616 if (err) {
617 return err;
618 }
619
620 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
621 ull_periph_latency_cancel(conn, handle);
622 }
623
624 return 0;
625 }
626 #endif /* CONFIG_BT_CTLR_PHY */
627
628 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
629 uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
630 {
631 struct ll_conn *conn;
632
633 conn = ll_connected_get(handle);
634 if (!conn) {
635 return BT_HCI_ERR_UNKNOWN_CONN_ID;
636 }
637
638 *rssi = conn->lll.rssi_latest;
639
640 return 0;
641 }
642 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
643
644 #if defined(CONFIG_BT_CTLR_LE_PING)
645 uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
646 {
647 struct ll_conn *conn;
648
649 conn = ll_connected_get(handle);
650 if (!conn) {
651 return BT_HCI_ERR_UNKNOWN_CONN_ID;
652 }
653
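/* apto_reload is a count of connection events and the interval is in
 * 1.25 ms units; scaling by 125/1000 (0.125) yields the timeout in the
 * 10 ms units used by HCI.
 */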
654 *apto = conn->apto_reload * conn->lll.interval * 125U / 1000;
655
656 return 0;
657 }
658
659 uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
660 {
661 struct ll_conn *conn;
662
663 conn = ll_connected_get(handle);
664 if (!conn) {
665 return BT_HCI_ERR_UNKNOWN_CONN_ID;
666 }
667
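/* Convert the HCI timeout (10 ms units, hence apto * 10 * 1000 us) into
 * a count of connection events of the current interval.
 */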
668 conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
669 conn->lll.interval *
670 CONN_INT_UNIT_US);
671
672 return 0;
673 }
674 #endif /* CONFIG_BT_CTLR_LE_PING */
675
676 int ull_conn_init(void)
677 {
678 int err;
679
680 err = init_reset();
681 if (err) {
682 return err;
683 }
684
685 return 0;
686 }
687
688 int ull_conn_reset(void)
689 {
690 uint16_t handle;
691 int err;
692
693 #if defined(CONFIG_BT_CENTRAL)
694 /* Reset initiator */
695 (void)ull_central_reset();
696 #endif /* CONFIG_BT_CENTRAL */
697
698 for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
699 disable(handle);
700 }
701
702 /* Re-initialize the Tx mfifo */
703 MFIFO_INIT(conn_tx);
704
705 /* Re-initialize the Tx Ack mfifo */
706 MFIFO_INIT(conn_ack);
707
708 err = init_reset();
709 if (err) {
710 return err;
711 }
712
713 return 0;
714 }
715
716 struct lll_conn *ull_conn_lll_get(uint16_t handle)
717 {
718 struct ll_conn *conn;
719
720 conn = ll_conn_get(handle);
721
722 return &conn->lll;
723 }
724
725 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
726 uint16_t ull_conn_default_tx_octets_get(void)
727 {
728 return default_tx_octets;
729 }
730
731 #if defined(CONFIG_BT_CTLR_PHY)
732 uint16_t ull_conn_default_tx_time_get(void)
733 {
734 return default_tx_time;
735 }
736 #endif /* CONFIG_BT_CTLR_PHY */
737 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
738
739 #if defined(CONFIG_BT_CTLR_PHY)
740 uint8_t ull_conn_default_phy_tx_get(void)
741 {
742 return default_phy_tx;
743 }
744
745 uint8_t ull_conn_default_phy_rx_get(void)
746 {
747 return default_phy_rx;
748 }
749 #endif /* CONFIG_BT_CTLR_PHY */
750
751 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
752 bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
753 uint8_t const *const own_id_addr,
754 uint8_t const peer_id_addr_type,
755 uint8_t const *const peer_id_addr)
756 {
757 uint16_t handle;
758
759 for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
760 struct ll_conn *conn = ll_connected_get(handle);
761
762 if (conn &&
763 conn->peer_id_addr_type == peer_id_addr_type &&
764 !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
765 conn->own_id_addr_type == own_id_addr_type &&
766 !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
767 return true;
768 }
769 }
770
771 return false;
772 }
773 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
774
775 void ull_conn_setup(memq_link_t *rx_link, struct node_rx_hdr *rx)
776 {
777 struct node_rx_ftr *ftr;
778 struct ull_hdr *hdr;
779
780 /* Store the link in the node rx so that when done event is
781 * processed it can be used to enqueue node rx towards LL context
782 */
783 rx->link = rx_link;
784
785 /* NOTE: LLL conn context SHALL be after lll_hdr in
786 * struct lll_adv and struct lll_scan.
787 */
788 ftr = &(rx->rx_ftr);
789
790 /* Check for reference count and decide to set up the connection
791 * here or when the done event arrives.
792 */
793 hdr = HDR_LLL2ULL(ftr->param);
794 if (ull_ref_get(hdr)) {
795 /* Setup connection in ULL disabled callback,
796 * pass the node rx as disabled callback parameter.
797 */
798 LL_ASSERT(!hdr->disabled_cb);
799 hdr->disabled_param = rx;
800 hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
801 } else {
802 conn_setup_adv_scan_disabled_cb(rx);
803 }
804 }
805
806 int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
807 {
808 struct pdu_data *pdu_rx;
809 struct ll_conn *conn;
810
811 conn = ll_connected_get((*rx)->hdr.handle);
812 if (!conn) {
813 /* Mark buffer for release */
814 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
815
816 return 0;
817 }
818
819 ull_cp_tx_ntf(conn);
820
821 pdu_rx = (void *)(*rx)->pdu;
822
823 switch (pdu_rx->ll_id) {
824 case PDU_DATA_LLID_CTRL:
825 {
826 /* Mark buffer for release */
827 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
828
829 ull_cp_rx(conn, link, *rx);
830
831 return 0;
832 }
833
834 case PDU_DATA_LLID_DATA_CONTINUE:
835 case PDU_DATA_LLID_DATA_START:
836 #if defined(CONFIG_BT_CTLR_LE_ENC)
837 if (conn->pause_rx_data) {
838 conn->llcp_terminate.reason_final =
839 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
840
841 /* Mark buffer for release */
842 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
843 }
844 #endif /* CONFIG_BT_CTLR_LE_ENC */
845 break;
846
847 case PDU_DATA_LLID_RESV:
848 default:
849 #if defined(CONFIG_BT_CTLR_LE_ENC)
850 if (conn->pause_rx_data) {
851 conn->llcp_terminate.reason_final =
852 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
853 }
854 #endif /* CONFIG_BT_CTLR_LE_ENC */
855
856 /* Invalid LL id, drop it. */
857
858 /* Mark buffer for release */
859 (*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
860
861 break;
862 }
863
864
865 return 0;
866 }
867
868 int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire,
869 uint32_t remainder, uint16_t lazy)
870 {
871 LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);
872
873 conn->llcp.prep.ticks_at_expire = ticks_at_expire;
874 conn->llcp.prep.remainder = remainder;
875 conn->llcp.prep.lazy = lazy;
876
877 ull_cp_run(conn);
878
879 if (conn->cancel_prepare) {
880 /* Reset signal */
881 conn->cancel_prepare = 0U;
882
883 /* Cancel prepare */
884 return -ECANCELED;
885 }
886
887 /* Continue prepare */
888 return 0;
889 }
890
891 void ull_conn_done(struct node_rx_event_done *done)
892 {
893 uint32_t ticks_drift_minus;
894 uint32_t ticks_drift_plus;
895 uint32_t ticks_slot_minus;
896 uint32_t ticks_slot_plus;
897 uint16_t latency_event;
898 uint16_t elapsed_event;
899 struct lll_conn *lll;
900 struct ll_conn *conn;
901 uint8_t reason_final;
902 uint16_t lazy;
903 uint8_t force;
904
905 /* Get reference to ULL context */
906 conn = CONTAINER_OF(done->param, struct ll_conn, ull);
907 lll = &conn->lll;
908
909 /* Skip if connection terminated by local host */
910 if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
911 return;
912 }
913
914 ull_cp_tx_ntf(conn);
915
916 #if defined(CONFIG_BT_CTLR_LE_ENC)
917 /* Check authenticated payload expiry or MIC failure */
918 switch (done->extra.mic_state) {
919 case LLL_CONN_MIC_NONE:
920 #if defined(CONFIG_BT_CTLR_LE_PING)
921 if (lll->enc_rx && lll->enc_tx) {
922 uint16_t appto_reload_new;
923
924 /* check for change in apto */
925 appto_reload_new = (conn->apto_reload >
926 (lll->latency + 6)) ?
927 (conn->apto_reload -
928 (lll->latency + 6)) :
929 conn->apto_reload;
930 if (conn->appto_reload != appto_reload_new) {
931 conn->appto_reload = appto_reload_new;
932 conn->apto_expire = 0U;
933 }
934
935 /* start authenticated payload (pre) timeout */
936 if (conn->apto_expire == 0U) {
937 conn->appto_expire = conn->appto_reload;
938 conn->apto_expire = conn->apto_reload;
939 }
940 }
941 #endif /* CONFIG_BT_CTLR_LE_PING */
942 break;
943
944 case LLL_CONN_MIC_PASS:
945 #if defined(CONFIG_BT_CTLR_LE_PING)
946 conn->appto_expire = conn->apto_expire = 0U;
947 #endif /* CONFIG_BT_CTLR_LE_PING */
948 break;
949
950 case LLL_CONN_MIC_FAIL:
951 conn->llcp_terminate.reason_final =
952 BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
953 break;
954 }
955 #endif /* CONFIG_BT_CTLR_LE_ENC */
956
957 reason_final = conn->llcp_terminate.reason_final;
958 if (reason_final) {
959 conn_cleanup(conn, reason_final);
960
961 return;
962 }
963
964 /* Events elapsed used in timeout checks below */
965 #if defined(CONFIG_BT_CTLR_CONN_META)
966 /* If the event has shallow expiry do not add latency, but rely on the
967 * accumulated lazy count.
968 */
969 latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
970 #else
971 latency_event = lll->latency_event;
972 #endif
973 if (lll->latency_prepare) {
974 elapsed_event = latency_event + lll->latency_prepare;
975 } else {
976 elapsed_event = latency_event + 1U;
977 }
978
979 /* Peripheral drift compensation calc and new latency or
980 * central terminate acked
981 */
982 ticks_drift_plus = 0U;
983 ticks_drift_minus = 0U;
984 ticks_slot_plus = 0U;
985 ticks_slot_minus = 0U;
986
987 if (done->extra.trx_cnt) {
988 if (0) {
989 #if defined(CONFIG_BT_PERIPHERAL)
990 } else if (lll->role) {
991 ull_drift_ticks_get(done, &ticks_drift_plus,
992 &ticks_drift_minus);
993
994 if (!ull_tx_q_peek(&conn->tx_q)) {
995 ull_conn_tx_demux(UINT8_MAX);
996 }
997
998 if (ull_tx_q_peek(&conn->tx_q) ||
999 memq_peek(lll->memq_tx.head,
1000 lll->memq_tx.tail, NULL)) {
1001 lll->latency_event = 0U;
1002 } else if (lll->periph.latency_enabled) {
1003 lll->latency_event = lll->latency;
1004 }
1005 #endif /* CONFIG_BT_PERIPHERAL */
1006
1007 #if defined(CONFIG_BT_CENTRAL)
1008 } else if (reason_final) {
1009 conn->central.terminate_ack = 1;
1010 #endif /* CONFIG_BT_CENTRAL */
1011
1012 }
1013
1014 /* Reset connection failed to establish countdown */
1015 conn->connect_expire = 0U;
1016 }
1017
1018 /* Reset supervision countdown */
1019 if (done->extra.crc_valid) {
1020 conn->supervision_expire = 0U;
1021 }
1022
1023 /* check connection failed to establish */
1024 else if (conn->connect_expire) {
1025 if (conn->connect_expire > elapsed_event) {
1026 conn->connect_expire -= elapsed_event;
1027 } else {
1028 conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
1029
1030 return;
1031 }
1032 }
1033
1034 /* if anchor point not sync-ed, start supervision timeout, and break
1035 * latency if any.
1036 */
1037 else {
1038 /* Start supervision timeout, if not started already */
1039 if (!conn->supervision_expire) {
1040 const uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;
1041
1042 conn->supervision_expire = RADIO_CONN_EVENTS(
1043 (conn->supervision_timeout * 10U * 1000U),
1044 conn_interval_us);
1045 }
1046 }
1047
1048 /* check supervision timeout */
1049 force = 0U;
1050 if (conn->supervision_expire) {
1051 if (conn->supervision_expire > elapsed_event) {
1052 conn->supervision_expire -= elapsed_event;
1053
1054 /* break latency */
1055 lll->latency_event = 0U;
1056
1057 /* Force both central and peripheral when close to
1058 * supervision timeout.
1059 */
1060 if (conn->supervision_expire <= 6U) {
1061 force = 1U;
1062 }
1063 #if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
1064 /* use randomness to force peripheral role when anchor
1065 * points are being missed.
1066 */
1067 else if (lll->role) {
1068 if (latency_event) {
1069 force = 1U;
1070 } else {
1071 force = conn->periph.force & 0x01;
1072
1073 /* rotate force bits */
1074 conn->periph.force >>= 1U;
1075 if (force) {
1076 conn->periph.force |= BIT(31);
1077 }
1078 }
1079 }
1080 #endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
1081 } else {
1082 conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);
1083
1084 return;
1085 }
1086 }
1087
1088 /* check procedure timeout */
1089 uint8_t error_code;
1090
1091 if (-ETIMEDOUT == ull_cp_prt_elapse(conn, elapsed_event, &error_code)) {
1092 conn_cleanup(conn, error_code);
1093
1094 return;
1095 }
1096
1097 #if defined(CONFIG_BT_CTLR_LE_PING)
1098 /* check apto */
1099 if (conn->apto_expire != 0U) {
1100 if (conn->apto_expire > elapsed_event) {
1101 conn->apto_expire -= elapsed_event;
1102 } else {
1103 struct node_rx_hdr *rx;
1104
1105 rx = ll_pdu_rx_alloc();
1106 if (rx) {
1107 conn->apto_expire = 0U;
1108
1109 rx->handle = lll->handle;
1110 rx->type = NODE_RX_TYPE_APTO;
1111
1112 /* enqueue apto event into rx queue */
1113 ll_rx_put_sched(rx->link, rx);
1114 } else {
1115 conn->apto_expire = 1U;
1116 }
1117 }
1118 }
1119
1120 /* check appto */
1121 if (conn->appto_expire != 0U) {
1122 if (conn->appto_expire > elapsed_event) {
1123 conn->appto_expire -= elapsed_event;
1124 } else {
1125 conn->appto_expire = 0U;
1126
1127 /* Initiate LE_PING procedure */
1128 ull_cp_le_ping(conn);
1129 }
1130 }
1131 #endif /* CONFIG_BT_CTLR_LE_PING */
1132
1133 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1134 /* Check if the CTE_REQ procedure is periodic and the counter has been started.
1135 * req_expire is set when a new CTE_REQ is started and after completion of the last periodic run.
1136 */
1137 if (conn->llcp.cte_req.req_interval != 0U && conn->llcp.cte_req.req_expire != 0U) {
1138 if (conn->llcp.cte_req.req_expire > elapsed_event) {
1139 conn->llcp.cte_req.req_expire -= elapsed_event;
1140 } else {
1141 uint8_t err;
1142
1143 /* Set req_expire to zero to mark that new periodic CTE_REQ was started.
1144 * The counter is re-started after completion of this run.
1145 */
1146 conn->llcp.cte_req.req_expire = 0U;
1147
1148 err = ull_cp_cte_req(conn, conn->llcp.cte_req.min_cte_len,
1149 conn->llcp.cte_req.cte_type);
1150
1151 if (err == BT_HCI_ERR_CMD_DISALLOWED) {
1152 /* Conditions have changed, e.g. PHY was changed to CODED.
1153 * New CTE REQ is not possible. Disable the periodic requests.
1154 */
1155 ull_cp_cte_req_set_disable(conn);
1156 }
1157 }
1158 }
1159 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1160
1161 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1162 /* generate RSSI event */
1163 if (lll->rssi_sample_count == 0U) {
1164 struct node_rx_pdu *rx;
1165 struct pdu_data *pdu_data_rx;
1166
1167 rx = ll_pdu_rx_alloc();
1168 if (rx) {
1169 lll->rssi_reported = lll->rssi_latest;
1170 lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
1171
1172 /* Prepare the rx packet structure */
1173 rx->hdr.handle = lll->handle;
1174 rx->hdr.type = NODE_RX_TYPE_RSSI;
1175
1176 /* prepare connection RSSI structure */
1177 pdu_data_rx = (void *)rx->pdu;
1178 pdu_data_rx->rssi = lll->rssi_reported;
1179
1180 /* enqueue connection RSSI structure into queue */
1181 ll_rx_put_sched(rx->hdr.link, rx);
1182 }
1183 }
1184 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1185
1186 /* check if latency needs update */
1187 lazy = 0U;
1188 if ((force) || (latency_event != lll->latency_event)) {
1189 lazy = lll->latency_event + 1U;
1190 }
1191
1192 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
1193 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) || defined(CONFIG_BT_CTLR_PHY)
1194 if (lll->evt_len_upd) {
1195 uint32_t ready_delay, rx_time, tx_time, ticks_slot;
1196
1197 lll->evt_len_upd = 0;
1198 #if defined(CONFIG_BT_CTLR_PHY)
1199 ready_delay = (lll->role) ?
1200 lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8) :
1201 lll_radio_tx_ready_delay_get(lll->phy_tx, lll->phy_flags);
1202 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1203 tx_time = lll->dle.eff.max_tx_time;
1204 rx_time = lll->dle.eff.max_rx_time;
1205 #else /* CONFIG_BT_CTLR_DATA_LENGTH */
1206
1207 tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1208 PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
1209 rx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1210 PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
1211 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1212 #else /* CONFIG_BT_CTLR_PHY */
1213 ready_delay = (lll->role) ?
1214 lll_radio_rx_ready_delay_get(0, 0) :
1215 lll_radio_tx_ready_delay_get(0, 0);
1216 tx_time = PDU_DC_MAX_US(lll->dle.eff.max_tx_octets, 0);
1217 rx_time = PDU_DC_MAX_US(lll->dle.eff.max_rx_octets, 0);
1218 #endif /* CONFIG_BT_CTLR_PHY */
1219 ticks_slot = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
1220 ready_delay +
1221 EVENT_IFS_US +
1222 rx_time +
1223 tx_time +
1224 4);
1225 if (ticks_slot > conn->ull.ticks_slot) {
1226 ticks_slot_plus = ticks_slot - conn->ull.ticks_slot;
1227 } else {
1228 ticks_slot_minus = conn->ull.ticks_slot - ticks_slot;
1229 }
1230 conn->ull.ticks_slot = ticks_slot;
1231 }
1232 #endif /* CONFIG_BT_CTLR_DATA_LENGTH || CONFIG_BT_CTLR_PHY */
1233 #else /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1234 ticks_slot_plus = 0;
1235 ticks_slot_minus = 0;
1236 #endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1237
1238 /* update conn ticker */
1239 if (ticks_drift_plus || ticks_drift_minus ||
1240 ticks_slot_plus || ticks_slot_minus ||
1241 lazy || force) {
1242 uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
1243 struct ll_conn *conn = lll->hdr.parent;
1244 uint32_t ticker_status;
1245
1246 /* Call to ticker_update can fail under the race
1247 * condition wherein the peripheral role is being stopped but
1248 * at the same time is preempted by a peripheral event that
1249 * gets into the close state. Accept failure when the peripheral role
1250 * is being stopped.
1251 */
1252 ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
1253 TICKER_USER_ID_ULL_HIGH,
1254 ticker_id,
1255 ticks_drift_plus, ticks_drift_minus,
1256 ticks_slot_plus, ticks_slot_minus,
1257 lazy, force,
1258 ticker_update_conn_op_cb,
1259 conn);
1260 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1261 (ticker_status == TICKER_STATUS_BUSY) ||
1262 ((void *)conn == ull_disable_mark_get()));
1263 }
1264 }
1265
1266 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1267 void ull_conn_lll_tx_demux_sched(struct lll_conn *lll)
1268 {
1269 static memq_link_t link;
1270 static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1271
1272 mfy.param = HDR_LLL2ULL(lll);
1273
1274 mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1U, &mfy);
1275 }
1276 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1277
1278 void ull_conn_tx_demux(uint8_t count)
1279 {
1280 do {
1281 struct lll_tx *lll_tx;
1282 struct ll_conn *conn;
1283
1284 lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
1285 if (!lll_tx) {
1286 break;
1287 }
1288
1289 conn = ll_connected_get(lll_tx->handle);
1290 if (conn) {
1291 struct node_tx *tx = lll_tx->node;
1292
1293 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1294 if (empty_data_start_release(conn, tx)) {
1295 goto ull_conn_tx_demux_release;
1296 }
1297 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1298
1299 ull_tx_q_enqueue_data(&conn->tx_q, tx);
1300 } else {
1301 struct node_tx *tx = lll_tx->node;
1302 struct pdu_data *p = (void *)tx->pdu;
1303
1304 p->ll_id = PDU_DATA_LLID_RESV;
1305 ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
1306 }
1307
1308 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1309 ull_conn_tx_demux_release:
1310 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1311
1312 MFIFO_DEQUEUE(conn_tx);
1313 } while (--count);
1314 }
1315
1316 void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
1317 {
1318 while (count--) {
1319 struct node_tx *tx;
1320 memq_link_t *link;
1321
1322 tx = tx_ull_dequeue(conn, NULL);
1323 if (!tx) {
1324 /* No more tx nodes available */
1325 break;
1326 }
1327
1328 link = mem_acquire(&mem_link_tx.free);
1329 LL_ASSERT(link);
1330
1331 /* Enqueue towards LLL */
1332 memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1333 }
1334 }
1335
1336 void ull_conn_link_tx_release(void *link)
1337 {
1338 mem_release(link, &mem_link_tx.free);
1339 }
1340
1341 uint8_t ull_conn_ack_last_idx_get(void)
1342 {
1343 return mfifo_conn_ack.l;
1344 }
1345
1346 memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
1347 struct node_tx **tx)
1348 {
1349 struct lll_tx *lll_tx;
1350
1351 lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
1352 if (!lll_tx) {
1353 return NULL;
1354 }
1355
1356 *ack_last = mfifo_conn_ack.l;
1357
1358 *handle = lll_tx->handle;
1359 *tx = lll_tx->node;
1360
1361 return (*tx)->link;
1362 }
1363
1364 memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
1365 struct node_tx **tx)
1366 {
1367 struct lll_tx *lll_tx;
1368
1369 lll_tx = mfifo_dequeue_get(mfifo_conn_ack.m, mfifo_conn_ack.s,
1370 mfifo_conn_ack.f, last);
1371 if (!lll_tx) {
1372 return NULL;
1373 }
1374
1375 *handle = lll_tx->handle;
1376 *tx = lll_tx->node;
1377
1378 return (*tx)->link;
1379 }
1380
1381 void *ull_conn_ack_dequeue(void)
1382 {
1383 return MFIFO_DEQUEUE(conn_ack);
1384 }
1385
1386 void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
1387 {
1388 struct lll_tx *lll_tx;
1389 uint8_t idx;
1390
1391 idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
1392 LL_ASSERT(lll_tx);
1393
1394 lll_tx->handle = handle;
1395 lll_tx->node = tx;
1396
1397 MFIFO_ENQUEUE(conn_ack, idx);
1398 }
1399
1400 void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
1401 {
1402 struct pdu_data *pdu_tx;
1403
1404 pdu_tx = (void *)tx->pdu;
1405 LL_ASSERT(pdu_tx->len);
1406
1407 if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1408 if (handle != LLL_HANDLE_INVALID) {
1409 struct ll_conn *conn = ll_conn_get(handle);
1410
1411 ull_cp_tx_ack(conn, tx);
1412 }
1413
1414 /* release ctrl mem if points to itself */
1415 if (link->next == (void *)tx) {
1416 LL_ASSERT(link->next);
1417
1418 struct ll_conn *conn = ll_connected_get(handle);
1419
1420 ull_cp_release_tx(conn, tx);
1421 return;
1422 } else if (!tx) {
1423 /* Tx Node re-used to enqueue new ctrl PDU */
1424 return;
1425 }
1426 LL_ASSERT(!link->next);
1427 } else if (handle == LLL_HANDLE_INVALID) {
1428 pdu_tx->ll_id = PDU_DATA_LLID_RESV;
1429 } else {
1430 LL_ASSERT(handle != LLL_HANDLE_INVALID);
1431 }
1432
1433 ll_tx_ack_put(handle, tx);
1434 }
1435
1436 uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
1437 {
1438 uint16_t max_tx_octets;
1439
1440 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1441 #if defined(CONFIG_BT_CTLR_PHY)
1442 switch (lll->phy_tx_time) {
1443 default:
1444 case PHY_1M:
1445 /* 1M PHY, 1us = 1 bit, hence divide by 8.
1446 * Deduct 10 bytes for preamble (1), access address (4),
1447 * header (2), and CRC (3).
1448 */
1449 max_tx_octets = (lll->dle.eff.max_tx_time >> 3) - 10;
1450 break;
1451
1452 case PHY_2M:
1453 /* 2M PHY, 1us = 2 bits, hence divide by 4.
1454 * Deduct 11 bytes for preamble (2), access address (4),
1455 * header (2), and CRC (3).
1456 */
1457 max_tx_octets = (lll->dle.eff.max_tx_time >> 2) - 11;
1458 break;
1459
1460 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1461 case PHY_CODED:
1462 if (lll->phy_flags & 0x01) {
1463 /* S8 Coded PHY, 8us = 1 bit, hence divide by
1464 * 64.
1465 * Subtract time for preamble (80), AA (256),
1466 * CI (16), TERM1 (24), CRC (192) and
1467 * TERM2 (24), total 592 us.
1468 * Subtract 2 bytes for header.
1469 */
1470 max_tx_octets = ((lll->dle.eff.max_tx_time - 592) >>
1471 6) - 2;
1472 } else {
1473 /* S2 Coded PHY, 2us = 1 bit, hence divide by
1474 * 16.
1475 * Subtract time for preamble (80), AA (256),
1476 * CI (16), TERM1 (24), CRC (48) and
1477 * TERM2 (6), total 430 us.
1478 * Subtract 2 bytes for header.
1479 */
1480 max_tx_octets = ((lll->dle.eff.max_tx_time - 430) >>
1481 4) - 2;
1482 }
1483 break;
1484 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1485 }
1486
1487 #if defined(CONFIG_BT_CTLR_LE_ENC)
1488 if (lll->enc_tx) {
1489 /* deduct the MIC */
1490 max_tx_octets -= 4U;
1491 }
1492 #endif /* CONFIG_BT_CTLR_LE_ENC */
1493
1494 if (max_tx_octets > lll->dle.eff.max_tx_octets) {
1495 max_tx_octets = lll->dle.eff.max_tx_octets;
1496 }
1497
1498 #else /* !CONFIG_BT_CTLR_PHY */
1499 max_tx_octets = lll->dle.eff.max_tx_octets;
1500 #endif /* !CONFIG_BT_CTLR_PHY */
1501 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
1502 max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1503 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
1504 return max_tx_octets;
1505 }
1506
1507 /**
1508 * @brief Initialize pdu_data members that are read only in lower link layer.
1509 *
1510 * @param pdu Pointer to pdu_data object to be initialized
1511 */
1512 void ull_pdu_data_init(struct pdu_data *pdu)
1513 {
1514 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1515 pdu->cp = 0U;
1516 pdu->octet3.resv[0] = 0U;
1517 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1518 }
1519
1520 static int init_reset(void)
1521 {
1522 /* Initialize conn pool. */
1523 mem_init(conn_pool, sizeof(struct ll_conn),
1524 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
1525
1526 /* Initialize tx pool. */
1527 mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONN_DATA_BUFFERS,
1528 &mem_conn_tx.free);
1529
1530 /* Initialize tx link pool. */
1531 mem_init(mem_link_tx.pool, sizeof(memq_link_t),
1532 (CONN_DATA_BUFFERS +
1533 LLCP_TX_CTRL_BUF_COUNT),
1534 &mem_link_tx.free);
1535
1536 /* Initialize control procedure system. */
1537 ull_cp_init();
1538
1539 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1540 /* Reset CPR mutex */
1541 cpr_active_reset();
1542 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1543
1544 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1545 /* Initialize the DLE defaults */
1546 default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1547 default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
1548 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1549
1550 #if defined(CONFIG_BT_CTLR_PHY)
1551 /* Initialize the PHY defaults */
1552 default_phy_tx = PHY_1M;
1553 default_phy_rx = PHY_1M;
1554
1555 #if defined(CONFIG_BT_CTLR_PHY_2M)
1556 default_phy_tx |= PHY_2M;
1557 default_phy_rx |= PHY_2M;
1558 #endif /* CONFIG_BT_CTLR_PHY_2M */
1559
1560 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1561 default_phy_tx |= PHY_CODED;
1562 default_phy_rx |= PHY_CODED;
1563 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1564 #endif /* CONFIG_BT_CTLR_PHY */
1565
1566 return 0;
1567 }
1568
1569 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1570 static void tx_demux_sched(struct ll_conn *conn)
1571 {
1572 static memq_link_t link;
1573 static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1574
1575 mfy.param = conn;
1576
1577 mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1578 }
1579 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
1580
1581 static void tx_demux(void *param)
1582 {
1583 ull_conn_tx_demux(1);
1584
1585 ull_conn_tx_lll_enqueue(param, 1);
1586 }
1587
1588 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *unused)
1589 {
1590 struct node_tx *tx = NULL;
1591
1592 tx = ull_tx_q_dequeue(&conn->tx_q);
1593 if (tx) {
1594 struct pdu_data *pdu_tx;
1595
1596 pdu_tx = (void *)tx->pdu;
1597 if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1598 /* Mark the tx node as belonging to the ctrl pool */
1599 tx->next = tx;
1600 } else {
1601 /* Mark the tx node as belonging to the data pool */
1602 tx->next = NULL;
1603 }
1604 }
1605 return tx;
1606 }
1607
1608 static void ticker_update_conn_op_cb(uint32_t status, void *param)
1609 {
1610 /* Peripheral drift compensation succeeds, or it fails in a race condition
1611 * when disconnecting or during a connection update (race between ticker_update
1612 * and ticker_stop calls).
1613 */
1614 LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1615 param == ull_update_mark_get() ||
1616 param == ull_disable_mark_get());
1617 }
1618
1619 static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1620 {
1621 void *p;
1622
1623 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1624
1625 p = ull_update_mark(param);
1626 LL_ASSERT(p == param);
1627 }
1628
1629 static void ticker_start_conn_op_cb(uint32_t status, void *param)
1630 {
1631 void *p;
1632
1633 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1634
1635 p = ull_update_unmark(param);
1636 LL_ASSERT(p == param);
1637 }
1638
1639 static void conn_setup_adv_scan_disabled_cb(void *param)
1640 {
1641 struct node_rx_ftr *ftr;
1642 struct node_rx_hdr *rx;
1643 struct lll_conn *lll;
1644
1645 /* NOTE: LLL conn context SHALL be after lll_hdr in
1646 * struct lll_adv and struct lll_scan.
1647 */
1648 rx = param;
1649 ftr = &(rx->rx_ftr);
1650 lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1651 sizeof(struct lll_hdr)));
1652
1653 if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1654 struct ull_hdr *hdr;
1655
1656 /* Prevent fast ADV re-scheduling from re-triggering */
1657 hdr = HDR_LLL2ULL(ftr->param);
1658 hdr->disabled_cb = NULL;
1659 }
1660
1661 switch (lll->role) {
1662 #if defined(CONFIG_BT_CENTRAL)
1663 case 0:
1664 ull_central_setup(rx, ftr, lll);
1665 break;
1666 #endif /* CONFIG_BT_CENTRAL */
1667
1668 #if defined(CONFIG_BT_PERIPHERAL)
1669 case 1:
1670 ull_periph_setup(rx, ftr, lll);
1671 break;
1672 #endif /* CONFIG_BT_PERIPHERAL */
1673
1674 default:
1675 LL_ASSERT(0);
1676 break;
1677 }
1678 }
1679
1680 static inline void disable(uint16_t handle)
1681 {
1682 struct ll_conn *conn;
1683 int err;
1684
1685 conn = ll_conn_get(handle);
1686
1687 err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1688 conn, &conn->lll);
1689 LL_ASSERT(err == 0 || err == -EALREADY);
1690
1691 conn->lll.handle = LLL_HANDLE_INVALID;
1692 conn->lll.link_tx_free = NULL;
1693 }
1694
1695 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1696 static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1697 {
1698 struct ll_conn_iso_stream *cis;
1699
1700 cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1701 if (cis) {
1702 struct node_rx_pdu *rx;
1703 uint8_t reason;
1704
1705 /* More associated CISes - stop next */
1706 rx = (void *)&conn->llcp_terminate.node_rx;
1707 reason = *(uint8_t *)rx->pdu;
1708
1709 ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1710 reason);
1711 } else {
1712 /* No more CISes associated with conn - finalize */
1713 conn_cleanup_finalize(conn);
1714 }
1715 }
1716 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1717
1718 static void conn_cleanup_finalize(struct ll_conn *conn)
1719 {
1720 struct lll_conn *lll = &conn->lll;
1721 uint32_t ticker_status;
1722
1723 ull_cp_state_set(conn, ULL_CP_DISCONNECTED);
1724
1725 /* Update tx buffer queue handling */
1726 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
1727 ull_cp_update_tx_buffer_queue(conn);
1728 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
1729 ull_cp_release_nodes(conn);
1730
1731 /* flush demux-ed Tx buffer still in ULL context */
1732 tx_ull_flush(conn);
1733
1734 /* Stop Central or Peripheral role ticker */
1735 ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1736 TICKER_USER_ID_ULL_HIGH,
1737 TICKER_ID_CONN_BASE + lll->handle,
1738 ticker_stop_op_cb, conn);
1739 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1740 (ticker_status == TICKER_STATUS_BUSY));
1741
1742 /* Invalidate the connection context */
1743 lll->handle = LLL_HANDLE_INVALID;
1744
1745 /* Demux and flush Tx PDUs that remain enqueued in thread context */
1746 ull_conn_tx_demux(UINT8_MAX);
1747 }
1748
1749 static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
1750 {
1751 struct node_rx_pdu *rx;
1752
1753 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1754 struct ll_conn_iso_stream *cis;
1755 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1756
1757 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1758 /* Reset CPR mutex */
1759 cpr_active_check_and_reset(conn);
1760 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1761
1762 /* Only the termination structure is populated here in the ULL context,
1763 * but the actual enqueue happens in the LLL context in
1764 * tx_lll_flush. This is done to avoid passing the reason
1765 * value and handle through the mayfly scheduling of the
1766 * tx_lll_flush.
1767 */
1768 rx = (void *)&conn->llcp_terminate.node_rx;
1769 rx->hdr.handle = conn->lll.handle;
1770 rx->hdr.type = NODE_RX_TYPE_TERMINATE;
1771 *((uint8_t *)rx->pdu) = reason;
1772
1773 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1774 cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1775 if (cis) {
1776 /* Stop CIS and defer cleanup to after teardown. */
1777 ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1778 reason);
1779 return;
1780 }
1781 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1782
1783 conn_cleanup_finalize(conn);
1784 }
1785
1786 static void tx_ull_flush(struct ll_conn *conn)
1787 {
1788 struct node_tx *tx;
1789
1790 ull_tx_q_resume_data(&conn->tx_q);
1791
1792 tx = tx_ull_dequeue(conn, NULL);
1793 while (tx) {
1794 memq_link_t *link;
1795
1796 link = mem_acquire(&mem_link_tx.free);
1797 LL_ASSERT(link);
1798
1799 /* Enqueue towards LLL */
1800 memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1801
1802 tx = tx_ull_dequeue(conn, NULL);
1803 }
1804 }
1805
1806 static void ticker_stop_op_cb(uint32_t status, void *param)
1807 {
1808 static memq_link_t link;
1809 static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
1810 uint32_t ret;
1811
1812 LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1813
1814 /* Check if there are any pending LLL events that need to be aborted */
1815 mfy.param = param;
1816 ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1817 TICKER_USER_ID_ULL_HIGH, 0, &mfy);
1818 LL_ASSERT(!ret);
1819 }
1820
1821 static void conn_disable(void *param)
1822 {
1823 struct ll_conn *conn;
1824 struct ull_hdr *hdr;
1825
1826 /* Check ref count to determine if any pending LLL events in pipeline */
1827 conn = param;
1828 hdr = &conn->ull;
1829 if (ull_ref_get(hdr)) {
1830 static memq_link_t link;
1831 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1832 uint32_t ret;
1833
1834 mfy.param = &conn->lll;
1835
1836 /* Setup disabled callback to be called when ref count
1837 * returns to zero.
1838 */
1839 LL_ASSERT(!hdr->disabled_cb);
1840 hdr->disabled_param = mfy.param;
1841 hdr->disabled_cb = disabled_cb;
1842
1843 /* Trigger LLL disable */
1844 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1845 TICKER_USER_ID_LLL, 0, &mfy);
1846 LL_ASSERT(!ret);
1847 } else {
1848 /* No pending LLL events */
1849 disabled_cb(&conn->lll);
1850 }
1851 }
1852
1853 static void disabled_cb(void *param)
1854 {
1855 static memq_link_t link;
1856 static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
1857 uint32_t ret;
1858
1859 mfy.param = param;
1860 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1861 TICKER_USER_ID_LLL, 0, &mfy);
1862 LL_ASSERT(!ret);
1863 }
1864
1865 static void tx_lll_flush(void *param)
1866 {
1867 struct node_rx_pdu *rx;
1868 struct lll_conn *lll;
1869 struct ll_conn *conn;
1870 struct node_tx *tx;
1871 memq_link_t *link;
1872 uint16_t handle;
1873
1874 /* Get reference to ULL context */
1875 lll = param;
1876 conn = HDR_LLL2ULL(lll);
1877 handle = ll_conn_handle_get(conn);
1878
1879 lll_conn_flush(handle, lll);
1880
1881 link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
1882 (void **)&tx);
1883 while (link) {
1884 uint8_t idx;
1885 struct lll_tx *tx_buf;
1886
1887 idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx_buf);
1888 LL_ASSERT(tx_buf);
1889
1890 tx_buf->handle = LLL_HANDLE_INVALID;
1891 tx_buf->node = tx;
1892
1893 /* TX node UPSTREAM, i.e. Tx node ack path */
1894 link->next = tx->next; /* Indicates ctrl pool or data pool */
1895 tx->next = link;
1896
1897 MFIFO_ENQUEUE(conn_ack, idx);
1898
1899 link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
1900 (void **)&tx);
1901 }
1902
1903 /* Get the terminate structure reserved in the connection context.
1904 * The terminate reason and connection handle should already have been
1905 * populated before this mayfly function was scheduled.
1906 */
1907 rx = (void *)&conn->llcp_terminate.node_rx;
1908 LL_ASSERT(rx->hdr.link);
1909 link = rx->hdr.link;
1910 rx->hdr.link = NULL;
1911
1912 /* Enqueue the terminate towards ULL context */
1913 ull_rx_put_sched(link, rx);
1914 }
1915
1916 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
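/* Release a zero-length DATA_START PDU immediately by placing it on the Tx
 * ack path, and remember to promote the next non-empty DATA_CONTINUE
 * fragment to DATA_START in its place.
 */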
1917 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
1918 {
1919 struct pdu_data *p = (void *)tx->pdu;
1920
1921 if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
1922 conn->start_empty = 1U;
1923
1924 ll_tx_ack_put(conn->lll.handle, tx);
1925
1926 return -EINVAL;
1927 } else if (p->len && conn->start_empty) {
1928 conn->start_empty = 0U;
1929
1930 if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
1931 p->ll_id = PDU_DATA_LLID_DATA_START;
1932 }
1933 }
1934
1935 return 0;
1936 }
1937 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1938
1939 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
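/* Roughly estimate how many additional connection events the MD (More Data)
 * bit should be forced for: compare the time needed for the host to supply
 * one maximum-size PDU at tx_rate with the on-air time of a maximum-size
 * PDU exchange, and derive the number of keep-alive (empty PDU) exchanges
 * needed to bridge the difference.
 */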
1940 static uint8_t force_md_cnt_calc(struct lll_conn *lll_connection, uint32_t tx_rate)
1941 {
1942 uint32_t time_incoming, time_outgoing;
1943 uint8_t force_md_cnt;
1944 uint8_t phy_flags;
1945 uint8_t mic_size;
1946 uint8_t phy;
1947
1948 #if defined(CONFIG_BT_CTLR_PHY)
1949 phy = lll_connection->phy_tx;
1950 phy_flags = lll_connection->phy_flags;
1951 #else /* !CONFIG_BT_CTLR_PHY */
1952 phy = PHY_1M;
1953 phy_flags = 0U;
1954 #endif /* !CONFIG_BT_CTLR_PHY */
1955
1956 #if defined(CONFIG_BT_CTLR_LE_ENC)
1957 mic_size = PDU_MIC_SIZE * lll_connection->enc_tx;
1958 #else /* !CONFIG_BT_CTLR_LE_ENC */
1959 mic_size = 0U;
1960 #endif /* !CONFIG_BT_CTLR_LE_ENC */
1961
1962 time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
1963 1000000UL / tx_rate;
1964 time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
1965 phy_flags) +
1966 PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
1967 (EVENT_IFS_US << 1);
1968
1969 force_md_cnt = 0U;
1970 if (time_incoming > time_outgoing) {
1971 uint32_t delta;
1972 uint32_t time_keep_alive;
1973
1974 delta = (time_incoming << 1) - time_outgoing;
1975 time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
1976 EVENT_IFS_US) << 1;
1977 force_md_cnt = (delta + (time_keep_alive - 1)) /
1978 time_keep_alive;
1979 LOG_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
1980 "keepalive= %u, force_md_cnt = %u.",
1981 time_incoming, time_outgoing, delta, time_keep_alive,
1982 force_md_cnt);
1983 }
1984
1985 return force_md_cnt;
1986 }
1987 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
1988
1989 #if defined(CONFIG_BT_CTLR_LE_ENC)
1990 /**
1991 * @brief Pause the data path of a rx queue.
1992 */
1993 void ull_conn_pause_rx_data(struct ll_conn *conn)
1994 {
1995 conn->pause_rx_data = 1U;
1996 }
1997
1998 /**
1999 * @brief Resume the data path of a rx queue.
2000 */
2001 void ull_conn_resume_rx_data(struct ll_conn *conn)
2002 {
2003 conn->pause_rx_data = 0U;
2004 }
2005 #endif /* CONFIG_BT_CTLR_LE_ENC */
2006
2007 uint16_t ull_conn_event_counter(struct ll_conn *conn)
2008 {
2009 struct lll_conn *lll;
2010 uint16_t event_counter;
2011
2012 lll = &conn->lll;
2013
2014 /* Calculate current event counter. If refcount is non-zero, we have called
2015 * prepare and the LLL implementation has calculated and incremented the event
2016 * counter (RX path). In this case we need to subtract one from the current
2017 * event counter.
2018 * Otherwise we are in the TX path, and we calculate the current event counter
2019 * the same way LLL does, by taking the expected event counter value plus the
2020 * accumulated latency.
2021 */
2022 if (ull_ref_get(&conn->ull)) {
2023 /* We are in post-prepare (RX path). Event counter is already
2024 * calculated and incremented by 1 for next event.
2025 */
2026 event_counter = lll->event_counter - 1;
2027 } else {
2028 event_counter = lll->event_counter + lll->latency_prepare +
2029 conn->llcp.prep.lazy;
2030 }
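/* Illustrative example with hypothetical values: in the TX path, with
 * lll->event_counter = 10, latency_prepare = 2 and llcp.prep.lazy = 1, the
 * current event counter is 13; in the RX path, where LLL has already advanced
 * event_counter to 11 for the next event, the value returned is 10.
 */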
2031
2032 return event_counter;
2033 }
2034 static void ull_conn_update_ticker(struct ll_conn *conn,
2035 uint32_t ticks_win_offset,
2036 uint32_t ticks_slot_overhead,
2037 uint32_t periodic_us,
2038 uint32_t ticks_at_expire)
2039 {
2040 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2041 /* Disable the ticker job so that the stop and start operations are chained,
2042 * avoiding the RTC being stopped if no other tickers are active.
2043 */
2044 uint32_t mayfly_was_enabled =
2045 mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW);
2046
2047 mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0U);
2048 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2049
2050 /* start periph/central with new timings */
2051 uint8_t ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
2052 uint32_t ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2053 ticker_id_conn, ticker_stop_conn_op_cb, (void *)conn);
2054 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2055 (ticker_status == TICKER_STATUS_BUSY));
2056 ticker_status = ticker_start(
2057 TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, ticker_id_conn, ticks_at_expire,
2058 ticks_win_offset, HAL_TICKER_US_TO_TICKS(periodic_us),
2059 HAL_TICKER_REMAINDER(periodic_us),
2060 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2061 TICKER_NULL_LAZY,
2062 #else /* !CONFIG_BT_TICKER_LOW_LAT */
2063 TICKER_LAZY_MUST_EXPIRE_KEEP,
2064 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2065 (ticks_slot_overhead + conn->ull.ticks_slot),
2066 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
2067 conn->lll.role == BT_HCI_ROLE_PERIPHERAL ?
2068 ull_periph_ticker_cb : ull_central_ticker_cb,
2069 #elif defined(CONFIG_BT_PERIPHERAL)
2070 ull_periph_ticker_cb,
2071 #else
2072 ull_central_ticker_cb,
2073 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CENTRAL */
2074 conn, ticker_start_conn_op_cb, (void *)conn);
2075 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2076 (ticker_status == TICKER_STATUS_BUSY));
2077
2078 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2079 /* enable ticker job, if disabled in this function */
2080 if (mayfly_was_enabled) {
2081 mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U);
2082 }
2083 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2084 }
2085
2086 void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
2087 uint32_t win_offset_us, uint16_t interval, uint16_t latency,
2088 uint16_t timeout, uint16_t instant)
2089 {
2090 struct lll_conn *lll;
2091 uint32_t ticks_win_offset = 0U;
2092 uint32_t ticks_slot_overhead;
2093 uint16_t conn_interval_old;
2094 uint16_t conn_interval_new;
2095 uint32_t conn_interval_us;
2096 uint32_t periodic_us;
2097 uint16_t latency_upd;
2098 uint16_t instant_latency;
2099 uint16_t event_counter;
2100 uint32_t ticks_at_expire;
2101
2102 lll = &conn->lll;
2103
2104 /* Calculate current event counter */
2105 event_counter = ull_conn_event_counter(conn);
2106
2107 instant_latency = (event_counter - instant) & 0xFFFF;
2108
2109
2110 ticks_at_expire = conn->llcp.prep.ticks_at_expire;
2111
2112 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
2113 /* restore to normal prepare */
2114 if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
2115 uint32_t ticks_prepare_to_start =
2116 MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_preempt_to_start);
2117
2118 conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;
2119
2120 ticks_at_expire -= (conn->ull.ticks_prepare_to_start - ticks_prepare_to_start);
2121 }
2122 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
2123
2124 /* compensate for instant_latency due to laziness */
2125 conn_interval_old = instant_latency * lll->interval;
2126 latency_upd = conn_interval_old / interval;
2127 conn_interval_new = latency_upd * interval;
2128 if (conn_interval_new > conn_interval_old) {
2129 ticks_at_expire += HAL_TICKER_US_TO_TICKS((conn_interval_new - conn_interval_old) *
2130 CONN_INT_UNIT_US);
2131 } else {
2132 ticks_at_expire -= HAL_TICKER_US_TO_TICKS((conn_interval_old - conn_interval_new) *
2133 CONN_INT_UNIT_US);
2134 }
2135
2136 lll->latency_prepare += conn->llcp.prep.lazy;
2137 lll->latency_prepare -= (instant_latency - latency_upd);
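/* Illustrative example with hypothetical values: with a previous interval of
 * 40 units, a new interval of 36 units and instant_latency = 2,
 * conn_interval_old = 80 units, latency_upd = 80 / 36 = 2 and
 * conn_interval_new = 72 units; ticks_at_expire is then moved earlier by
 * 8 * CONN_INT_UNIT_US and the (instant_latency - latency_upd) correction to
 * latency_prepare is zero.
 */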
2138
2139 /* calculate the offset */
2140 if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
2141 ticks_slot_overhead =
2142 MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_prepare_to_start);
2143 } else {
2144 ticks_slot_overhead = 0U;
2145 }
2146
2147 /* calculate the window widening and interval */
2148 conn_interval_us = interval * CONN_INT_UNIT_US;
2149 periodic_us = conn_interval_us;
2150
2151 switch (lll->role) {
2152 #if defined(CONFIG_BT_PERIPHERAL)
2153 case BT_HCI_ROLE_PERIPHERAL:
2154 lll->periph.window_widening_prepare_us -=
2155 lll->periph.window_widening_periodic_us * instant_latency;
2156
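/* Window widening per connection interval is
 * ceil((local SCA ppm + peer SCA ppm) * conn_interval_us / 1000000) us; the
 * accumulated prepare value is capped at half the connection interval minus
 * the inter frame space.
 */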
2157 lll->periph.window_widening_periodic_us =
2158 DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2159 lll_clock_ppm_get(conn->periph.sca)) *
2160 conn_interval_us), 1000000U);
2161 lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
2162 lll->periph.window_size_prepare_us = win_size * CONN_INT_UNIT_US;
2163
2164 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2165 conn->periph.ticks_to_offset = 0U;
2166 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2167
2168 lll->periph.window_widening_prepare_us +=
2169 lll->periph.window_widening_periodic_us * latency_upd;
2170 if (lll->periph.window_widening_prepare_us > lll->periph.window_widening_max_us) {
2171 lll->periph.window_widening_prepare_us = lll->periph.window_widening_max_us;
2172 }
2173
2174 ticks_at_expire -= HAL_TICKER_US_TO_TICKS(lll->periph.window_widening_periodic_us *
2175 latency_upd);
2176 ticks_win_offset = HAL_TICKER_US_TO_TICKS((win_offset_us / CONN_INT_UNIT_US) *
2177 CONN_INT_UNIT_US);
2178 periodic_us -= lll->periph.window_widening_periodic_us;
2179 break;
2180 #endif /* CONFIG_BT_PERIPHERAL */
2181 #if defined(CONFIG_BT_CENTRAL)
2182 case BT_HCI_ROLE_CENTRAL:
2183 ticks_win_offset = HAL_TICKER_US_TO_TICKS(win_offset_us);
2184
2185 /* Workaround: The ticker_start function has no remainder
2186 * parameter for the first interval; add one tick so that the
2187 * ceiled value is used.
2188 */
2189 ticks_win_offset += 1U;
2190 break;
2191 #endif /*CONFIG_BT_CENTRAL */
2192 default:
2193 LL_ASSERT(0);
2194 break;
2195 }
2196
2197 lll->interval = interval;
2198 lll->latency = latency;
2199
2200 conn->supervision_timeout = timeout;
2201 ull_cp_prt_reload_set(conn, conn_interval_us);
2202
2203 #if defined(CONFIG_BT_CTLR_LE_PING)
2204 /* APTO in no. of connection events */
2205 conn->apto_reload = RADIO_CONN_EVENTS((30U * 1000U * 1000U), conn_interval_us);
2206 /* Dispatch the LE Ping PDU 6 connection events (that the peer
2207 * would listen to) before the 30 s timeout.
2208 * TODO: the time the peer actually listens for is greater than 30 s due to latency
2209 */
2210 conn->appto_reload = (conn->apto_reload > (lll->latency + 6U)) ?
2211 (conn->apto_reload - (lll->latency + 6U)) :
2212 conn->apto_reload;
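/* Illustrative example with hypothetical values: with a 50 ms connection
 * interval, apto_reload = 600 connection events; with zero peripheral latency,
 * appto_reload = 600 - 6 = 594 events.
 */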
2213 #endif /* CONFIG_BT_CTLR_LE_PING */
2214
2215 if (is_cu_proc) {
2216 conn->supervision_expire = 0U;
2217 }
2218
2219 /* Update ACL ticker */
2220 ull_conn_update_ticker(conn, ticks_win_offset, ticks_slot_overhead, periodic_us,
2221 ticks_at_expire);
2222 /* Signal that the prepare needs to be canceled */
2223 conn->cancel_prepare = 1U;
2224 }
2225
2226 #if defined(CONFIG_BT_PERIPHERAL)
2227 void ull_conn_update_peer_sca(struct ll_conn *conn)
2228 {
2229 struct lll_conn *lll;
2230
2231 uint32_t conn_interval_us;
2232 uint32_t periodic_us;
2233
2234 lll = &conn->lll;
2235
2236 /* calculate the window widening and interval */
2237 conn_interval_us = lll->interval * CONN_INT_UNIT_US;
2238 periodic_us = conn_interval_us;
2239
2240 lll->periph.window_widening_periodic_us =
2241 DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2242 lll_clock_ppm_get(conn->periph.sca)) *
2243 conn_interval_us), 1000000U);
2244
2245 periodic_us -= lll->periph.window_widening_periodic_us;
2246
2247 /* Update ACL ticker */
2248 ull_conn_update_ticker(conn, HAL_TICKER_US_TO_TICKS(periodic_us), 0, periodic_us,
2249 conn->llcp.prep.ticks_at_expire);
2250
2251 }
2252 #endif /* CONFIG_BT_PERIPHERAL */
2253
2254 void ull_conn_chan_map_set(struct ll_conn *conn, const uint8_t chm[5])
2255 {
2256 struct lll_conn *lll = &conn->lll;
2257
2258 memcpy(lll->data_chan_map, chm, sizeof(lll->data_chan_map));
2259 lll->data_chan_count = util_ones_count_get(lll->data_chan_map, sizeof(lll->data_chan_map));
2260 }
2261
2262 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2263 static inline void dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2264 uint16_t *max_tx_time)
2265 {
2266 uint8_t phy_select = PHY_1M;
2267 uint16_t rx_time = 0U;
2268 uint16_t tx_time = 0U;
2269
2270 #if defined(CONFIG_BT_CTLR_PHY)
2271 if (conn->llcp.fex.valid && feature_phy_coded(conn)) {
2272 /* If coded PHY is supported on the connection
2273 * this will define the max times
2274 */
2275 phy_select = PHY_CODED;
2276 /* If not, max times should be defined by 1M timing */
2277 }
2278 #endif
2279
2280 rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select);
2281
2282 #if defined(CONFIG_BT_CTLR_PHY)
2283 tx_time = MIN(conn->lll.dle.default_tx_time,
2284 PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select));
2285 #else /* !CONFIG_BT_CTLR_PHY */
2286 tx_time = PDU_DC_MAX_US(conn->lll.dle.default_tx_octets, phy_select);
2287 #endif /* !CONFIG_BT_CTLR_PHY */
2288
2289 /*
2290 * See Core Spec Vol 6, Part B, Section 4.5.10:
2291 * the minimum value for the time parameters is 328 us.
2292 */
2293 rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
2294 tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);
2295
2296 *max_rx_time = rx_time;
2297 *max_tx_time = tx_time;
2298 }
2299
2300 void ull_dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2301 uint16_t *max_tx_time)
2302 {
2303 return dle_max_time_get(conn, max_rx_time, max_tx_time);
2304 }
2305
2306 /*
2307 * TODO: this can probably be optimised, e.g. by creating a macro for the
2308 * ull_dle_update_eff function.
2309 */
2310 uint8_t ull_dle_update_eff(struct ll_conn *conn)
2311 {
2312 uint8_t dle_changed = 0U;
2313
2314 /* Note that we must use bitwise OR (not logical OR) so that both the RX and TX updates are always evaluated */
2315 dle_changed = ull_dle_update_eff_rx(conn);
2316 dle_changed |= ull_dle_update_eff_tx(conn);
2317 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2318 if (dle_changed) {
2319 conn->lll.evt_len_upd = 1U;
2320 }
2321 #endif
2322
2323
2324 return dle_changed;
2325 }
2326
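/* Effective data length parameters follow Core Spec Vol 6, Part B,
 * Section 4.5.10: effective value = MAX(MIN(local value, remote value),
 * spec minimum), where the minimum time is 328 us, or 2704 us when the
 * Coded PHY is in use. The TX direction below applies the same rule.
 */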
2327 uint8_t ull_dle_update_eff_rx(struct ll_conn *conn)
2328 {
2329 uint8_t dle_changed = 0U;
2330
2331 const uint16_t eff_rx_octets =
2332 MAX(MIN(conn->lll.dle.local.max_rx_octets, conn->lll.dle.remote.max_tx_octets),
2333 PDU_DC_PAYLOAD_SIZE_MIN);
2334
2335 #if defined(CONFIG_BT_CTLR_PHY)
2336 unsigned int min_eff_rx_time = (conn->lll.phy_rx == PHY_CODED) ?
2337 PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2338
2339 const uint16_t eff_rx_time =
2340 MAX(MIN(conn->lll.dle.local.max_rx_time, conn->lll.dle.remote.max_tx_time),
2341 min_eff_rx_time);
2342
2343 if (eff_rx_time != conn->lll.dle.eff.max_rx_time) {
2344 conn->lll.dle.eff.max_rx_time = eff_rx_time;
2345 dle_changed = 1U;
2346 }
2347 #else
2348 conn->lll.dle.eff.max_rx_time = PDU_DC_MAX_US(eff_rx_octets, PHY_1M);
2349 #endif
2350
2351 if (eff_rx_octets != conn->lll.dle.eff.max_rx_octets) {
2352 conn->lll.dle.eff.max_rx_octets = eff_rx_octets;
2353 dle_changed = 1U;
2354 }
2355 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2356 /* We delay the update of the event length until after the DLE procedure has finished */
2357 if (dle_changed) {
2358 conn->lll.evt_len_upd_delayed = 1;
2359 }
2360 #endif
2361
2362 return dle_changed;
2363 }
2364
2365 uint8_t ull_dle_update_eff_tx(struct ll_conn *conn)
2366
2367 {
2368 uint8_t dle_changed = 0U;
2369
2370 const uint16_t eff_tx_octets =
2371 MAX(MIN(conn->lll.dle.local.max_tx_octets, conn->lll.dle.remote.max_rx_octets),
2372 PDU_DC_PAYLOAD_SIZE_MIN);
2373
2374 #if defined(CONFIG_BT_CTLR_PHY)
2375 unsigned int min_eff_tx_time = (conn->lll.phy_tx == PHY_CODED) ?
2376 PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2377
2378 const uint16_t eff_tx_time =
2379 MAX(MIN(conn->lll.dle.local.max_tx_time, conn->lll.dle.remote.max_rx_time),
2380 min_eff_tx_time);
2381
2382 if (eff_tx_time != conn->lll.dle.eff.max_tx_time) {
2383 conn->lll.dle.eff.max_tx_time = eff_tx_time;
2384 dle_changed = 1U;
2385 }
2386 #else
2387 conn->lll.dle.eff.max_tx_time = PDU_DC_MAX_US(eff_tx_octets, PHY_1M);
2388 #endif
2389
2390 if (eff_tx_octets != conn->lll.dle.eff.max_tx_octets) {
2391 conn->lll.dle.eff.max_tx_octets = eff_tx_octets;
2392 dle_changed = 1U;
2393 }
2394
2395 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2396 if (dle_changed) {
2397 conn->lll.evt_len_upd = 1U;
2398 }
2399 conn->lll.evt_len_upd |= conn->lll.evt_len_upd_delayed;
2400 conn->lll.evt_len_upd_delayed = 0;
2401 #endif
2402
2403 return dle_changed;
2404 }
2405
2406 static void ull_len_data_length_trim(uint16_t *tx_octets, uint16_t *tx_time)
2407 {
2408 #if defined(CONFIG_BT_CTLR_PHY_CODED)
2409 uint16_t tx_time_max =
2410 PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
2411 #else /* !CONFIG_BT_CTLR_PHY_CODED */
2412 uint16_t tx_time_max =
2413 PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
2414 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
2415
2416 /* trim to supported values */
2417 if (*tx_octets > LL_LENGTH_OCTETS_TX_MAX) {
2418 *tx_octets = LL_LENGTH_OCTETS_TX_MAX;
2419 }
2420
2421 if (*tx_time > tx_time_max) {
2422 *tx_time = tx_time_max;
2423 }
2424 }
2425
2426 void ull_dle_local_tx_update(struct ll_conn *conn, uint16_t tx_octets, uint16_t tx_time)
2427 {
2428 /* Trim to supported values */
2429 ull_len_data_length_trim(&tx_octets, &tx_time);
2430
2431 conn->lll.dle.default_tx_octets = tx_octets;
2432
2433 #if defined(CONFIG_BT_CTLR_PHY)
2434 conn->lll.dle.default_tx_time = tx_time;
2435 #endif /* CONFIG_BT_CTLR_PHY */
2436
2437 dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time, &conn->lll.dle.local.max_tx_time);
2438 conn->lll.dle.local.max_tx_octets = conn->lll.dle.default_tx_octets;
2439 }
2440
2441 void ull_dle_init(struct ll_conn *conn, uint8_t phy)
2442 {
2443 #if defined(CONFIG_BT_CTLR_PHY)
2444 const uint16_t max_time_min = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy);
2445 const uint16_t max_time_max = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy);
2446 #endif /* CONFIG_BT_CTLR_PHY */
2447
2448 /* Clear DLE data set */
2449 memset(&conn->lll.dle, 0, sizeof(conn->lll.dle));
2450 /* See BT. 5.2 Spec - Vol 6, Part B, Sect 4.5.10
2451 * Default to locally max supported rx/tx length/time
2452 */
2453 ull_dle_local_tx_update(conn, default_tx_octets, default_tx_time);
2454
2455 conn->lll.dle.local.max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
2456 #if defined(CONFIG_BT_CTLR_PHY)
2457 conn->lll.dle.local.max_rx_time = max_time_max;
2458 #endif /* CONFIG_BT_CTLR_PHY */
2459
2460 /* Default to minimum rx/tx data length/time */
2461 conn->lll.dle.remote.max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2462 conn->lll.dle.remote.max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2463
2464 #if defined(CONFIG_BT_CTLR_PHY)
2465 conn->lll.dle.remote.max_tx_time = max_time_min;
2466 conn->lll.dle.remote.max_rx_time = max_time_min;
2467 #endif /* CONFIG_BT_CTLR_PHY */
2468
2469 /*
2470 * Per Bluetooth Core Specification version 5.3, Vol 6,
2471 * Part B, Section 4.5.10, ull_dle_update_eff can be called
2472 * here for initialisation.
2473 */
2474 (void)ull_dle_update_eff(conn);
2475
2476 /* Check whether the controller should perform a data length update after
2477 * the connection is established
2478 */
2479 #if defined(CONFIG_BT_CTLR_PHY)
2480 if ((conn->lll.dle.local.max_rx_time != max_time_min ||
2481 conn->lll.dle.local.max_tx_time != max_time_min)) {
2482 conn->lll.dle.update = 1;
2483 } else
2484 #endif
2485 {
2486 if (conn->lll.dle.local.max_tx_octets != PDU_DC_PAYLOAD_SIZE_MIN ||
2487 conn->lll.dle.local.max_rx_octets != PDU_DC_PAYLOAD_SIZE_MIN) {
2488 conn->lll.dle.update = 1;
2489 }
2490 }
2491 }
2492
2493 void ull_conn_default_tx_octets_set(uint16_t tx_octets)
2494 {
2495 default_tx_octets = tx_octets;
2496 }
2497
2498 void ull_conn_default_tx_time_set(uint16_t tx_time)
2499 {
2500 default_tx_time = tx_time;
2501 }
2502 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2503
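/* Return 1 if any PHY in the phys bitmask is currently in use for TX or RX on
 * the connection, else 0; without CONFIG_BT_CTLR_PHY only the 1M PHY (bit 0)
 * is considered.
 */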
2504 uint8_t ull_conn_lll_phy_active(struct ll_conn *conn, uint8_t phys)
2505 {
2506 #if defined(CONFIG_BT_CTLR_PHY)
2507 if (!(phys & (conn->lll.phy_tx | conn->lll.phy_rx))) {
2508 #else /* !CONFIG_BT_CTLR_PHY */
2509 if (!(phys & 0x01)) {
2510 #endif /* !CONFIG_BT_CTLR_PHY */
2511 return 0;
2512 }
2513 return 1;
2514 }
2515
2516 uint8_t ull_is_lll_tx_queue_empty(struct ll_conn *conn)
2517 {
2518 return (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail, NULL) == NULL);
2519 }
2520