1 /*
2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stddef.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/sys/byteorder.h>
12 
13 #include "hal/cpu.h"
14 #include "hal/ecb.h"
15 #include "hal/ccm.h"
16 #include "hal/ticker.h"
17 
18 #include "util/util.h"
19 #include "util/mem.h"
20 #include "util/memq.h"
21 #include "util/mfifo.h"
22 #include "util/mayfly.h"
23 #include "util/dbuf.h"
24 
25 #include "ticker/ticker.h"
26 
27 #include "pdu_df.h"
28 #include "lll/pdu_vendor.h"
29 #include "pdu.h"
30 
31 #include "lll.h"
32 #include "lll_clock.h"
33 #include "lll/lll_df_types.h"
34 #include "lll_conn.h"
35 #include "lll_conn_iso.h"
36 #include "lll/lll_vendor.h"
37 
38 #include "ll_sw/ull_tx_queue.h"
39 
40 #include "isoal.h"
41 #include "ull_iso_types.h"
42 #include "ull_conn_types.h"
43 #include "ull_conn_iso_types.h"
44 
45 #if defined(CONFIG_BT_CTLR_USER_EXT)
46 #include "ull_vendor.h"
47 #endif /* CONFIG_BT_CTLR_USER_EXT */
48 
49 #include "ull_internal.h"
50 #include "ull_llcp_internal.h"
51 #include "ull_sched_internal.h"
52 #include "ull_chan_internal.h"
53 #include "ull_conn_internal.h"
54 #include "ull_peripheral_internal.h"
55 #include "ull_central_internal.h"
56 
57 #include "ull_iso_internal.h"
58 #include "ull_conn_iso_internal.h"
59 #include "ull_peripheral_iso_internal.h"
60 
61 
62 #include "ll.h"
63 #include "ll_feat.h"
64 #include "ll_settings.h"
65 
66 #include "ll_sw/ull_llcp.h"
67 #include "ll_sw/ull_llcp_features.h"
68 
69 #include "hal/debug.h"
70 
71 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
72 #include <zephyr/logging/log.h>
73 LOG_MODULE_REGISTER(bt_ctlr_ull_conn);
74 
75 static int init_reset(void);
76 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
77 static void tx_demux_sched(struct ll_conn *conn);
78 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
79 static void tx_demux(void *param);
80 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);
81 
82 static void ticker_update_conn_op_cb(uint32_t status, void *param);
83 static void ticker_stop_conn_op_cb(uint32_t status, void *param);
84 static void ticker_start_conn_op_cb(uint32_t status, void *param);
85 
86 static void conn_setup_adv_scan_disabled_cb(void *param);
87 static inline void disable(uint16_t handle);
88 static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
89 static void conn_cleanup_finalize(struct ll_conn *conn);
90 static void tx_ull_flush(struct ll_conn *conn);
91 static void ticker_stop_op_cb(uint32_t status, void *param);
92 static void conn_disable(void *param);
93 static void disabled_cb(void *param);
94 static void tx_lll_flush(void *param);
95 
96 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
97 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
98 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
99 
100 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* Connection context pointer used as a CPR mutex to serialize connection
 * parameter request procedures across simultaneous connections so that
 * offsets exchanged with the peer do not get changed.
 */
105 struct ll_conn *conn_upd_curr;
106 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
107 
108 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
109 static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
110 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
111 
112 #if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
113 #define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
114 #endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */
115 
116 #define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
117 				offsetof(struct pdu_data, lldata) + \
118 				(LL_LENGTH_OCTETS_TX_MAX + \
119 				BT_CTLR_USER_TX_BUFFER_OVERHEAD))
120 
121 #define CONN_DATA_BUFFERS CONFIG_BT_BUF_ACL_TX_COUNT
122 
123 static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONN_DATA_BUFFERS);
124 static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
125 		    (CONN_DATA_BUFFERS +
126 		     LLCP_TX_CTRL_BUF_COUNT));
127 
128 static struct {
129 	void *free;
130 	uint8_t pool[CONN_TX_BUF_SIZE * CONN_DATA_BUFFERS];
131 } mem_conn_tx;
132 
133 static struct {
134 	void *free;
135 	uint8_t pool[sizeof(memq_link_t) *
136 		     (CONN_DATA_BUFFERS +
137 		      LLCP_TX_CTRL_BUF_COUNT)];
138 } mem_link_tx;
139 
140 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
141 static uint16_t default_tx_octets;
142 static uint16_t default_tx_time;
143 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
144 
145 #if defined(CONFIG_BT_CTLR_PHY)
146 static uint8_t default_phy_tx;
147 static uint8_t default_phy_rx;
148 #endif /* CONFIG_BT_CTLR_PHY */
149 
150 static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
151 static void *conn_free;
152 
struct ll_conn *ll_conn_acquire(void)
154 {
155 	return mem_acquire(&conn_free);
156 }
157 
void ll_conn_release(struct ll_conn *conn)
159 {
160 	mem_release(conn, &conn_free);
161 }
162 
uint16_t ll_conn_handle_get(struct ll_conn *conn)
164 {
165 	return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
166 }
167 
struct ll_conn *ll_conn_get(uint16_t handle)
169 {
170 	return mem_get(conn_pool, sizeof(struct ll_conn), handle);
171 }
172 
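/* Return the connection context for a handle only if it refers to an
 * established connection, i.e. the LLL handle matches; otherwise NULL.
 */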
struct ll_conn *ll_connected_get(uint16_t handle)
174 {
175 	struct ll_conn *conn;
176 
177 	if (handle >= CONFIG_BT_MAX_CONN) {
178 		return NULL;
179 	}
180 
181 	conn = ll_conn_get(handle);
182 	if (conn->lll.handle != handle) {
183 		return NULL;
184 	}
185 
186 	return conn;
187 }
188 
uint16_t ll_conn_free_count_get(void)
190 {
191 	return mem_free_count_get(conn_free);
192 }
193 
void *ll_tx_mem_acquire(void)
195 {
196 	return mem_acquire(&mem_conn_tx.free);
197 }
198 
void ll_tx_mem_release(void *tx)
200 {
201 	mem_release(tx, &mem_conn_tx.free);
202 }
203 
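/* Enqueue an acquired Tx buffer towards the connection identified by the
 * handle: the node is placed in the common conn_tx FIFO and, when the
 * connection's ULL context is active, demultiplexing into the
 * per-connection Tx queue is scheduled.
 */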
int ll_tx_mem_enqueue(uint16_t handle, void *tx)
205 {
206 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
207 #define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
208 	static uint32_t tx_rate;
209 	static uint32_t tx_cnt;
210 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
211 	struct lll_tx *lll_tx;
212 	struct ll_conn *conn;
213 	uint8_t idx;
214 
215 	conn = ll_connected_get(handle);
216 	if (!conn) {
217 		return -EINVAL;
218 	}
219 
220 	idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
221 	if (!lll_tx) {
222 		return -ENOBUFS;
223 	}
224 
225 	lll_tx->handle = handle;
226 	lll_tx->node = tx;
227 
228 	MFIFO_ENQUEUE(conn_tx, idx);
229 
230 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
231 	if (ull_ref_get(&conn->ull)) {
232 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
233 		if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
234 			uint8_t previous, force_md_cnt;
235 
236 			force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
237 			previous = lll_conn_force_md_cnt_set(force_md_cnt);
238 			if (previous != force_md_cnt) {
239 				LOG_INF("force_md_cnt: old= %u, new= %u.", previous, force_md_cnt);
240 			}
241 		}
242 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
243 
244 		tx_demux_sched(conn);
245 
246 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
247 	} else {
248 		lll_conn_force_md_cnt_set(0U);
249 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
250 	}
251 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
252 
253 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
254 		ull_periph_latency_cancel(conn, handle);
255 	}
256 
257 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
258 	static uint32_t last_cycle_stamp;
259 	static uint32_t tx_len;
260 	struct pdu_data *pdu;
261 	uint32_t cycle_stamp;
262 	uint64_t delta;
263 
264 	cycle_stamp = k_cycle_get_32();
265 	delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
266 	if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
267 		LOG_INF("incoming Tx: count= %u, len= %u, rate= %u bps.", tx_cnt, tx_len, tx_rate);
268 
269 		last_cycle_stamp = cycle_stamp;
270 		tx_cnt = 0U;
271 		tx_len = 0U;
272 	}
273 
274 	pdu = (void *)((struct node_tx *)tx)->pdu;
275 	tx_len += pdu->len;
276 	if (delta == 0) { /* Let's avoid a division by 0 if we happen to have a really fast HCI IF*/
277 		delta = 1;
278 	}
279 	tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
280 	tx_cnt++;
281 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
282 
283 	return 0;
284 }
285 
uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
		    uint16_t interval_max, uint16_t latency, uint16_t timeout, uint16_t *offset)
288 {
289 	struct ll_conn *conn;
290 
291 	conn = ll_connected_get(handle);
292 	if (!conn) {
293 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
294 	}
295 
296 	if (cmd == 0U) {
297 		uint8_t err;
298 
299 		err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout,
300 					 offset);
301 		if (err) {
302 			return err;
303 		}
304 
305 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
306 		    conn->lll.role) {
307 			ull_periph_latency_cancel(conn, handle);
308 		}
309 	} else if (cmd == 2U) {
310 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
311 		if (status == 0U) {
312 			ull_cp_conn_param_req_reply(conn);
313 		} else {
314 			ull_cp_conn_param_req_neg_reply(conn, status);
315 		}
316 		return BT_HCI_ERR_SUCCESS;
317 #else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
318 		/* CPR feature not supported */
319 		return BT_HCI_ERR_CMD_DISALLOWED;
320 #endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
321 	} else {
322 		return BT_HCI_ERR_UNKNOWN_CMD;
323 	}
324 
325 	return 0;
326 }
327 
uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
329 {
330 	struct ll_conn *conn;
331 
332 	conn = ll_connected_get(handle);
333 	if (!conn) {
334 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
335 	}
336 
	/*
	 * Core Spec 5.2, Vol 4, Part E, Section 7.8.20:
	 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
	 * for the specified Connection_Handle. The returned value indicates the state of
	 * the Channel_Map specified by the last transmitted or received Channel_Map
	 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
	 * Connection_Handle, regardless of whether the Central has received an
	 * acknowledgment.
	 */
346 	const uint8_t *pending_chm;
347 
348 	pending_chm = ull_cp_chan_map_update_pending(conn);
349 	if (pending_chm) {
350 		memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
351 	} else {
352 		memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
353 	}
354 
355 	return 0;
356 }
357 
358 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
uint8_t ll_req_peer_sca(uint16_t handle)
360 {
361 	struct ll_conn *conn;
362 
363 	conn = ll_connected_get(handle);
364 	if (!conn) {
365 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
366 	}
367 
368 	return ull_cp_req_peer_sca(conn);
369 }
370 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
371 
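/* Only the disconnect reasons permitted for the HCI Disconnect command are
 * accepted.
 */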
static bool is_valid_disconnect_reason(uint8_t reason)
373 {
374 	switch (reason) {
375 	case BT_HCI_ERR_AUTH_FAIL:
376 	case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
377 	case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
378 	case BT_HCI_ERR_REMOTE_POWER_OFF:
379 	case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
380 	case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
381 	case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
382 		return true;
383 	default:
384 		return false;
385 	}
386 }
387 
uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
389 {
390 	struct ll_conn *conn;
391 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
392 	struct ll_conn_iso_stream *cis;
393 #endif
394 
395 	if (IS_ACL_HANDLE(handle)) {
396 		conn = ll_connected_get(handle);
397 
398 		/* Is conn still connected? */
399 		if (!conn) {
400 			return BT_HCI_ERR_CMD_DISALLOWED;
401 		}
402 
403 		if (!is_valid_disconnect_reason(reason)) {
404 			return BT_HCI_ERR_INVALID_PARAM;
405 		}
406 
407 		uint8_t err;
408 
409 		err = ull_cp_terminate(conn, reason);
410 		if (err) {
411 			return err;
412 		}
413 
414 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
415 			ull_periph_latency_cancel(conn, handle);
416 		}
417 		return 0;
418 	}
419 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
420 	if (IS_CIS_HANDLE(handle)) {
421 		cis = ll_iso_stream_connected_get(handle);
422 		if (!cis) {
423 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
424 			/* CIS is not connected - get the unconnected instance */
425 			cis = ll_conn_iso_stream_get(handle);
426 
427 			/* Sanity-check instance to make sure it's created but not connected */
428 			if (cis->group && cis->lll.handle == handle && !cis->established) {
429 				if (cis->group->state == CIG_STATE_CONFIGURABLE) {
430 					/* Disallow if CIG is still in configurable state */
431 					return BT_HCI_ERR_CMD_DISALLOWED;
432 
433 				} else if (cis->group->state == CIG_STATE_INITIATING) {
434 					conn = ll_connected_get(cis->lll.acl_handle);
435 
436 					/* CIS is not yet established - try to cancel procedure */
437 					if (ull_cp_cc_cancel(conn)) {
438 						/* Successfully canceled - complete disconnect */
439 						struct node_rx_pdu *node_terminate;
440 
441 						node_terminate = ull_pdu_rx_alloc();
442 						LL_ASSERT(node_terminate);
443 
444 						node_terminate->hdr.handle = handle;
445 						node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
446 						*((uint8_t *)node_terminate->pdu) =
447 							BT_HCI_ERR_LOCALHOST_TERM_CONN;
448 
449 						ll_rx_put_sched(node_terminate->hdr.link,
450 							node_terminate);
451 
452 						/* We're no longer initiating a connection */
453 						cis->group->state = CIG_STATE_CONFIGURABLE;
454 
455 						/* This is now a successful disconnection */
456 						return BT_HCI_ERR_SUCCESS;
457 					}
458 
459 					/* Procedure could not be canceled in the current
460 					 * state - let it run its course and enqueue a
461 					 * terminate procedure.
462 					 */
463 					return ull_cp_cis_terminate(conn, cis, reason);
464 				}
465 			}
466 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
467 			/* Disallow if CIS is not connected */
468 			return BT_HCI_ERR_CMD_DISALLOWED;
469 		}
470 
471 		conn = ll_connected_get(cis->lll.acl_handle);
472 		/* Disallow if ACL has disconnected */
473 		if (!conn) {
474 			return BT_HCI_ERR_CMD_DISALLOWED;
475 		}
476 
477 		return ull_cp_cis_terminate(conn, cis, reason);
478 	}
479 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
480 
481 	return BT_HCI_ERR_UNKNOWN_CONN_ID;
482 }
483 
484 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
uint8_t ll_feature_req_send(uint16_t handle)
486 {
487 	struct ll_conn *conn;
488 
489 	conn = ll_connected_get(handle);
490 	if (!conn) {
491 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
492 	}
493 
494 	uint8_t err;
495 
496 	err = ull_cp_feature_exchange(conn, 1U);
497 	if (err) {
498 		return err;
499 	}
500 
501 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
502 	    IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
503 	    conn->lll.role) {
504 		ull_periph_latency_cancel(conn, handle);
505 	}
506 
507 	return 0;
508 }
509 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
510 
uint8_t ll_version_ind_send(uint16_t handle)
512 {
513 	struct ll_conn *conn;
514 
515 	conn = ll_connected_get(handle);
516 	if (!conn) {
517 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
518 	}
519 
520 	uint8_t err;
521 
522 	err = ull_cp_version_exchange(conn);
523 	if (err) {
524 		return err;
525 	}
526 
527 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
528 		ull_periph_latency_cancel(conn, handle);
529 	}
530 
531 	return 0;
532 }
533 
534 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static bool ll_len_validate(uint16_t tx_octets, uint16_t tx_time)
536 {
537 	/* validate if within HCI allowed range */
538 	if (!IN_RANGE(tx_octets, PDU_DC_PAYLOAD_SIZE_MIN,
539 		      PDU_DC_PAYLOAD_SIZE_MAX)) {
540 		return false;
541 	}
542 
543 	/* validate if within HCI allowed range */
544 	if (!IN_RANGE(tx_time, PDU_DC_PAYLOAD_TIME_MIN,
545 		      PDU_DC_PAYLOAD_TIME_MAX_CODED)) {
546 		return false;
547 	}
548 
549 	return true;
550 }
551 
uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
			    uint16_t tx_time)
554 {
555 	struct ll_conn *conn;
556 
557 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
558 	    !ll_len_validate(tx_octets, tx_time)) {
559 		return BT_HCI_ERR_INVALID_PARAM;
560 	}
561 
562 	conn = ll_connected_get(handle);
563 	if (!conn) {
564 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
565 	}
566 
567 	if (!feature_dle(conn)) {
568 		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
569 	}
570 
571 	uint8_t err;
572 
573 	err = ull_cp_data_length_update(conn, tx_octets, tx_time);
574 	if (err) {
575 		return err;
576 	}
577 
578 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
579 		ull_periph_latency_cancel(conn, handle);
580 	}
581 
582 	return 0;
583 }
584 
void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
586 {
587 	*max_tx_octets = default_tx_octets;
588 	*max_tx_time = default_tx_time;
589 }
590 
uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
592 {
593 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
594 	    !ll_len_validate(max_tx_octets, max_tx_time)) {
595 		return BT_HCI_ERR_INVALID_PARAM;
596 	}
597 
598 	default_tx_octets = max_tx_octets;
599 	default_tx_time = max_tx_time;
600 
601 	return 0;
602 }
603 
void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
		       uint16_t *max_rx_octets, uint16_t *max_rx_time)
606 {
607 #if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
608 #define PHY (PHY_CODED)
609 #else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
610 #define PHY (PHY_1M)
611 #endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
612 	*max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
613 	*max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
614 	*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
615 	*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
616 #undef PHY
617 }
618 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
619 
620 #if defined(CONFIG_BT_CTLR_PHY)
uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
622 {
623 	struct ll_conn *conn;
624 
625 	conn = ll_connected_get(handle);
626 	if (!conn) {
627 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
628 	}
629 
630 	/* TODO: context safe read */
631 	*tx = conn->lll.phy_tx;
632 	*rx = conn->lll.phy_rx;
633 
634 	return 0;
635 }
636 
uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
638 {
639 	/* TODO: validate against supported phy */
640 
641 	default_phy_tx = tx;
642 	default_phy_rx = rx;
643 
644 	return 0;
645 }
646 
uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
648 {
649 	struct ll_conn *conn;
650 
651 	conn = ll_connected_get(handle);
652 	if (!conn) {
653 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
654 	}
655 
656 	if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
657 		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
658 	}
659 
660 	uint8_t err;
661 
662 	err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
663 	if (err) {
664 		return err;
665 	}
666 
667 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
668 		ull_periph_latency_cancel(conn, handle);
669 	}
670 
671 	return 0;
672 }
673 #endif /* CONFIG_BT_CTLR_PHY */
674 
675 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
677 {
678 	struct ll_conn *conn;
679 
680 	conn = ll_connected_get(handle);
681 	if (!conn) {
682 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
683 	}
684 
685 	*rssi = conn->lll.rssi_latest;
686 
687 	return 0;
688 }
689 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
690 
691 #if defined(CONFIG_BT_CTLR_LE_PING)
uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
693 {
694 	struct ll_conn *conn;
695 
696 	conn = ll_connected_get(handle);
697 	if (!conn) {
698 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
699 	}
700 
701 	*apto = conn->apto_reload * conn->lll.interval * 125U / 1000;
702 
703 	return 0;
704 }
705 
uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
707 {
708 	struct ll_conn *conn;
709 
710 	conn = ll_connected_get(handle);
711 	if (!conn) {
712 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
713 	}
714 
715 	conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
716 					      conn->lll.interval *
717 					      CONN_INT_UNIT_US);
718 
719 	return 0;
720 }
721 #endif /* CONFIG_BT_CTLR_LE_PING */
722 
int ull_conn_init(void)
724 {
725 	int err;
726 
727 	err = init_reset();
728 	if (err) {
729 		return err;
730 	}
731 
732 	return 0;
733 }
734 
int ull_conn_reset(void)
736 {
737 	uint16_t handle;
738 	int err;
739 
740 #if defined(CONFIG_BT_CENTRAL)
741 	/* Reset initiator */
742 	(void)ull_central_reset();
743 #endif /* CONFIG_BT_CENTRAL */
744 
745 	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
746 		disable(handle);
747 	}
748 
749 	/* Re-initialize the Tx mfifo */
750 	MFIFO_INIT(conn_tx);
751 
752 	/* Re-initialize the Tx Ack mfifo */
753 	MFIFO_INIT(conn_ack);
754 
755 	err = init_reset();
756 	if (err) {
757 		return err;
758 	}
759 
760 	return 0;
761 }
762 
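/* Return the LLL connection context for a handle without checking whether
 * the connection is established.
 */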
struct lll_conn *ull_conn_lll_get(uint16_t handle)
764 {
765 	struct ll_conn *conn;
766 
767 	conn = ll_conn_get(handle);
768 
769 	return &conn->lll;
770 }
771 
772 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint16_t ull_conn_default_tx_octets_get(void)
774 {
775 	return default_tx_octets;
776 }
777 
778 #if defined(CONFIG_BT_CTLR_PHY)
uint16_t ull_conn_default_tx_time_get(void)
780 {
781 	return default_tx_time;
782 }
783 #endif /* CONFIG_BT_CTLR_PHY */
784 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
785 
786 #if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_conn_default_phy_tx_get(void)
788 {
789 	return default_phy_tx;
790 }
791 
uint8_t ull_conn_default_phy_rx_get(void)
793 {
794 	return default_phy_rx;
795 }
796 #endif /* CONFIG_BT_CTLR_PHY */
797 
798 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
			     uint8_t const *const own_id_addr,
			     uint8_t const peer_id_addr_type,
			     uint8_t const *const peer_id_addr)
803 {
804 	uint16_t handle;
805 
806 	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
807 		struct ll_conn *conn = ll_connected_get(handle);
808 
809 		if (conn &&
810 		    conn->peer_id_addr_type == peer_id_addr_type &&
811 		    !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
812 		    conn->own_id_addr_type == own_id_addr_type &&
813 		    !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
814 			return true;
815 		}
816 	}
817 
818 	return false;
819 }
820 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
821 
void ull_conn_setup(memq_link_t *rx_link, struct node_rx_pdu *rx)
823 {
824 	struct node_rx_ftr *ftr;
825 	struct ull_hdr *hdr;
826 
827 	/* Store the link in the node rx so that when done event is
828 	 * processed it can be used to enqueue node rx towards LL context
829 	 */
830 	rx->hdr.link = rx_link;
831 
832 	/* NOTE: LLL conn context SHALL be after lll_hdr in
833 	 *       struct lll_adv and struct lll_scan.
834 	 */
835 	ftr = &(rx->rx_ftr);
836 
837 	/* Check for reference count and decide to setup connection
838 	 * here or when done event arrives.
839 	 */
840 	hdr = HDR_LLL2ULL(ftr->param);
841 	if (ull_ref_get(hdr)) {
842 		/* Setup connection in ULL disabled callback,
843 		 * pass the node rx as disabled callback parameter.
844 		 */
845 		LL_ASSERT(!hdr->disabled_cb);
846 		hdr->disabled_param = rx;
847 		hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
848 	} else {
849 		conn_setup_adv_scan_disabled_cb(rx);
850 	}
851 }
852 
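/* Demultiplex a node rx received on a connection: LL control PDUs are
 * handed over to the LLCP, data PDUs are passed on towards the host, and
 * PDUs with an invalid LL ID, or data received while Rx data is paused
 * for encryption, are marked for release.
 */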
void ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
854 {
855 	struct pdu_data *pdu_rx;
856 	struct ll_conn *conn;
857 
858 	conn = ll_connected_get((*rx)->hdr.handle);
859 	if (!conn) {
		/* Mark buffer for release */
861 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
862 
863 		return;
864 	}
865 
866 	ull_cp_tx_ntf(conn);
867 
868 	pdu_rx = (void *)(*rx)->pdu;
869 
870 	switch (pdu_rx->ll_id) {
871 	case PDU_DATA_LLID_CTRL:
872 	{
873 		/* Mark buffer for release */
874 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
875 
876 		ull_cp_rx(conn, link, *rx);
877 
878 		return;
879 	}
880 
881 	case PDU_DATA_LLID_DATA_CONTINUE:
882 	case PDU_DATA_LLID_DATA_START:
883 #if defined(CONFIG_BT_CTLR_LE_ENC)
884 		if (conn->pause_rx_data) {
885 			conn->llcp_terminate.reason_final =
886 				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
887 
			/* Mark buffer for release */
889 			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
890 		}
891 #endif /* CONFIG_BT_CTLR_LE_ENC */
892 		break;
893 
894 	case PDU_DATA_LLID_RESV:
895 	default:
896 #if defined(CONFIG_BT_CTLR_LE_ENC)
897 		if (conn->pause_rx_data) {
898 			conn->llcp_terminate.reason_final =
899 				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
900 		}
901 #endif /* CONFIG_BT_CTLR_LE_ENC */
902 
903 		/* Invalid LL id, drop it. */
904 
		/* Mark buffer for release */
906 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
907 
908 		break;
909 	}
910 }
911 
int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire,
		  uint32_t remainder, uint16_t lazy)
914 {
915 	LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);
916 
917 	conn->llcp.prep.ticks_at_expire = ticks_at_expire;
918 	conn->llcp.prep.remainder = remainder;
919 	conn->llcp.prep.lazy = lazy;
920 
921 	ull_cp_run(conn);
922 
923 	if (conn->cancel_prepare) {
924 		/* Reset signal */
925 		conn->cancel_prepare = 0U;
926 
927 		/* Cancel prepare */
928 		return -ECANCELED;
929 	}
930 
931 	/* Continue prepare */
932 	return 0;
933 }
934 
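/* Connection event done processing in ULL: check the MIC state and
 * authenticated payload timeouts, apply peripheral drift compensation,
 * run the connection establishment, supervision and procedure timeout
 * countdowns, and update the connection ticker when drift, slot
 * reservation, latency or force has changed.
 */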
void ull_conn_done(struct node_rx_event_done *done)
936 {
937 	uint32_t ticks_drift_minus;
938 	uint32_t ticks_drift_plus;
939 	uint32_t ticks_slot_minus;
940 	uint32_t ticks_slot_plus;
941 	uint16_t latency_event;
942 	uint16_t elapsed_event;
943 	struct lll_conn *lll;
944 	struct ll_conn *conn;
945 	uint8_t reason_final;
946 	uint16_t lazy;
947 	uint8_t force;
948 
949 	/* Get reference to ULL context */
950 	conn = CONTAINER_OF(done->param, struct ll_conn, ull);
951 	lll = &conn->lll;
952 
953 	/* Skip if connection terminated by local host */
954 	if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
955 		return;
956 	}
957 
958 	ull_cp_tx_ntf(conn);
959 
960 #if defined(CONFIG_BT_CTLR_LE_ENC)
961 	/* Check authenticated payload expiry or MIC failure */
962 	switch (done->extra.mic_state) {
963 	case LLL_CONN_MIC_NONE:
964 #if defined(CONFIG_BT_CTLR_LE_PING)
965 		if (lll->enc_rx && lll->enc_tx) {
966 			uint16_t appto_reload_new;
967 
968 			/* check for change in apto */
969 			appto_reload_new = (conn->apto_reload >
970 					    (lll->latency + 6)) ?
971 					   (conn->apto_reload -
972 					    (lll->latency + 6)) :
973 					   conn->apto_reload;
974 			if (conn->appto_reload != appto_reload_new) {
975 				conn->appto_reload = appto_reload_new;
976 				conn->apto_expire = 0U;
977 			}
978 
979 			/* start authenticated payload (pre) timeout */
980 			if (conn->apto_expire == 0U) {
981 				conn->appto_expire = conn->appto_reload;
982 				conn->apto_expire = conn->apto_reload;
983 			}
984 		}
985 #endif /* CONFIG_BT_CTLR_LE_PING */
986 		break;
987 
988 	case LLL_CONN_MIC_PASS:
989 #if defined(CONFIG_BT_CTLR_LE_PING)
990 		conn->appto_expire = conn->apto_expire = 0U;
991 #endif /* CONFIG_BT_CTLR_LE_PING */
992 		break;
993 
994 	case LLL_CONN_MIC_FAIL:
995 		conn->llcp_terminate.reason_final =
996 			BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
997 		break;
998 	}
999 #endif /* CONFIG_BT_CTLR_LE_ENC */
1000 
1001 	reason_final = conn->llcp_terminate.reason_final;
1002 	if (reason_final) {
1003 		conn_cleanup(conn, reason_final);
1004 
1005 		return;
1006 	}
1007 
1008 	/* Events elapsed used in timeout checks below */
1009 #if defined(CONFIG_BT_CTLR_CONN_META)
1010 	/* If event has shallow expiry do not add latency, but rely on
1011 	 * accumulated lazy count.
1012 	 */
1013 	latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
1014 #else
1015 	latency_event = lll->latency_event;
1016 #endif
1017 	if (lll->latency_prepare) {
1018 		elapsed_event = latency_event + lll->latency_prepare;
1019 	} else {
1020 		elapsed_event = latency_event + 1U;
1021 	}
1022 
1023 	/* Peripheral drift compensation calc and new latency or
1024 	 * central terminate acked
1025 	 */
1026 	ticks_drift_plus = 0U;
1027 	ticks_drift_minus = 0U;
1028 	ticks_slot_plus = 0U;
1029 	ticks_slot_minus = 0U;
1030 
1031 	if (done->extra.trx_cnt) {
1032 		if (0) {
1033 #if defined(CONFIG_BT_PERIPHERAL)
1034 		} else if (lll->role) {
1035 			ull_drift_ticks_get(done, &ticks_drift_plus,
1036 					    &ticks_drift_minus);
1037 
1038 			if (!ull_tx_q_peek(&conn->tx_q)) {
1039 				ull_conn_tx_demux(UINT8_MAX);
1040 			}
1041 
1042 			if (ull_tx_q_peek(&conn->tx_q) ||
1043 			    memq_peek(lll->memq_tx.head,
1044 				      lll->memq_tx.tail, NULL)) {
1045 				lll->latency_event = 0U;
1046 			} else if (lll->periph.latency_enabled) {
1047 				lll->latency_event = lll->latency;
1048 			}
1049 #endif /* CONFIG_BT_PERIPHERAL */
1050 		}
1051 
1052 		/* Reset connection failed to establish countdown */
1053 		conn->connect_expire = 0U;
1054 	}
1055 
1056 	/* Reset supervision countdown */
1057 	if (done->extra.crc_valid) {
1058 		conn->supervision_expire = 0U;
1059 	}
1060 
1061 	/* check connection failed to establish */
1062 	else if (conn->connect_expire) {
1063 		if (conn->connect_expire > elapsed_event) {
1064 			conn->connect_expire -= elapsed_event;
1065 		} else {
1066 			conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
1067 
1068 			return;
1069 		}
1070 	}
1071 
1072 	/* if anchor point not sync-ed, start supervision timeout, and break
1073 	 * latency if any.
1074 	 */
1075 	else {
1076 		/* Start supervision timeout, if not started already */
1077 		if (!conn->supervision_expire) {
1078 			const uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;
1079 
1080 			conn->supervision_expire = RADIO_CONN_EVENTS(
1081 				(conn->supervision_timeout * 10U * 1000U),
1082 				conn_interval_us);
1083 		}
1084 	}
1085 
1086 	/* check supervision timeout */
1087 	force = 0U;
1088 	if (conn->supervision_expire) {
1089 		if (conn->supervision_expire > elapsed_event) {
1090 			conn->supervision_expire -= elapsed_event;
1091 
1092 			/* break latency */
1093 			lll->latency_event = 0U;
1094 
1095 			/* Force both central and peripheral when close to
1096 			 * supervision timeout.
1097 			 */
1098 			if (conn->supervision_expire <= 6U) {
1099 				force = 1U;
1100 			}
1101 #if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
1102 			/* use randomness to force peripheral role when anchor
1103 			 * points are being missed.
1104 			 */
1105 			else if (lll->role) {
1106 				if (latency_event) {
1107 					force = 1U;
1108 				} else {
1109 					force = conn->periph.force & 0x01;
1110 
1111 					/* rotate force bits */
1112 					conn->periph.force >>= 1U;
1113 					if (force) {
1114 						conn->periph.force |= BIT(31);
1115 					}
1116 				}
1117 			}
1118 #endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
1119 		} else {
1120 			conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);
1121 
1122 			return;
1123 		}
1124 	}
1125 
1126 	/* check procedure timeout */
1127 	uint8_t error_code;
1128 
1129 	if (-ETIMEDOUT == ull_cp_prt_elapse(conn, elapsed_event, &error_code)) {
1130 		conn_cleanup(conn, error_code);
1131 
1132 		return;
1133 	}
1134 
1135 #if defined(CONFIG_BT_CTLR_LE_PING)
1136 	/* check apto */
1137 	if (conn->apto_expire != 0U) {
1138 		if (conn->apto_expire > elapsed_event) {
1139 			conn->apto_expire -= elapsed_event;
1140 		} else {
1141 			struct node_rx_hdr *rx;
1142 
1143 			rx = ll_pdu_rx_alloc();
1144 			if (rx) {
1145 				conn->apto_expire = 0U;
1146 
1147 				rx->handle = lll->handle;
1148 				rx->type = NODE_RX_TYPE_APTO;
1149 
1150 				/* enqueue apto event into rx queue */
1151 				ll_rx_put_sched(rx->link, rx);
1152 			} else {
1153 				conn->apto_expire = 1U;
1154 			}
1155 		}
1156 	}
1157 
1158 	/* check appto */
1159 	if (conn->appto_expire != 0U) {
1160 		if (conn->appto_expire > elapsed_event) {
1161 			conn->appto_expire -= elapsed_event;
1162 		} else {
1163 			conn->appto_expire = 0U;
1164 
1165 			/* Initiate LE_PING procedure */
1166 			ull_cp_le_ping(conn);
1167 		}
1168 	}
1169 #endif /* CONFIG_BT_CTLR_LE_PING */
1170 
1171 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1172 	/* Check if the CTE_REQ procedure is periodic and counter has been started.
1173 	 * req_expire is set when: new CTE_REQ is started, after completion of last periodic run.
1174 	 */
1175 	if (conn->llcp.cte_req.req_interval != 0U && conn->llcp.cte_req.req_expire != 0U) {
1176 		if (conn->llcp.cte_req.req_expire > elapsed_event) {
1177 			conn->llcp.cte_req.req_expire -= elapsed_event;
1178 		} else {
1179 			uint8_t err;
1180 
1181 			/* Set req_expire to zero to mark that new periodic CTE_REQ was started.
1182 			 * The counter is re-started after completion of this run.
1183 			 */
1184 			conn->llcp.cte_req.req_expire = 0U;
1185 
1186 			err = ull_cp_cte_req(conn, conn->llcp.cte_req.min_cte_len,
1187 					     conn->llcp.cte_req.cte_type);
1188 
1189 			if (err == BT_HCI_ERR_CMD_DISALLOWED) {
				/* Conditions have changed, e.g. the PHY was changed to CODED.
				 * A new CTE REQ is not possible. Disable the periodic requests.
				 */
1193 				ull_cp_cte_req_set_disable(conn);
1194 			}
1195 		}
1196 	}
1197 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1198 
1199 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1200 	/* generate RSSI event */
1201 	if (lll->rssi_sample_count == 0U) {
1202 		struct node_rx_pdu *rx;
1203 		struct pdu_data *pdu_data_rx;
1204 
1205 		rx = ll_pdu_rx_alloc();
1206 		if (rx) {
1207 			lll->rssi_reported = lll->rssi_latest;
1208 			lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
1209 
1210 			/* Prepare the rx packet structure */
1211 			rx->hdr.handle = lll->handle;
1212 			rx->hdr.type = NODE_RX_TYPE_RSSI;
1213 
1214 			/* prepare connection RSSI structure */
1215 			pdu_data_rx = (void *)rx->pdu;
1216 			pdu_data_rx->rssi = lll->rssi_reported;
1217 
1218 			/* enqueue connection RSSI structure into queue */
1219 			ll_rx_put_sched(rx->hdr.link, rx);
1220 		}
1221 	}
1222 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1223 
1224 	/* check if latency needs update */
1225 	lazy = 0U;
1226 	if ((force) || (latency_event != lll->latency_event)) {
1227 		lazy = lll->latency_event + 1U;
1228 	}
1229 
1230 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
1231 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) || defined(CONFIG_BT_CTLR_PHY)
1232 	if (lll->evt_len_upd) {
1233 		uint32_t ready_delay, rx_time, tx_time, ticks_slot, slot_us;
1234 
1235 		lll->evt_len_upd = 0;
1236 #if defined(CONFIG_BT_CTLR_PHY)
1237 		ready_delay = (lll->role) ?
1238 			lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8) :
1239 			lll_radio_tx_ready_delay_get(lll->phy_tx, lll->phy_flags);
1240 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1241 		tx_time = lll->dle.eff.max_tx_time;
1242 		rx_time = lll->dle.eff.max_rx_time;
1243 #else /* CONFIG_BT_CTLR_DATA_LENGTH */
1244 
1245 		tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1246 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
1247 		rx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1248 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
1249 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1250 #else /* CONFIG_BT_CTLR_PHY */
1251 		ready_delay = (lll->role) ?
1252 			lll_radio_rx_ready_delay_get(0, 0) :
1253 			lll_radio_tx_ready_delay_get(0, 0);
1254 		tx_time = PDU_DC_MAX_US(lll->dle.eff.max_tx_octets, 0);
1255 		rx_time = PDU_DC_MAX_US(lll->dle.eff.max_rx_octets, 0);
1256 #endif /* CONFIG_BT_CTLR_PHY */
1257 
1258 		/* Calculate event time reservation */
1259 		slot_us = tx_time + rx_time;
1260 		slot_us += EVENT_IFS_US + (EVENT_CLOCK_JITTER_US << 1);
1261 		slot_us += ready_delay;
1262 
1263 		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
1264 		    !conn->lll.role) {
1265 			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1266 		}
1267 
1268 		ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1269 		if (ticks_slot > conn->ull.ticks_slot) {
1270 			ticks_slot_plus = ticks_slot - conn->ull.ticks_slot;
1271 		} else {
1272 			ticks_slot_minus = conn->ull.ticks_slot - ticks_slot;
1273 		}
1274 		conn->ull.ticks_slot = ticks_slot;
1275 	}
1276 #endif /* CONFIG_BT_CTLR_DATA_LENGTH || CONFIG_BT_CTLR_PHY */
1277 #else /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1278 	ticks_slot_plus = 0;
1279 	ticks_slot_minus = 0;
1280 #endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1281 
1282 	/* update conn ticker */
1283 	if (ticks_drift_plus || ticks_drift_minus ||
1284 	    ticks_slot_plus || ticks_slot_minus ||
1285 	    lazy || force) {
1286 		uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
1287 		struct ll_conn *conn_ll = lll->hdr.parent;
1288 		uint32_t ticker_status;
1289 
		/* The call to ticker_update can fail under a race condition
		 * wherein the peripheral role is being stopped but, at the
		 * same time, is preempted by a peripheral event that gets
		 * into the close state. Accept failure when the peripheral
		 * role is being stopped.
		 */
1296 		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
1297 					      TICKER_USER_ID_ULL_HIGH,
1298 					      ticker_id,
1299 					      ticks_drift_plus, ticks_drift_minus,
1300 					      ticks_slot_plus, ticks_slot_minus,
1301 					      lazy, force,
1302 					      ticker_update_conn_op_cb,
1303 					      conn_ll);
1304 		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1305 			  (ticker_status == TICKER_STATUS_BUSY) ||
1306 			  ((void *)conn_ll == ull_disable_mark_get()));
1307 	}
1308 }
1309 
1310 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
void ull_conn_lll_tx_demux_sched(struct lll_conn *lll)
1312 {
1313 	static memq_link_t link;
1314 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1315 
1316 	mfy.param = HDR_LLL2ULL(lll);
1317 
1318 	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1U, &mfy);
1319 }
1320 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1321 
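/* Demultiplex up to 'count' Tx nodes from the common conn_tx FIFO into the
 * per-connection Tx queues; nodes destined for a handle that is no longer
 * connected are acked back with an invalid handle.
 */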
void ull_conn_tx_demux(uint8_t count)
1323 {
1324 	do {
1325 		struct lll_tx *lll_tx;
1326 		struct ll_conn *conn;
1327 
1328 		lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
1329 		if (!lll_tx) {
1330 			break;
1331 		}
1332 
1333 		conn = ll_connected_get(lll_tx->handle);
1334 		if (conn) {
1335 			struct node_tx *tx = lll_tx->node;
1336 
1337 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1338 			if (empty_data_start_release(conn, tx)) {
1339 				goto ull_conn_tx_demux_release;
1340 			}
1341 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1342 
1343 			ull_tx_q_enqueue_data(&conn->tx_q, tx);
1344 		} else {
1345 			struct node_tx *tx = lll_tx->node;
1346 			struct pdu_data *p = (void *)tx->pdu;
1347 
1348 			p->ll_id = PDU_DATA_LLID_RESV;
1349 			ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
1350 		}
1351 
1352 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1353 ull_conn_tx_demux_release:
1354 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1355 
1356 		MFIFO_DEQUEUE(conn_tx);
1357 	} while (--count);
1358 }
1359 
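/* Move up to 'count' Tx nodes from the connection's ULL Tx queue onto its
 * memq towards LLL.
 */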
void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
1361 {
1362 	while (count--) {
1363 		struct node_tx *tx;
1364 		memq_link_t *link;
1365 
1366 		tx = tx_ull_dequeue(conn, NULL);
1367 		if (!tx) {
1368 			/* No more tx nodes available */
1369 			break;
1370 		}
1371 
1372 		link = mem_acquire(&mem_link_tx.free);
1373 		LL_ASSERT(link);
1374 
1375 		/* Enqueue towards LLL */
1376 		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1377 	}
1378 }
1379 
void ull_conn_link_tx_release(void *link)
1381 {
1382 	mem_release(link, &mem_link_tx.free);
1383 }
1384 
uint8_t ull_conn_ack_last_idx_get(void)
1386 {
1387 	return mfifo_fifo_conn_ack.l;
1388 }
1389 
memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
			       struct node_tx **tx)
1392 {
1393 	struct lll_tx *lll_tx;
1394 
1395 	lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
1396 	if (!lll_tx) {
1397 		return NULL;
1398 	}
1399 
1400 	*ack_last = mfifo_fifo_conn_ack.l;
1401 
1402 	*handle = lll_tx->handle;
1403 	*tx = lll_tx->node;
1404 
1405 	return (*tx)->link;
1406 }
1407 
memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
				       struct node_tx **tx)
1410 {
1411 	struct lll_tx *lll_tx;
1412 
1413 	lll_tx = mfifo_dequeue_get(mfifo_fifo_conn_ack.m, mfifo_conn_ack.s,
1414 				   mfifo_fifo_conn_ack.f, last);
1415 	if (!lll_tx) {
1416 		return NULL;
1417 	}
1418 
1419 	*handle = lll_tx->handle;
1420 	*tx = lll_tx->node;
1421 
1422 	return (*tx)->link;
1423 }
1424 
void *ull_conn_ack_dequeue(void)
1426 {
1427 	return MFIFO_DEQUEUE(conn_ack);
1428 }
1429 
void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
1431 {
1432 	struct lll_tx *lll_tx;
1433 	uint8_t idx;
1434 
1435 	idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
1436 	LL_ASSERT(lll_tx);
1437 
1438 	lll_tx->handle = handle;
1439 	lll_tx->node = tx;
1440 
1441 	MFIFO_ENQUEUE(conn_ack, idx);
1442 }
1443 
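/* Process a Tx acknowledgment from LLL: LL control PDUs are routed to the
 * LLCP and released back to the control pool when applicable; remaining
 * nodes are acked towards the host via ll_tx_ack_put().
 */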
void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
1445 {
1446 	struct pdu_data *pdu_tx;
1447 
1448 	pdu_tx = (void *)tx->pdu;
1449 	LL_ASSERT(pdu_tx->len);
1450 
1451 	if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1452 		if (handle != LLL_HANDLE_INVALID) {
1453 			struct ll_conn *conn = ll_conn_get(handle);
1454 
1455 			ull_cp_tx_ack(conn, tx);
1456 		}
1457 
		/* release ctrl mem if it points to itself */
1459 		if (link->next == (void *)tx) {
1460 			LL_ASSERT(link->next);
1461 
1462 			struct ll_conn *conn = ll_connected_get(handle);
1463 
1464 			ull_cp_release_tx(conn, tx);
1465 			return;
1466 		} else if (!tx) {
1467 			/* Tx Node re-used to enqueue new ctrl PDU */
1468 			return;
1469 		}
1470 		LL_ASSERT(!link->next);
1471 	} else if (handle == LLL_HANDLE_INVALID) {
1472 		pdu_tx->ll_id = PDU_DATA_LLID_RESV;
1473 	} else {
1474 		LL_ASSERT(handle != LLL_HANDLE_INVALID);
1475 	}
1476 
1477 	ll_tx_ack_put(handle, tx);
1478 }
1479 
uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
1481 {
1482 	uint16_t max_tx_octets;
1483 
1484 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1485 #if defined(CONFIG_BT_CTLR_PHY)
1486 	switch (lll->phy_tx_time) {
1487 	default:
1488 	case PHY_1M:
1489 		/* 1M PHY, 1us = 1 bit, hence divide by 8.
1490 		 * Deduct 10 bytes for preamble (1), access address (4),
1491 		 * header (2), and CRC (3).
1492 		 */
1493 		max_tx_octets = (lll->dle.eff.max_tx_time >> 3) - 10;
1494 		break;
1495 
1496 	case PHY_2M:
1497 		/* 2M PHY, 1us = 2 bits, hence divide by 4.
1498 		 * Deduct 11 bytes for preamble (2), access address (4),
1499 		 * header (2), and CRC (3).
1500 		 */
1501 		max_tx_octets = (lll->dle.eff.max_tx_time >> 2) - 11;
1502 		break;
1503 
1504 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1505 	case PHY_CODED:
1506 		if (lll->phy_flags & 0x01) {
1507 			/* S8 Coded PHY, 8us = 1 bit, hence divide by
1508 			 * 64.
1509 			 * Subtract time for preamble (80), AA (256),
1510 			 * CI (16), TERM1 (24), CRC (192) and
1511 			 * TERM2 (24), total 592 us.
1512 			 * Subtract 2 bytes for header.
1513 			 */
1514 			max_tx_octets = ((lll->dle.eff.max_tx_time - 592) >>
1515 					  6) - 2;
1516 		} else {
1517 			/* S2 Coded PHY, 2us = 1 bit, hence divide by
1518 			 * 16.
1519 			 * Subtract time for preamble (80), AA (256),
1520 			 * CI (16), TERM1 (24), CRC (48) and
1521 			 * TERM2 (6), total 430 us.
1522 			 * Subtract 2 bytes for header.
1523 			 */
1524 			max_tx_octets = ((lll->dle.eff.max_tx_time - 430) >>
1525 					  4) - 2;
1526 		}
1527 		break;
1528 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1529 	}
1530 
1531 #if defined(CONFIG_BT_CTLR_LE_ENC)
1532 	if (lll->enc_tx) {
1533 		/* deduct the MIC */
1534 		max_tx_octets -= 4U;
1535 	}
1536 #endif /* CONFIG_BT_CTLR_LE_ENC */
1537 
1538 	if (max_tx_octets > lll->dle.eff.max_tx_octets) {
1539 		max_tx_octets = lll->dle.eff.max_tx_octets;
1540 	}
1541 
1542 #else /* !CONFIG_BT_CTLR_PHY */
1543 	max_tx_octets = lll->dle.eff.max_tx_octets;
1544 #endif /* !CONFIG_BT_CTLR_PHY */
1545 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
1546 	max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1547 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
1548 	return max_tx_octets;
1549 }
1550 
1551 /**
1552  * @brief Initialize pdu_data members that are read only in lower link layer.
1553  *
1554  * @param pdu Pointer to pdu_data object to be initialized
1555  */
void ull_pdu_data_init(struct pdu_data *pdu)
1557 {
1558 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1559 	pdu->cp = 0U;
1560 	pdu->octet3.resv[0] = 0U;
1561 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1562 }
1563 
static int init_reset(void)
1565 {
1566 	/* Initialize conn pool. */
1567 	mem_init(conn_pool, sizeof(struct ll_conn),
1568 		 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
1569 
1570 	/* Initialize tx pool. */
1571 	mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONN_DATA_BUFFERS,
1572 		 &mem_conn_tx.free);
1573 
1574 	/* Initialize tx link pool. */
1575 	mem_init(mem_link_tx.pool, sizeof(memq_link_t),
1576 		 (CONN_DATA_BUFFERS +
1577 		  LLCP_TX_CTRL_BUF_COUNT),
1578 		 &mem_link_tx.free);
1579 
1580 	/* Initialize control procedure system. */
1581 	ull_cp_init();
1582 
1583 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1584 	/* Reset CPR mutex */
1585 	cpr_active_reset();
1586 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1587 
1588 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1589 	/* Initialize the DLE defaults */
1590 	default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1591 	default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
1592 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1593 
1594 #if defined(CONFIG_BT_CTLR_PHY)
1595 	/* Initialize the PHY defaults */
1596 	default_phy_tx = PHY_1M;
1597 	default_phy_rx = PHY_1M;
1598 
1599 #if defined(CONFIG_BT_CTLR_PHY_2M)
1600 	default_phy_tx |= PHY_2M;
1601 	default_phy_rx |= PHY_2M;
1602 #endif /* CONFIG_BT_CTLR_PHY_2M */
1603 
1604 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1605 	default_phy_tx |= PHY_CODED;
1606 	default_phy_rx |= PHY_CODED;
1607 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1608 #endif /* CONFIG_BT_CTLR_PHY */
1609 
1610 	return 0;
1611 }
1612 
1613 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
static void tx_demux_sched(struct ll_conn *conn)
1615 {
1616 	static memq_link_t link;
1617 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1618 
1619 	mfy.param = conn;
1620 
1621 	mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1622 }
1623 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
1624 
static void tx_demux(void *param)
1626 {
1627 	ull_conn_tx_demux(1);
1628 
1629 	ull_conn_tx_lll_enqueue(param, 1);
1630 }
1631 
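/* Dequeue the next Tx node from the connection's ULL Tx queue and mark,
 * via the node's next pointer, whether it belongs to the ctrl or the data
 * pool.
 */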
static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *unused)
1633 {
1634 	struct node_tx *tx = NULL;
1635 
1636 	tx = ull_tx_q_dequeue(&conn->tx_q);
1637 	if (tx) {
1638 		struct pdu_data *pdu_tx;
1639 
1640 		pdu_tx = (void *)tx->pdu;
1641 		if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1642 			/* Mark the tx node as belonging to the ctrl pool */
1643 			tx->next = tx;
1644 		} else {
1645 			/* Mark the tx node as belonging to the data pool */
1646 			tx->next = NULL;
1647 		}
1648 	}
1649 	return tx;
1650 }
1651 
static void ticker_update_conn_op_cb(uint32_t status, void *param)
1653 {
	/* Peripheral drift compensation succeeds, or it fails in a race
	 * condition with a disconnection or a connection update (race between
	 * the ticker_update and ticker_stop calls).
	 */
1658 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1659 		  param == ull_update_mark_get() ||
1660 		  param == ull_disable_mark_get());
1661 }
1662 
static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1664 {
1665 	void *p;
1666 
1667 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1668 
1669 	p = ull_update_mark(param);
1670 	LL_ASSERT(p == param);
1671 }
1672 
static void ticker_start_conn_op_cb(uint32_t status, void *param)
1674 {
1675 	void *p;
1676 
1677 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1678 
1679 	p = ull_update_unmark(param);
1680 	LL_ASSERT(p == param);
1681 }
1682 
static void conn_setup_adv_scan_disabled_cb(void *param)
1684 {
1685 	struct node_rx_ftr *ftr;
1686 	struct node_rx_pdu *rx;
1687 	struct lll_conn *lll;
1688 
1689 	/* NOTE: LLL conn context SHALL be after lll_hdr in
1690 	 *       struct lll_adv and struct lll_scan.
1691 	 */
1692 	rx = param;
1693 	ftr = &(rx->rx_ftr);
1694 	lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1695 				     sizeof(struct lll_hdr)));
1696 
1697 	if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1698 		struct ull_hdr *hdr;
1699 
1700 		/* Prevent fast ADV re-scheduling from re-triggering */
1701 		hdr = HDR_LLL2ULL(ftr->param);
1702 		hdr->disabled_cb = NULL;
1703 	}
1704 
1705 	switch (lll->role) {
1706 #if defined(CONFIG_BT_CENTRAL)
1707 	case 0:
1708 		ull_central_setup(rx, ftr, lll);
1709 		break;
1710 #endif /* CONFIG_BT_CENTRAL */
1711 
1712 #if defined(CONFIG_BT_PERIPHERAL)
1713 	case 1:
1714 		ull_periph_setup(rx, ftr, lll);
1715 		break;
1716 #endif /* CONFIG_BT_PERIPHERAL */
1717 
1718 	default:
1719 		LL_ASSERT(0);
1720 		break;
1721 	}
1722 }
1723 
static inline void disable(uint16_t handle)
1725 {
1726 	struct ll_conn *conn;
1727 	int err;
1728 
1729 	conn = ll_conn_get(handle);
1730 
1731 	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1732 					conn, &conn->lll);
1733 	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
1734 
1735 	conn->lll.handle = LLL_HANDLE_INVALID;
1736 	conn->lll.link_tx_free = NULL;
1737 }
1738 
1739 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1741 {
1742 	struct ll_conn_iso_stream *cis;
1743 
1744 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1745 	if (cis) {
1746 		struct node_rx_pdu *rx;
1747 		uint8_t reason;
1748 
1749 		/* More associated CISes - stop next */
1750 		rx = (void *)&conn->llcp_terminate.node_rx;
1751 		reason = *(uint8_t *)rx->pdu;
1752 
1753 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1754 				      reason);
1755 	} else {
1756 		/* No more CISes associated with conn - finalize */
1757 		conn_cleanup_finalize(conn);
1758 	}
1759 }
1760 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1761 
static void conn_cleanup_finalize(struct ll_conn *conn)
1763 {
1764 	struct lll_conn *lll = &conn->lll;
1765 	uint32_t ticker_status;
1766 
1767 	ull_cp_state_set(conn, ULL_CP_DISCONNECTED);
1768 
1769 	/* Update tx buffer queue handling */
1770 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
1771 	ull_cp_update_tx_buffer_queue(conn);
1772 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
1773 	ull_cp_release_nodes(conn);
1774 
1775 	/* flush demux-ed Tx buffer still in ULL context */
1776 	tx_ull_flush(conn);
1777 
1778 	/* Stop Central or Peripheral role ticker */
1779 	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1780 				    TICKER_USER_ID_ULL_HIGH,
1781 				    TICKER_ID_CONN_BASE + lll->handle,
1782 				    ticker_stop_op_cb, conn);
1783 	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1784 		  (ticker_status == TICKER_STATUS_BUSY));
1785 
1786 	/* Invalidate the connection context */
1787 	lll->handle = LLL_HANDLE_INVALID;
1788 
1789 	/* Demux and flush Tx PDUs that remain enqueued in thread context */
1790 	ull_conn_tx_demux(UINT8_MAX);
1791 }
1792 
static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
1794 {
1795 	struct node_rx_pdu *rx;
1796 
1797 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1798 	struct ll_conn_iso_stream *cis;
1799 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1800 
1801 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1802 	/* Reset CPR mutex */
1803 	cpr_active_check_and_reset(conn);
1804 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1805 
	/* Only the termination structure is populated here, in the ULL
	 * context; the actual enqueue happens in the LLL context in
	 * tx_lll_flush(). This avoids passing the reason value and handle
	 * through the mayfly scheduling of tx_lll_flush().
	 */
1812 	rx = (void *)&conn->llcp_terminate.node_rx.rx;
1813 	rx->hdr.handle = conn->lll.handle;
1814 	rx->hdr.type = NODE_RX_TYPE_TERMINATE;
1815 	*((uint8_t *)rx->pdu) = reason;
1816 
1817 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1818 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1819 	if (cis) {
1820 		/* Stop CIS and defer cleanup to after teardown. */
1821 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1822 				      reason);
1823 		return;
1824 	}
1825 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1826 
1827 	conn_cleanup_finalize(conn);
1828 }
1829 
static void tx_ull_flush(struct ll_conn *conn)
1831 {
1832 	struct node_tx *tx;
1833 
1834 	ull_tx_q_resume_data(&conn->tx_q);
1835 
1836 	tx = tx_ull_dequeue(conn, NULL);
1837 	while (tx) {
1838 		memq_link_t *link;
1839 
1840 		link = mem_acquire(&mem_link_tx.free);
1841 		LL_ASSERT(link);
1842 
1843 		/* Enqueue towards LLL */
1844 		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1845 
1846 		tx = tx_ull_dequeue(conn, NULL);
1847 	}
1848 }
1849 
static void ticker_stop_op_cb(uint32_t status, void *param)
1851 {
1852 	static memq_link_t link;
1853 	static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
1854 	uint32_t ret;
1855 
1856 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1857 
1858 	/* Check if there are any pending LLL events that need to be aborted */
1859 	mfy.param = param;
1860 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1861 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
1862 	LL_ASSERT(!ret);
1863 }
1864 
1865 static void conn_disable(void *param)
1866 {
1867 	struct ll_conn *conn;
1868 	struct ull_hdr *hdr;
1869 
1870 	/* Check ref count to determine if there are any pending LLL events in pipeline */
1871 	conn = param;
1872 	hdr = &conn->ull;
1873 	if (ull_ref_get(hdr)) {
1874 		static memq_link_t link;
1875 		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1876 		uint32_t ret;
1877 
1878 		mfy.param = &conn->lll;
1879 
1880 		/* Setup disabled callback to be called when ref count
1881 		 * returns to zero.
1882 		 */
1883 		LL_ASSERT(!hdr->disabled_cb);
1884 		hdr->disabled_param = mfy.param;
1885 		hdr->disabled_cb = disabled_cb;
1886 
1887 		/* Trigger LLL disable */
1888 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1889 				     TICKER_USER_ID_LLL, 0, &mfy);
1890 		LL_ASSERT(!ret);
1891 	} else {
1892 		/* No pending LLL events */
1893 		disabled_cb(&conn->lll);
1894 	}
1895 }
1896 
1897 static void disabled_cb(void *param)
1898 {
1899 	static memq_link_t link;
1900 	static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
1901 	uint32_t ret;
1902 
1903 	mfy.param = param;
1904 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1905 			     TICKER_USER_ID_LLL, 0, &mfy);
1906 	LL_ASSERT(!ret);
1907 }
1908 
1909 static void tx_lll_flush(void *param)
1910 {
1911 	struct node_rx_pdu *rx;
1912 	struct lll_conn *lll;
1913 	struct ll_conn *conn;
1914 	struct node_tx *tx;
1915 	memq_link_t *link;
1916 	uint16_t handle;
1917 
1918 	/* Get reference to ULL context */
1919 	lll = param;
1920 	conn = HDR_LLL2ULL(lll);
1921 	handle = ll_conn_handle_get(conn);
1922 
1923 	lll_conn_flush(handle, lll);
1924 
1925 	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
1926 			    (void **)&tx);
1927 	while (link) {
1928 		uint8_t idx;
1929 		struct lll_tx *tx_buf;
1930 
1931 		idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx_buf);
1932 		LL_ASSERT(tx_buf);
1933 
1934 		tx_buf->handle = LLL_HANDLE_INVALID;
1935 		tx_buf->node = tx;
1936 
1937 		/* TX node UPSTREAM, i.e. Tx node ack path */
1938 		link->next = tx->next; /* Indicates ctrl pool or data pool */
1939 		tx->next = link;
1940 
1941 		MFIFO_ENQUEUE(conn_ack, idx);
1942 
1943 		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
1944 				    (void **)&tx);
1945 	}
1946 
1947 	/* Get the terminate structure reserved in the connection context.
1948 	 * The terminate reason and connection handle should already be
1949 	 * populated before this mayfly function was scheduled.
1950 	 */
1951 	rx = (void *)&conn->llcp_terminate.node_rx;
1952 	LL_ASSERT(rx->hdr.link);
1953 	link = rx->hdr.link;
1954 	rx->hdr.link = NULL;
1955 
1956 	/* Enqueue the terminate towards ULL context */
1957 	ull_rx_put_sched(link, rx);
1958 }
1959 
1960 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1961 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
1962 {
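	/* A zero length LLID DATA_START fragment is acked towards the host and
	 * released here (return -EINVAL); the next non-empty fragment is then
	 * promoted from DATA_CONTINUE to DATA_START so the peer still receives
	 * a correctly framed start fragment.
	 */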
1963 	struct pdu_data *p = (void *)tx->pdu;
1964 
1965 	if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
1966 		conn->start_empty = 1U;
1967 
1968 		ll_tx_ack_put(conn->lll.handle, tx);
1969 
1970 		return -EINVAL;
1971 	} else if (p->len && conn->start_empty) {
1972 		conn->start_empty = 0U;
1973 
1974 		if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
1975 			p->ll_id = PDU_DATA_LLID_DATA_START;
1976 		}
1977 	}
1978 
1979 	return 0;
1980 }
1981 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1982 
1983 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
1984 static uint8_t force_md_cnt_calc(struct lll_conn *lll_connection, uint32_t tx_rate)
1985 {
1986 	uint32_t time_incoming, time_outgoing;
1987 	uint8_t force_md_cnt;
1988 	uint8_t phy_flags;
1989 	uint8_t mic_size;
1990 	uint8_t phy;
1991 
1992 #if defined(CONFIG_BT_CTLR_PHY)
1993 	phy = lll_connection->phy_tx;
1994 	phy_flags = lll_connection->phy_flags;
1995 #else /* !CONFIG_BT_CTLR_PHY */
1996 	phy = PHY_1M;
1997 	phy_flags = 0U;
1998 #endif /* !CONFIG_BT_CTLR_PHY */
1999 
2000 #if defined(CONFIG_BT_CTLR_LE_ENC)
2001 	mic_size = PDU_MIC_SIZE * lll_connection->enc_tx;
2002 #else /* !CONFIG_BT_CTLR_LE_ENC */
2003 	mic_size = 0U;
2004 #endif /* !CONFIG_BT_CTLR_LE_ENC */
2005 
2006 	time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
2007 			1000000UL / tx_rate;
2008 	time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
2009 				  phy_flags) +
2010 			PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
2011 			(EVENT_IFS_US << 1);
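	/* Rough illustration (hypothetical numbers): with a 251 octet RX max,
	 * a tx_rate of 1 Mbit/s, 1M PHY and no encryption, time_incoming is
	 * about 2008 us while time_outgoing is about 2088 + 80 + 300 us, so
	 * outgoing keeps pace with incoming and force_md_cnt below stays 0.
	 */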
2012 
2013 	force_md_cnt = 0U;
2014 	if (time_incoming > time_outgoing) {
2015 		uint32_t delta;
2016 		uint32_t time_keep_alive;
2017 
2018 		delta = (time_incoming << 1) - time_outgoing;
2019 		time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
2020 				   EVENT_IFS_US) << 1;
2021 		force_md_cnt = (delta + (time_keep_alive - 1)) /
2022 			       time_keep_alive;
2023 		LOG_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
2024 		       "keepalive= %u, force_md_cnt = %u.",
2025 		       time_incoming, time_outgoing, delta, time_keep_alive,
2026 		       force_md_cnt);
2027 	}
2028 
2029 	return force_md_cnt;
2030 }
2031 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
2032 
2033 #if defined(CONFIG_BT_CTLR_LE_ENC)
2034 /**
2035  * @brief Pause the data path of a rx queue.
2036  */
2037 void ull_conn_pause_rx_data(struct ll_conn *conn)
2038 {
2039 	conn->pause_rx_data = 1U;
2040 }
2041 
2042 /**
2043  * @brief Resume the data path of a rx queue.
2044  */
2045 void ull_conn_resume_rx_data(struct ll_conn *conn)
2046 {
2047 	conn->pause_rx_data = 0U;
2048 }
2049 #endif /* CONFIG_BT_CTLR_LE_ENC */
2050 
2051 uint16_t ull_conn_event_counter(struct ll_conn *conn)
2052 {
2053 	struct lll_conn *lll;
2054 	uint16_t event_counter;
2055 
2056 	lll = &conn->lll;
2057 
2058 	/* Calculate the current event counter. If the refcount is non-zero,
2059 	 * prepare has been called and the LLL implementation has already
2060 	 * calculated and incremented the event counter (RX path); in that
2061 	 * case we need to subtract one from the current event counter.
2062 	 * Otherwise we are in the TX path and calculate the current event
2063 	 * counter the same way LLL does, by taking the expected event counter
2064 	 * value plus the accumulated latency.
2065 	 */
2066 	if (ull_ref_get(&conn->ull)) {
2067 		/* We are in post-prepare (RX path). Event counter is already
2068 		 * calculated and incremented by 1 for next event.
2069 		 */
2070 		event_counter = lll->event_counter - 1;
2071 	} else {
2072 		event_counter = lll->event_counter + lll->latency_prepare +
2073 				conn->llcp.prep.lazy;
2074 	}
2075 
2076 	return event_counter;
2077 }
2078 static void ull_conn_update_ticker(struct ll_conn *conn,
2079 				   uint32_t ticks_win_offset,
2080 				   uint32_t ticks_slot_overhead,
2081 				   uint32_t periodic_us,
2082 				   uint32_t ticks_at_expire)
2083 {
2084 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2085 	/* Disable the ticker job in order to chain the stop and start
2086 	 * operations, so the RTC is not stopped if no tickers are active.
2087 	 */
2088 	uint32_t mayfly_was_enabled =
2089 		mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW);
2090 
2091 	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0U);
2092 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2093 
2094 	/* start periph/central with new timings */
2095 	uint8_t ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
2096 	uint32_t ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2097 				    ticker_id_conn, ticker_stop_conn_op_cb, (void *)conn);
2098 	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2099 		  (ticker_status == TICKER_STATUS_BUSY));
2100 	ticker_status = ticker_start(
2101 		TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, ticker_id_conn, ticks_at_expire,
2102 		ticks_win_offset, HAL_TICKER_US_TO_TICKS(periodic_us),
2103 		HAL_TICKER_REMAINDER(periodic_us),
2104 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2105 		TICKER_NULL_LAZY,
2106 #else /* !CONFIG_BT_TICKER_LOW_LAT */
2107 		TICKER_LAZY_MUST_EXPIRE_KEEP,
2108 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2109 		(ticks_slot_overhead + conn->ull.ticks_slot),
2110 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
2111 		conn->lll.role == BT_HCI_ROLE_PERIPHERAL ?
2112 		ull_periph_ticker_cb : ull_central_ticker_cb,
2113 #elif defined(CONFIG_BT_PERIPHERAL)
2114 		ull_periph_ticker_cb,
2115 #else
2116 		ull_central_ticker_cb,
2117 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CENTRAL */
2118 		conn, ticker_start_conn_op_cb, (void *)conn);
2119 	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2120 		  (ticker_status == TICKER_STATUS_BUSY));
2121 
2122 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2123 	/* enable ticker job, if disabled in this function */
2124 	if (mayfly_was_enabled) {
2125 		mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U);
2126 	}
2127 #endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
2128 }
2129 
2130 void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
2131 				uint32_t win_offset_us, uint16_t interval, uint16_t latency,
2132 				uint16_t timeout, uint16_t instant)
2133 {
2134 	struct lll_conn *lll;
2135 	uint32_t ticks_win_offset = 0U;
2136 	uint32_t ticks_slot_overhead;
2137 	uint16_t conn_interval_old;
2138 	uint16_t conn_interval_new;
2139 	uint32_t conn_interval_us;
2140 	uint32_t periodic_us;
2141 	uint16_t latency_upd;
2142 	uint16_t instant_latency;
2143 	uint16_t event_counter;
2144 	uint32_t ticks_at_expire;
2145 
2146 	lll = &conn->lll;
2147 
2148 	/* Calculate current event counter */
2149 	event_counter = ull_conn_event_counter(conn);
2150 
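	/* Number of events elapsed since the instant, computed modulo the
	 * 16-bit event counter space; non-zero when the instant has already
	 * passed because of latency.
	 */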
2151 	instant_latency = (event_counter - instant) & 0xFFFF;
2152 
2153 
2154 	ticks_at_expire = conn->llcp.prep.ticks_at_expire;
2155 
2156 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
2157 	/* restore to normal prepare */
2158 	if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
2159 		uint32_t ticks_prepare_to_start =
2160 			MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_preempt_to_start);
2161 
2162 		conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;
2163 
2164 		ticks_at_expire -= (conn->ull.ticks_prepare_to_start - ticks_prepare_to_start);
2165 	}
2166 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
2167 
2168 	/* compensate for instant_latency due to laziness */
2169 	conn_interval_old = instant_latency * lll->interval;
2170 	latency_upd = conn_interval_old / interval;
2171 	conn_interval_new = latency_upd * interval;
2172 	if (conn_interval_new > conn_interval_old) {
2173 		ticks_at_expire += HAL_TICKER_US_TO_TICKS((conn_interval_new - conn_interval_old) *
2174 							  CONN_INT_UNIT_US);
2175 	} else {
2176 		ticks_at_expire -= HAL_TICKER_US_TO_TICKS((conn_interval_old - conn_interval_new) *
2177 							  CONN_INT_UNIT_US);
2178 	}
2179 
2180 	lll->latency_prepare += conn->llcp.prep.lazy;
2181 	lll->latency_prepare -= (instant_latency - latency_upd);
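	/* For illustration (hypothetical values): if 6 old intervals elapsed
	 * since the instant and the new interval is twice the old one,
	 * latency_upd is 3, no anchor shift is needed (6 * old == 3 * new) and
	 * latency_prepare is reduced by 3 so it now counts new intervals.
	 */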
2182 
2183 	/* calculate the ticks slot overhead */
2184 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
2185 		ticks_slot_overhead =
2186 			MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_prepare_to_start);
2187 	} else {
2188 		ticks_slot_overhead = 0U;
2189 	}
2190 
2191 	/* calculate the window widening and interval */
2192 	conn_interval_us = interval * CONN_INT_UNIT_US;
2193 	periodic_us = conn_interval_us;
2194 
2195 	switch (lll->role) {
2196 #if defined(CONFIG_BT_PERIPHERAL)
2197 	case BT_HCI_ROLE_PERIPHERAL:
2198 		lll->periph.window_widening_prepare_us -=
2199 			lll->periph.window_widening_periodic_us * instant_latency;
2200 
2201 		lll->periph.window_widening_periodic_us =
2202 			DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2203 					   lll_clock_ppm_get(conn->periph.sca)) *
2204 					  conn_interval_us), 1000000U);
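		/* E.g. 50 ppm local plus 50 ppm peer SCA at a 1 s connection
		 * interval gives 100 us of widening added per connection
		 * event.
		 */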
2205 		lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
2206 		lll->periph.window_size_prepare_us = win_size * CONN_INT_UNIT_US;
2207 
2208 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2209 		conn->periph.ticks_to_offset = 0U;
2210 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2211 
2212 		lll->periph.window_widening_prepare_us +=
2213 			lll->periph.window_widening_periodic_us * latency_upd;
2214 		if (lll->periph.window_widening_prepare_us > lll->periph.window_widening_max_us) {
2215 			lll->periph.window_widening_prepare_us = lll->periph.window_widening_max_us;
2216 		}
2217 
2218 		ticks_at_expire -= HAL_TICKER_US_TO_TICKS(lll->periph.window_widening_periodic_us *
2219 							  latency_upd);
2220 		ticks_win_offset = HAL_TICKER_US_TO_TICKS((win_offset_us / CONN_INT_UNIT_US) *
2221 							  CONN_INT_UNIT_US);
2222 		periodic_us -= lll->periph.window_widening_periodic_us;
2223 		break;
2224 #endif /* CONFIG_BT_PERIPHERAL */
2225 #if defined(CONFIG_BT_CENTRAL)
2226 	case BT_HCI_ROLE_CENTRAL:
2227 		ticks_win_offset = HAL_TICKER_US_TO_TICKS(win_offset_us);
2228 
2229 		/* Workaround: ticker_start does not take a remainder
2230 		 * parameter for the first interval; add one tick so the
2231 		 * ceiled value is used.
2232 		 */
2233 		ticks_win_offset += 1U;
2234 		break;
2235 #endif /* CONFIG_BT_CENTRAL */
2236 	default:
2237 		LL_ASSERT(0);
2238 		break;
2239 	}
2240 
2241 	lll->interval = interval;
2242 	lll->latency = latency;
2243 
2244 	conn->supervision_timeout = timeout;
2245 	ull_cp_prt_reload_set(conn, conn_interval_us);
2246 
2247 #if defined(CONFIG_BT_CTLR_LE_PING)
2248 	/* APTO in no. of connection events */
2249 	conn->apto_reload = RADIO_CONN_EVENTS((30U * 1000U * 1000U), conn_interval_us);
2250 	/* Dispatch the LE Ping PDU 6 connection events (that the peer would
2251 	 * listen to) before the 30 s timeout.
2252 	 * TODO: due to latency the peer may actually listen later than 30 s.
2253 	 */
2254 	conn->appto_reload = (conn->apto_reload > (lll->latency + 6U)) ?
2255 					   (conn->apto_reload - (lll->latency + 6U)) :
2256 					   conn->apto_reload;
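	/* E.g. at a 50 ms connection interval apto_reload is 600 events (30 s)
	 * and, with zero peripheral latency, appto_reload is 594 events, i.e.
	 * the ping is queued 6 events ahead of the timeout.
	 */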
2257 #endif /* CONFIG_BT_CTLR_LE_PING */
2258 
2259 	if (is_cu_proc) {
2260 		conn->supervision_expire = 0U;
2261 	}
2262 
2263 	/* Update ACL ticker */
2264 	ull_conn_update_ticker(conn, ticks_win_offset, ticks_slot_overhead, periodic_us,
2265 			       ticks_at_expire);
2266 	/* Signal that the prepare needs to be canceled */
2267 	conn->cancel_prepare = 1U;
2268 }
2269 
2270 #if defined(CONFIG_BT_PERIPHERAL)
2271 void ull_conn_update_peer_sca(struct ll_conn *conn)
2272 {
2273 	struct lll_conn *lll;
2274 
2275 	uint32_t conn_interval_us;
2276 	uint32_t periodic_us;
2277 
2278 	lll = &conn->lll;
2279 
2280 	/* calculate the window widening and interval */
2281 	conn_interval_us = lll->interval * CONN_INT_UNIT_US;
2282 	periodic_us = conn_interval_us;
2283 
2284 	lll->periph.window_widening_periodic_us =
2285 		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
2286 				   lll_clock_ppm_get(conn->periph.sca)) *
2287 				  conn_interval_us), 1000000U);
2288 
2289 	periodic_us -= lll->periph.window_widening_periodic_us;
2290 
2291 	/* Update ACL ticker */
2292 	ull_conn_update_ticker(conn, HAL_TICKER_US_TO_TICKS(periodic_us), 0, periodic_us,
2293 				   conn->llcp.prep.ticks_at_expire);
2294 
2295 }
2296 #endif /* CONFIG_BT_PERIPHERAL */
2297 
2298 void ull_conn_chan_map_set(struct ll_conn *conn, const uint8_t chm[5])
2299 {
2300 	struct lll_conn *lll = &conn->lll;
2301 
2302 	memcpy(lll->data_chan_map, chm, sizeof(lll->data_chan_map));
2303 	lll->data_chan_count = util_ones_count_get(lll->data_chan_map, sizeof(lll->data_chan_map));
2304 }
2305 
2306 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2307 static inline void dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2308 				    uint16_t *max_tx_time)
2309 {
2310 	uint8_t phy_select = PHY_1M;
2311 	uint16_t rx_time = 0U;
2312 	uint16_t tx_time = 0U;
2313 
2314 #if defined(CONFIG_BT_CTLR_PHY)
2315 	if (conn->llcp.fex.valid && feature_phy_coded(conn)) {
2316 		/* If coded PHY is supported on the connection
2317 		 * this will define the max times
2318 		 */
2319 		phy_select = PHY_CODED;
2320 		/* If not, max times should be defined by 1M timing */
2321 	}
2322 #endif
2323 
2324 	rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select);
2325 
2326 #if defined(CONFIG_BT_CTLR_PHY)
2327 	tx_time = MIN(conn->lll.dle.default_tx_time,
2328 		      PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select));
2329 #else /* !CONFIG_BT_CTLR_PHY */
2330 	tx_time = PDU_DC_MAX_US(conn->lll.dle.default_tx_octets, phy_select);
2331 #endif /* !CONFIG_BT_CTLR_PHY */
2332 
2333 	/*
2334 	 * see Vol. 6 Part B chapter 4.5.10
2335 	 * minimum value for time is 328 us
2336 	 */
2337 	rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
2338 	tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);
2339 
2340 	*max_rx_time = rx_time;
2341 	*max_tx_time = tx_time;
2342 }
2343 
2344 void ull_dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
2345 				    uint16_t *max_tx_time)
2346 {
2347 	dle_max_time_get(conn, max_rx_time, max_tx_time);
2348 }
2349 
2350 /*
2351  * TODO: this can probably be optimised, e.g. by creating a macro for the
2352  * ull_dle_update_eff function
2353  */
2354 uint8_t ull_dle_update_eff(struct ll_conn *conn)
2355 {
2356 	uint8_t dle_changed = 0U;
2357 
2358 	/* Bitwise OR (not logical OR) so that both update calls always execute */
2359 	dle_changed = ull_dle_update_eff_rx(conn);
2360 	dle_changed |= ull_dle_update_eff_tx(conn);
2361 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2362 	if (dle_changed) {
2363 		conn->lll.evt_len_upd = 1U;
2364 	}
2365 #endif
2366 
2367 
2368 	return dle_changed;
2369 }
2370 
2371 uint8_t ull_dle_update_eff_rx(struct ll_conn *conn)
2372 {
2373 	uint8_t dle_changed = 0U;
2374 
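	/* Per Core Spec Vol 6, Part B, Section 4.5.10 the effective RX octets
	 * are min(local max_rx_octets, remote max_tx_octets), floored at the
	 * 27 octet minimum (PDU_DC_PAYLOAD_SIZE_MIN).
	 */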
2375 	const uint16_t eff_rx_octets =
2376 		MAX(MIN(conn->lll.dle.local.max_rx_octets, conn->lll.dle.remote.max_tx_octets),
2377 		    PDU_DC_PAYLOAD_SIZE_MIN);
2378 
2379 #if defined(CONFIG_BT_CTLR_PHY)
2380 	unsigned int min_eff_rx_time = (conn->lll.phy_rx == PHY_CODED) ?
2381 			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2382 
2383 	const uint16_t eff_rx_time =
2384 		MAX(MIN(conn->lll.dle.local.max_rx_time, conn->lll.dle.remote.max_tx_time),
2385 		    min_eff_rx_time);
2386 
2387 	if (eff_rx_time != conn->lll.dle.eff.max_rx_time) {
2388 		conn->lll.dle.eff.max_rx_time = eff_rx_time;
2389 		dle_changed = 1U;
2390 	}
2391 #else
2392 	conn->lll.dle.eff.max_rx_time = PDU_DC_MAX_US(eff_rx_octets, PHY_1M);
2393 #endif
2394 
2395 	if (eff_rx_octets != conn->lll.dle.eff.max_rx_octets) {
2396 		conn->lll.dle.eff.max_rx_octets = eff_rx_octets;
2397 		dle_changed = 1U;
2398 	}
2399 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2400 	/* We delay the update of the event length until after the DLE procedure has finished */
2401 	if (dle_changed) {
2402 		conn->lll.evt_len_upd_delayed = 1;
2403 	}
2404 #endif
2405 
2406 	return dle_changed;
2407 }
2408 
2409 uint8_t ull_dle_update_eff_tx(struct ll_conn *conn)
2410 
2411 {
2412 	uint8_t dle_changed = 0U;
2413 
2414 	const uint16_t eff_tx_octets =
2415 		MAX(MIN(conn->lll.dle.local.max_tx_octets, conn->lll.dle.remote.max_rx_octets),
2416 		    PDU_DC_PAYLOAD_SIZE_MIN);
2417 
2418 #if defined(CONFIG_BT_CTLR_PHY)
2419 	unsigned int min_eff_tx_time = (conn->lll.phy_tx == PHY_CODED) ?
2420 			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;
2421 
2422 	const uint16_t eff_tx_time =
2423 		MAX(MIN(conn->lll.dle.local.max_tx_time, conn->lll.dle.remote.max_rx_time),
2424 		    min_eff_tx_time);
2425 
2426 	if (eff_tx_time != conn->lll.dle.eff.max_tx_time) {
2427 		conn->lll.dle.eff.max_tx_time = eff_tx_time;
2428 		dle_changed = 1U;
2429 	}
2430 #else
2431 	conn->lll.dle.eff.max_tx_time = PDU_DC_MAX_US(eff_tx_octets, PHY_1M);
2432 #endif
2433 
2434 	if (eff_tx_octets != conn->lll.dle.eff.max_tx_octets) {
2435 		conn->lll.dle.eff.max_tx_octets = eff_tx_octets;
2436 		dle_changed = 1U;
2437 	}
2438 
2439 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
2440 	if (dle_changed) {
2441 		conn->lll.evt_len_upd = 1U;
2442 	}
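	/* Apply any event length update deferred by the RX path so a single
	 * slot reservation update covers both directions.
	 */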
2443 	conn->lll.evt_len_upd |= conn->lll.evt_len_upd_delayed;
2444 	conn->lll.evt_len_upd_delayed = 0;
2445 #endif
2446 
2447 	return dle_changed;
2448 }
2449 
2450 static void ull_len_data_length_trim(uint16_t *tx_octets, uint16_t *tx_time)
2451 {
2452 #if defined(CONFIG_BT_CTLR_PHY_CODED)
2453 	uint16_t tx_time_max =
2454 			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
2455 #else /* !CONFIG_BT_CTLR_PHY_CODED */
2456 	uint16_t tx_time_max =
2457 			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
2458 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
2459 
2460 	/* trim to supported values */
2461 	if (*tx_octets > LL_LENGTH_OCTETS_TX_MAX) {
2462 		*tx_octets = LL_LENGTH_OCTETS_TX_MAX;
2463 	}
2464 
2465 	if (*tx_time > tx_time_max) {
2466 		*tx_time = tx_time_max;
2467 	}
2468 }
2469 
2470 void ull_dle_local_tx_update(struct ll_conn *conn, uint16_t tx_octets, uint16_t tx_time)
2471 {
2472 	/* Trim to supported values */
2473 	ull_len_data_length_trim(&tx_octets, &tx_time);
2474 
2475 	conn->lll.dle.default_tx_octets = tx_octets;
2476 
2477 #if defined(CONFIG_BT_CTLR_PHY)
2478 	conn->lll.dle.default_tx_time = tx_time;
2479 #endif /* CONFIG_BT_CTLR_PHY */
2480 
2481 	dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time, &conn->lll.dle.local.max_tx_time);
2482 	conn->lll.dle.local.max_tx_octets = conn->lll.dle.default_tx_octets;
2483 }
2484 
2485 void ull_dle_init(struct ll_conn *conn, uint8_t phy)
2486 {
2487 #if defined(CONFIG_BT_CTLR_PHY)
2488 	const uint16_t max_time_min = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy);
2489 	const uint16_t max_time_max = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy);
2490 #endif /* CONFIG_BT_CTLR_PHY */
2491 
2492 	/* Clear DLE data set */
2493 	memset(&conn->lll.dle, 0, sizeof(conn->lll.dle));
2494 	/* See BT. 5.2 Spec - Vol 6, Part B, Sect 4.5.10
2495 	 * Default to locally max supported rx/tx length/time
2496 	 */
2497 	ull_dle_local_tx_update(conn, default_tx_octets, default_tx_time);
2498 
2499 	conn->lll.dle.local.max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
2500 #if defined(CONFIG_BT_CTLR_PHY)
2501 	conn->lll.dle.local.max_rx_time = max_time_max;
2502 #endif /* CONFIG_BT_CTLR_PHY */
2503 
2504 	/* Default to minimum rx/tx data length/time */
2505 	conn->lll.dle.remote.max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2506 	conn->lll.dle.remote.max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
2507 
2508 #if defined(CONFIG_BT_CTLR_PHY)
2509 	conn->lll.dle.remote.max_tx_time = max_time_min;
2510 	conn->lll.dle.remote.max_rx_time = max_time_min;
2511 #endif /* CONFIG_BT_CTLR_PHY */
2512 
2513 	/*
2514 	 * Per Bluetooth Core Specification version 5.3, Vol 6,
2515 	 * Part B, Section 4.5.10, ull_dle_update_eff can be called
2516 	 * here for initialisation.
2517 	 */
2518 	(void)ull_dle_update_eff(conn);
2519 
2520 	/* Check whether the controller should perform a data length update after
2521 	 * connection is established
2522 	 */
2523 #if defined(CONFIG_BT_CTLR_PHY)
2524 	if ((conn->lll.dle.local.max_rx_time != max_time_min ||
2525 	     conn->lll.dle.local.max_tx_time != max_time_min)) {
2526 		conn->lll.dle.update = 1;
2527 	} else
2528 #endif
2529 	{
2530 		if (conn->lll.dle.local.max_tx_octets != PDU_DC_PAYLOAD_SIZE_MIN ||
2531 		    conn->lll.dle.local.max_rx_octets != PDU_DC_PAYLOAD_SIZE_MIN) {
2532 			conn->lll.dle.update = 1;
2533 		}
2534 	}
2535 }
2536 
2537 void ull_conn_default_tx_octets_set(uint16_t tx_octets)
2538 {
2539 	default_tx_octets = tx_octets;
2540 }
2541 
2542 void ull_conn_default_tx_time_set(uint16_t tx_time)
2543 {
2544 	default_tx_time = tx_time;
2545 }
2546 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2547 
2548 uint8_t ull_conn_lll_phy_active(struct ll_conn *conn, uint8_t phys)
2549 {
2550 #if defined(CONFIG_BT_CTLR_PHY)
2551 	if (!(phys & (conn->lll.phy_tx | conn->lll.phy_rx))) {
2552 #else /* !CONFIG_BT_CTLR_PHY */
2553 	if (!(phys & 0x01)) {
2554 #endif /* !CONFIG_BT_CTLR_PHY */
2555 		return 0;
2556 	}
2557 	return 1;
2558 }
2559 
2560 uint8_t ull_is_lll_tx_queue_empty(struct ll_conn *conn)
2561 {
2562 	return (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail, NULL) == NULL);
2563 }
2564