1 /*
2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stddef.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 #include <zephyr/sys/byteorder.h>
12 
13 #include "hal/cpu.h"
14 #include "hal/ecb.h"
15 #include "hal/ccm.h"
16 #include "hal/ticker.h"
17 
18 #include "util/util.h"
19 #include "util/mem.h"
20 #include "util/memq.h"
21 #include "util/mfifo.h"
22 #include "util/mayfly.h"
23 #include "util/dbuf.h"
24 
25 #include "ticker/ticker.h"
26 
27 #include "pdu_df.h"
28 #include "lll/pdu_vendor.h"
29 #include "pdu.h"
30 
31 #include "lll.h"
32 #include "lll_clock.h"
33 #include "lll/lll_df_types.h"
34 #include "lll_conn.h"
35 #include "lll_conn_iso.h"
36 #include "lll/lll_vendor.h"
37 
38 #include "ll_sw/ull_tx_queue.h"
39 
40 #include "isoal.h"
41 #include "ull_iso_types.h"
42 #include "ull_conn_types.h"
43 #include "ull_conn_iso_types.h"
44 
45 #if defined(CONFIG_BT_CTLR_USER_EXT)
46 #include "ull_vendor.h"
47 #endif /* CONFIG_BT_CTLR_USER_EXT */
48 
49 #include "ull_internal.h"
50 #include "ull_llcp_internal.h"
51 #include "ull_sched_internal.h"
52 #include "ull_chan_internal.h"
53 #include "ull_conn_internal.h"
54 #include "ull_peripheral_internal.h"
55 #include "ull_central_internal.h"
56 
57 #include "ull_iso_internal.h"
58 #include "ull_conn_iso_internal.h"
59 #include "ull_peripheral_iso_internal.h"
60 
61 
62 #include "ll.h"
63 #include "ll_feat.h"
64 #include "ll_settings.h"
65 
66 #include "ll_sw/ull_llcp.h"
67 #include "ll_sw/ull_llcp_features.h"
68 
69 #include "hal/debug.h"
70 
71 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
72 #include <zephyr/logging/log.h>
73 LOG_MODULE_REGISTER(bt_ctlr_ull_conn);
74 
75 static int init_reset(void);
76 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
77 static void tx_demux_sched(struct ll_conn *conn);
78 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
79 static void tx_demux(void *param);
80 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);
81 
82 static void ticker_update_conn_op_cb(uint32_t status, void *param);
83 static void ticker_stop_conn_op_cb(uint32_t status, void *param);
84 static void ticker_start_conn_op_cb(uint32_t status, void *param);
85 
86 static void conn_setup_adv_scan_disabled_cb(void *param);
87 static inline void disable(uint16_t handle);
88 static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
89 static void conn_cleanup_finalize(struct ll_conn *conn);
90 static void tx_ull_flush(struct ll_conn *conn);
91 static void ticker_stop_op_cb(uint32_t status, void *param);
92 static void conn_disable(void *param);
93 static void disabled_cb(void *param);
94 static void tx_lll_flush(void *param);
95 
96 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
97 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
98 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
99 
100 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
101 /* Connection context pointer used as CPR mutex to serialize connection
102  * parameter request procedures across simultaneous connections so that
103  * offsets exchanged to the peer do not get changed.
104  */
105 struct ll_conn *conn_upd_curr;
106 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
107 
108 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
109 static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
110 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
111 
112 #if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
113 #define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
114 #endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */
115 
116 #define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
117 				offsetof(struct pdu_data, lldata) + \
118 				(LL_LENGTH_OCTETS_TX_MAX + \
119 				BT_CTLR_USER_TX_BUFFER_OVERHEAD))
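/* Illustrative sizing of one Tx buffer (hypothetical configuration, not a
 * build-time guarantee): with LL_LENGTH_OCTETS_TX_MAX = 27 and
 * BT_CTLR_USER_TX_BUFFER_OVERHEAD = 0, each buffer holds the node_tx and
 * pdu_data headers plus a 27 octet payload, rounded up to the memory
 * alignment granularity by MROUND().
 */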
120 
121 #define CONN_DATA_BUFFERS CONFIG_BT_BUF_ACL_TX_COUNT
122 
123 static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONN_DATA_BUFFERS);
124 static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
125 		    (CONN_DATA_BUFFERS +
126 		     LLCP_TX_CTRL_BUF_COUNT));
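/* The ack FIFO is sized larger than the Tx FIFO because acknowledgements for
 * LLCP control PDUs, which are allocated from a separate control buffer pool,
 * travel through the same ack path as data PDU acknowledgements.
 */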
127 
128 static struct {
129 	void *free;
130 	uint8_t pool[CONN_TX_BUF_SIZE * CONN_DATA_BUFFERS];
131 } mem_conn_tx;
132 
133 static struct {
134 	void *free;
135 	uint8_t pool[sizeof(memq_link_t) *
136 		     (CONN_DATA_BUFFERS +
137 		      LLCP_TX_CTRL_BUF_COUNT)];
138 } mem_link_tx;
139 
140 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
141 static uint16_t default_tx_octets;
142 static uint16_t default_tx_time;
143 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
144 
145 #if defined(CONFIG_BT_CTLR_PHY)
146 static uint8_t default_phy_tx;
147 static uint8_t default_phy_rx;
148 #endif /* CONFIG_BT_CTLR_PHY */
149 
150 static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
151 static void *conn_free;
152 
153 struct ll_conn *ll_conn_acquire(void)
154 {
155 	return mem_acquire(&conn_free);
156 }
157 
158 void ll_conn_release(struct ll_conn *conn)
159 {
160 	mem_release(conn, &conn_free);
161 }
162 
163 uint16_t ll_conn_handle_get(struct ll_conn *conn)
164 {
165 	return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
166 }
167 
168 struct ll_conn *ll_conn_get(uint16_t handle)
169 {
170 	return mem_get(conn_pool, sizeof(struct ll_conn), handle);
171 }
172 
173 struct ll_conn *ll_connected_get(uint16_t handle)
174 {
175 	struct ll_conn *conn;
176 
177 	if (handle >= CONFIG_BT_MAX_CONN) {
178 		return NULL;
179 	}
180 
181 	conn = ll_conn_get(handle);
182 	if (conn->lll.handle != handle) {
183 		return NULL;
184 	}
185 
186 	return conn;
187 }
188 
189 uint16_t ll_conn_free_count_get(void)
190 {
191 	return mem_free_count_get(conn_free);
192 }
193 
194 void *ll_tx_mem_acquire(void)
195 {
196 	return mem_acquire(&mem_conn_tx.free);
197 }
198 
199 void ll_tx_mem_release(void *tx)
200 {
201 	mem_release(tx, &mem_conn_tx.free);
202 }
203 
204 int ll_tx_mem_enqueue(uint16_t handle, void *tx)
205 {
206 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
207 #define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
208 	static uint32_t tx_rate;
209 	static uint32_t tx_cnt;
210 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
211 	struct lll_tx *lll_tx;
212 	struct ll_conn *conn;
213 	uint8_t idx;
214 
215 	conn = ll_connected_get(handle);
216 	if (!conn) {
217 		return -EINVAL;
218 	}
219 
220 	idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
221 	if (!lll_tx) {
222 		return -ENOBUFS;
223 	}
224 
225 	lll_tx->handle = handle;
226 	lll_tx->node = tx;
227 
228 	MFIFO_ENQUEUE(conn_tx, idx);
229 
230 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
231 	if (ull_ref_get(&conn->ull)) {
232 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
233 		if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
234 			uint8_t previous, force_md_cnt;
235 
236 			force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
237 			previous = lll_conn_force_md_cnt_set(force_md_cnt);
238 			if (previous != force_md_cnt) {
239 				LOG_INF("force_md_cnt: old= %u, new= %u.", previous, force_md_cnt);
240 			}
241 		}
242 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
243 
244 		tx_demux_sched(conn);
245 
246 #if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
247 	} else {
248 		lll_conn_force_md_cnt_set(0U);
249 #endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
250 	}
251 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
252 
253 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
254 		ull_periph_latency_cancel(conn, handle);
255 	}
256 
257 #if defined(CONFIG_BT_CTLR_THROUGHPUT)
258 	static uint32_t last_cycle_stamp;
259 	static uint32_t tx_len;
260 	struct pdu_data *pdu;
261 	uint32_t cycle_stamp;
262 	uint64_t delta;
263 
264 	cycle_stamp = k_cycle_get_32();
265 	delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
266 	if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
267 		LOG_INF("incoming Tx: count= %u, len= %u, rate= %u bps.", tx_cnt, tx_len, tx_rate);
268 
269 		last_cycle_stamp = cycle_stamp;
270 		tx_cnt = 0U;
271 		tx_len = 0U;
272 	}
273 
274 	pdu = (void *)((struct node_tx *)tx)->pdu;
275 	tx_len += pdu->len;
276 	tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
277 	tx_cnt++;
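	/* Illustrative rate calculation (hypothetical numbers): if 100000
	 * bytes were queued over a delta of 1 s (1e9 ns), tx_rate evaluates
	 * to (100000 << 3) * 1e9 / 1e9 = 800000 bps. The counters are reset
	 * once delta exceeds BT_CTLR_THROUGHPUT_PERIOD (1 s).
	 */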
278 #endif /* CONFIG_BT_CTLR_THROUGHPUT */
279 
280 	return 0;
281 }
282 
283 uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
284 		    uint16_t interval_max, uint16_t latency, uint16_t timeout, uint16_t *offset)
285 {
286 	struct ll_conn *conn;
287 
288 	conn = ll_connected_get(handle);
289 	if (!conn) {
290 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
291 	}
292 
293 	if (cmd == 0U) {
294 		uint8_t err;
295 
296 		err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout,
297 					 offset);
298 		if (err) {
299 			return err;
300 		}
301 
302 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
303 		    conn->lll.role) {
304 			ull_periph_latency_cancel(conn, handle);
305 		}
306 	} else if (cmd == 2U) {
307 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
308 		if (status == 0U) {
309 			ull_cp_conn_param_req_reply(conn);
310 		} else {
311 			ull_cp_conn_param_req_neg_reply(conn, status);
312 		}
313 		return BT_HCI_ERR_SUCCESS;
314 #else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
315 		/* CPR feature not supported */
316 		return BT_HCI_ERR_CMD_DISALLOWED;
317 #endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
318 	} else {
319 		return BT_HCI_ERR_UNKNOWN_CMD;
320 	}
321 
322 	return 0;
323 }
324 
325 uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
326 {
327 	struct ll_conn *conn;
328 
329 	conn = ll_connected_get(handle);
330 	if (!conn) {
331 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
332 	}
333 
334 	/*
335 	 * Core Spec 5.2 Vol4: 7.8.20:
336 	 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
337 	 * for the specified Connection_Handle. The returned value indicates the state of
338 	 * the Channel_Map specified by the last transmitted or received Channel_Map
339 	 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
340 	 * Connection_Handle, regardless of whether the Central has received an
341  * acknowledgment.
342 	 */
343 	const uint8_t *pending_chm;
344 
345 	pending_chm = ull_cp_chan_map_update_pending(conn);
346 	if (pending_chm) {
347 		memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
348 	} else {
349 		memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
350 	}
351 
352 	return 0;
353 }
354 
355 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
356 uint8_t ll_req_peer_sca(uint16_t handle)
357 {
358 	struct ll_conn *conn;
359 
360 	conn = ll_connected_get(handle);
361 	if (!conn) {
362 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
363 	}
364 
365 	return ull_cp_req_peer_sca(conn);
366 }
367 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
368 
369 static bool is_valid_disconnect_reason(uint8_t reason)
370 {
371 	switch (reason) {
372 	case BT_HCI_ERR_AUTH_FAIL:
373 	case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
374 	case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
375 	case BT_HCI_ERR_REMOTE_POWER_OFF:
376 	case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
377 	case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
378 	case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
379 		return true;
380 	default:
381 		return false;
382 	}
383 }
384 
385 uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
386 {
387 	struct ll_conn *conn;
388 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
389 	struct ll_conn_iso_stream *cis;
390 #endif
391 
392 	if (IS_ACL_HANDLE(handle)) {
393 		conn = ll_connected_get(handle);
394 
395 		/* Is conn still connected? */
396 		if (!conn) {
397 			return BT_HCI_ERR_CMD_DISALLOWED;
398 		}
399 
400 		if (!is_valid_disconnect_reason(reason)) {
401 			return BT_HCI_ERR_INVALID_PARAM;
402 		}
403 
404 		uint8_t err;
405 
406 		err = ull_cp_terminate(conn, reason);
407 		if (err) {
408 			return err;
409 		}
410 
411 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
412 			ull_periph_latency_cancel(conn, handle);
413 		}
414 		return 0;
415 	}
416 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
417 	if (IS_CIS_HANDLE(handle)) {
418 		cis = ll_iso_stream_connected_get(handle);
419 		if (!cis) {
420 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
421 			/* CIS is not connected - get the unconnected instance */
422 			cis = ll_conn_iso_stream_get(handle);
423 
424 			/* Sanity-check instance to make sure it's created but not connected */
425 			if (cis->group && cis->lll.handle == handle && !cis->established) {
426 				if (cis->group->state == CIG_STATE_CONFIGURABLE) {
427 					/* Disallow if CIG is still in configurable state */
428 					return BT_HCI_ERR_CMD_DISALLOWED;
429 
430 				} else if (cis->group->state == CIG_STATE_INITIATING) {
431 					conn = ll_connected_get(cis->lll.acl_handle);
432 
433 					/* CIS is not yet established - try to cancel procedure */
434 					if (ull_cp_cc_cancel(conn)) {
435 						/* Successfully canceled - complete disconnect */
436 						struct node_rx_pdu *node_terminate;
437 
438 						node_terminate = ull_pdu_rx_alloc();
439 						LL_ASSERT(node_terminate);
440 
441 						node_terminate->hdr.handle = handle;
442 						node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
443 						*((uint8_t *)node_terminate->pdu) =
444 							BT_HCI_ERR_LOCALHOST_TERM_CONN;
445 
446 						ll_rx_put_sched(node_terminate->hdr.link,
447 							node_terminate);
448 
449 						/* We're no longer initiating a connection */
450 						cis->group->state = CIG_STATE_CONFIGURABLE;
451 
452 						/* This is now a successful disconnection */
453 						return BT_HCI_ERR_SUCCESS;
454 					}
455 
456 					/* Procedure could not be canceled in the current
457 					 * state - let it run its course and enqueue a
458 					 * terminate procedure.
459 					 */
460 					return ull_cp_cis_terminate(conn, cis, reason);
461 				}
462 			}
463 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
464 			/* Disallow if CIS is not connected */
465 			return BT_HCI_ERR_CMD_DISALLOWED;
466 		}
467 
468 		conn = ll_connected_get(cis->lll.acl_handle);
469 		/* Disallow if ACL has disconnected */
470 		if (!conn) {
471 			return BT_HCI_ERR_CMD_DISALLOWED;
472 		}
473 
474 		return ull_cp_cis_terminate(conn, cis, reason);
475 	}
476 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
477 
478 	return BT_HCI_ERR_UNKNOWN_CONN_ID;
479 }
480 
481 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
482 uint8_t ll_feature_req_send(uint16_t handle)
483 {
484 	struct ll_conn *conn;
485 
486 	conn = ll_connected_get(handle);
487 	if (!conn) {
488 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
489 	}
490 
491 	uint8_t err;
492 
493 	err = ull_cp_feature_exchange(conn, 1U);
494 	if (err) {
495 		return err;
496 	}
497 
498 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
499 	    IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
500 	    conn->lll.role) {
501 		ull_periph_latency_cancel(conn, handle);
502 	}
503 
504 	return 0;
505 }
506 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
507 
508 uint8_t ll_version_ind_send(uint16_t handle)
509 {
510 	struct ll_conn *conn;
511 
512 	conn = ll_connected_get(handle);
513 	if (!conn) {
514 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
515 	}
516 
517 	uint8_t err;
518 
519 	err = ull_cp_version_exchange(conn);
520 	if (err) {
521 		return err;
522 	}
523 
524 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
525 		ull_periph_latency_cancel(conn, handle);
526 	}
527 
528 	return 0;
529 }
530 
531 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
532 static bool ll_len_validate(uint16_t tx_octets, uint16_t tx_time)
533 {
534 	/* validate if within HCI allowed range */
535 	if (!IN_RANGE(tx_octets, PDU_DC_PAYLOAD_SIZE_MIN,
536 		      PDU_DC_PAYLOAD_SIZE_MAX)) {
537 		return false;
538 	}
539 
540 	/* validate if within HCI allowed range */
541 	if (!IN_RANGE(tx_time, PDU_DC_PAYLOAD_TIME_MIN,
542 		      PDU_DC_PAYLOAD_TIME_MAX_CODED)) {
543 		return false;
544 	}
545 
546 	return true;
547 }
548 
549 uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
550 			    uint16_t tx_time)
551 {
552 	struct ll_conn *conn;
553 
554 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
555 	    !ll_len_validate(tx_octets, tx_time)) {
556 		return BT_HCI_ERR_INVALID_PARAM;
557 	}
558 
559 	conn = ll_connected_get(handle);
560 	if (!conn) {
561 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
562 	}
563 
564 	if (!feature_dle(conn)) {
565 		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
566 	}
567 
568 	uint8_t err;
569 
570 	err = ull_cp_data_length_update(conn, tx_octets, tx_time);
571 	if (err) {
572 		return err;
573 	}
574 
575 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
576 		ull_periph_latency_cancel(conn, handle);
577 	}
578 
579 	return 0;
580 }
581 
582 void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
583 {
584 	*max_tx_octets = default_tx_octets;
585 	*max_tx_time = default_tx_time;
586 }
587 
588 uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
589 {
590 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
591 	    !ll_len_validate(max_tx_octets, max_tx_time)) {
592 		return BT_HCI_ERR_INVALID_PARAM;
593 	}
594 
595 	default_tx_octets = max_tx_octets;
596 	default_tx_time = max_tx_time;
597 
598 	return 0;
599 }
600 
601 void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
602 		       uint16_t *max_rx_octets, uint16_t *max_rx_time)
603 {
604 #if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
605 #define PHY (PHY_CODED)
606 #else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
607 #define PHY (PHY_1M)
608 #endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
609 	*max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
610 	*max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
611 	*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
612 	*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
613 #undef PHY
614 }
615 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
616 
617 #if defined(CONFIG_BT_CTLR_PHY)
618 uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
619 {
620 	struct ll_conn *conn;
621 
622 	conn = ll_connected_get(handle);
623 	if (!conn) {
624 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
625 	}
626 
627 	/* TODO: context safe read */
628 	*tx = conn->lll.phy_tx;
629 	*rx = conn->lll.phy_rx;
630 
631 	return 0;
632 }
633 
634 uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
635 {
636 	/* TODO: validate against supported phy */
637 
638 	default_phy_tx = tx;
639 	default_phy_rx = rx;
640 
641 	return 0;
642 }
643 
644 uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
645 {
646 	struct ll_conn *conn;
647 
648 	conn = ll_connected_get(handle);
649 	if (!conn) {
650 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
651 	}
652 
653 	if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
654 		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
655 	}
656 
657 	uint8_t err;
658 
659 	err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
660 	if (err) {
661 		return err;
662 	}
663 
664 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
665 		ull_periph_latency_cancel(conn, handle);
666 	}
667 
668 	return 0;
669 }
670 #endif /* CONFIG_BT_CTLR_PHY */
671 
672 #if defined(CONFIG_BT_CTLR_CONN_RSSI)
673 uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
674 {
675 	struct ll_conn *conn;
676 
677 	conn = ll_connected_get(handle);
678 	if (!conn) {
679 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
680 	}
681 
682 	*rssi = conn->lll.rssi_latest;
683 
684 	return 0;
685 }
686 #endif /* CONFIG_BT_CTLR_CONN_RSSI */
687 
688 #if defined(CONFIG_BT_CTLR_LE_PING)
689 uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
690 {
691 	struct ll_conn *conn;
692 
693 	conn = ll_connected_get(handle);
694 	if (!conn) {
695 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
696 	}
697 
698 	*apto = conn->apto_reload * conn->lll.interval * 125U / 1000;
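	/* Worked conversion (illustrative values): apto_reload counts
	 * connection events and lll.interval is in 1.25 ms units, so
	 * multiplying by 125/1000 yields the HCI 10 ms units. E.g. with
	 * interval = 8 (10 ms) and apto_reload = 3000 events,
	 * *apto = 3000 * 8 * 125 / 1000 = 3000, i.e. 30 s.
	 */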
699 
700 	return 0;
701 }
702 
703 uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
704 {
705 	struct ll_conn *conn;
706 
707 	conn = ll_connected_get(handle);
708 	if (!conn) {
709 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
710 	}
711 
712 	conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
713 					      conn->lll.interval *
714 					      CONN_INT_UNIT_US);
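	/* Inverse of the conversion in ll_apto_get() (illustrative values):
	 * apto = 3000 (30 s in 10 ms units) with interval = 8 (10000 us per
	 * event) yields apto_reload = 30000000 / 10000 = 3000 connection
	 * events.
	 */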
715 
716 	return 0;
717 }
718 #endif /* CONFIG_BT_CTLR_LE_PING */
719 
720 int ull_conn_init(void)
721 {
722 	int err;
723 
724 	err = init_reset();
725 	if (err) {
726 		return err;
727 	}
728 
729 	return 0;
730 }
731 
732 int ull_conn_reset(void)
733 {
734 	uint16_t handle;
735 	int err;
736 
737 #if defined(CONFIG_BT_CENTRAL)
738 	/* Reset initiator */
739 	(void)ull_central_reset();
740 #endif /* CONFIG_BT_CENTRAL */
741 
742 	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
743 		disable(handle);
744 	}
745 
746 	/* Re-initialize the Tx mfifo */
747 	MFIFO_INIT(conn_tx);
748 
749 	/* Re-initialize the Tx Ack mfifo */
750 	MFIFO_INIT(conn_ack);
751 
752 	err = init_reset();
753 	if (err) {
754 		return err;
755 	}
756 
757 	return 0;
758 }
759 
760 struct lll_conn *ull_conn_lll_get(uint16_t handle)
761 {
762 	struct ll_conn *conn;
763 
764 	conn = ll_conn_get(handle);
765 
766 	return &conn->lll;
767 }
768 
769 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
770 uint16_t ull_conn_default_tx_octets_get(void)
771 {
772 	return default_tx_octets;
773 }
774 
775 #if defined(CONFIG_BT_CTLR_PHY)
776 uint16_t ull_conn_default_tx_time_get(void)
777 {
778 	return default_tx_time;
779 }
780 #endif /* CONFIG_BT_CTLR_PHY */
781 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
782 
783 #if defined(CONFIG_BT_CTLR_PHY)
784 uint8_t ull_conn_default_phy_tx_get(void)
785 {
786 	return default_phy_tx;
787 }
788 
789 uint8_t ull_conn_default_phy_rx_get(void)
790 {
791 	return default_phy_rx;
792 }
793 #endif /* CONFIG_BT_CTLR_PHY */
794 
795 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
796 bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
797 			     uint8_t const *const own_id_addr,
798 			     uint8_t const peer_id_addr_type,
799 			     uint8_t const *const peer_id_addr)
800 {
801 	uint16_t handle;
802 
803 	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
804 		struct ll_conn *conn = ll_connected_get(handle);
805 
806 		if (conn &&
807 		    conn->peer_id_addr_type == peer_id_addr_type &&
808 		    !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
809 		    conn->own_id_addr_type == own_id_addr_type &&
810 		    !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
811 			return true;
812 		}
813 	}
814 
815 	return false;
816 }
817 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
818 
819 void ull_conn_setup(memq_link_t *rx_link, struct node_rx_hdr *rx)
820 {
821 	struct node_rx_ftr *ftr;
822 	struct ull_hdr *hdr;
823 
824 	/* Store the link in the node rx so that, when the done event is
825 	 * processed, it can be used to enqueue the node rx towards the LL context.
826 	 */
827 	rx->link = rx_link;
828 
829 	/* NOTE: LLL conn context SHALL be after lll_hdr in
830 	 *       struct lll_adv and struct lll_scan.
831 	 */
832 	ftr = &(rx->rx_ftr);
833 
834 	/* Check the reference count and decide whether to set up the
835 	 * connection here or when the done event arrives.
836 	 */
837 	hdr = HDR_LLL2ULL(ftr->param);
838 	if (ull_ref_get(hdr)) {
839 		/* Setup connection in ULL disabled callback,
840 		 * pass the node rx as disabled callback parameter.
841 		 */
842 		LL_ASSERT(!hdr->disabled_cb);
843 		hdr->disabled_param = rx;
844 		hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
845 	} else {
846 		conn_setup_adv_scan_disabled_cb(rx);
847 	}
848 }
849 
850 void ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
851 {
852 	struct pdu_data *pdu_rx;
853 	struct ll_conn *conn;
854 
855 	conn = ll_connected_get((*rx)->hdr.handle);
856 	if (!conn) {
857 		/* Mark buffer for release */
858 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
859 
860 		return;
861 	}
862 
863 	ull_cp_tx_ntf(conn);
864 
865 	pdu_rx = (void *)(*rx)->pdu;
866 
867 	switch (pdu_rx->ll_id) {
868 	case PDU_DATA_LLID_CTRL:
869 	{
870 		/* Mark buffer for release */
871 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
872 
873 		ull_cp_rx(conn, link, *rx);
874 
875 		return;
876 	}
877 
878 	case PDU_DATA_LLID_DATA_CONTINUE:
879 	case PDU_DATA_LLID_DATA_START:
880 #if defined(CONFIG_BT_CTLR_LE_ENC)
881 		if (conn->pause_rx_data) {
882 			conn->llcp_terminate.reason_final =
883 				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
884 
885 			/* Mark buffer for release */
886 			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
887 		}
888 #endif /* CONFIG_BT_CTLR_LE_ENC */
889 		break;
890 
891 	case PDU_DATA_LLID_RESV:
892 	default:
893 #if defined(CONFIG_BT_CTLR_LE_ENC)
894 		if (conn->pause_rx_data) {
895 			conn->llcp_terminate.reason_final =
896 				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
897 		}
898 #endif /* CONFIG_BT_CTLR_LE_ENC */
899 
900 		/* Invalid LL id, drop it. */
901 
902 		/* Mark buffer for release */
903 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
904 
905 		break;
906 	}
907 }
908 
909 int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire,
910 		  uint32_t remainder, uint16_t lazy)
911 {
912 	LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);
913 
914 	conn->llcp.prep.ticks_at_expire = ticks_at_expire;
915 	conn->llcp.prep.remainder = remainder;
916 	conn->llcp.prep.lazy = lazy;
917 
918 	ull_cp_run(conn);
919 
920 	if (conn->cancel_prepare) {
921 		/* Reset signal */
922 		conn->cancel_prepare = 0U;
923 
924 		/* Cancel prepare */
925 		return -ECANCELED;
926 	}
927 
928 	/* Continue prepare */
929 	return 0;
930 }
931 
932 void ull_conn_done(struct node_rx_event_done *done)
933 {
934 	uint32_t ticks_drift_minus;
935 	uint32_t ticks_drift_plus;
936 	uint32_t ticks_slot_minus;
937 	uint32_t ticks_slot_plus;
938 	uint16_t latency_event;
939 	uint16_t elapsed_event;
940 	struct lll_conn *lll;
941 	struct ll_conn *conn;
942 	uint8_t reason_final;
943 	uint16_t lazy;
944 	uint8_t force;
945 
946 	/* Get reference to ULL context */
947 	conn = CONTAINER_OF(done->param, struct ll_conn, ull);
948 	lll = &conn->lll;
949 
950 	/* Skip if connection terminated by local host */
951 	if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
952 		return;
953 	}
954 
955 	ull_cp_tx_ntf(conn);
956 
957 #if defined(CONFIG_BT_CTLR_LE_ENC)
958 	/* Check authenticated payload expiry or MIC failure */
959 	switch (done->extra.mic_state) {
960 	case LLL_CONN_MIC_NONE:
961 #if defined(CONFIG_BT_CTLR_LE_PING)
962 		if (lll->enc_rx && lll->enc_tx) {
963 			uint16_t appto_reload_new;
964 
965 			/* check for change in apto */
966 			appto_reload_new = (conn->apto_reload >
967 					    (lll->latency + 6)) ?
968 					   (conn->apto_reload -
969 					    (lll->latency + 6)) :
970 					   conn->apto_reload;
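			/* Illustrative example: with apto_reload = 3000
			 * events and latency = 10, the pre-timeout reloads at
			 * 3000 - (10 + 6) = 2984 events, giving the LE Ping
			 * procedure a head start of latency + 6 events before
			 * the authenticated payload timeout itself expires.
			 */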
971 			if (conn->appto_reload != appto_reload_new) {
972 				conn->appto_reload = appto_reload_new;
973 				conn->apto_expire = 0U;
974 			}
975 
976 			/* start authenticated payload (pre) timeout */
977 			if (conn->apto_expire == 0U) {
978 				conn->appto_expire = conn->appto_reload;
979 				conn->apto_expire = conn->apto_reload;
980 			}
981 		}
982 #endif /* CONFIG_BT_CTLR_LE_PING */
983 		break;
984 
985 	case LLL_CONN_MIC_PASS:
986 #if defined(CONFIG_BT_CTLR_LE_PING)
987 		conn->appto_expire = conn->apto_expire = 0U;
988 #endif /* CONFIG_BT_CTLR_LE_PING */
989 		break;
990 
991 	case LLL_CONN_MIC_FAIL:
992 		conn->llcp_terminate.reason_final =
993 			BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
994 		break;
995 	}
996 #endif /* CONFIG_BT_CTLR_LE_ENC */
997 
998 	reason_final = conn->llcp_terminate.reason_final;
999 	if (reason_final) {
1000 		conn_cleanup(conn, reason_final);
1001 
1002 		return;
1003 	}
1004 
1005 	/* Events elapsed used in timeout checks below */
1006 #if defined(CONFIG_BT_CTLR_CONN_META)
1007 	/* If the event has a shallow expiry, do not add latency; rely on
1008 	 * the accumulated lazy count instead.
1009 	 */
1010 	latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
1011 #else
1012 	latency_event = lll->latency_event;
1013 #endif
1014 	if (lll->latency_prepare) {
1015 		elapsed_event = latency_event + lll->latency_prepare;
1016 	} else {
1017 		elapsed_event = latency_event + 1U;
1018 	}
1019 
1020 	/* Peripheral drift compensation calculation and new latency, or
1021 	 * central terminate acked.
1022 	 */
1023 	ticks_drift_plus = 0U;
1024 	ticks_drift_minus = 0U;
1025 	ticks_slot_plus = 0U;
1026 	ticks_slot_minus = 0U;
1027 
1028 	if (done->extra.trx_cnt) {
1029 		if (0) {
1030 #if defined(CONFIG_BT_PERIPHERAL)
1031 		} else if (lll->role) {
1032 			ull_drift_ticks_get(done, &ticks_drift_plus,
1033 					    &ticks_drift_minus);
1034 
1035 			if (!ull_tx_q_peek(&conn->tx_q)) {
1036 				ull_conn_tx_demux(UINT8_MAX);
1037 			}
1038 
1039 			if (ull_tx_q_peek(&conn->tx_q) ||
1040 			    memq_peek(lll->memq_tx.head,
1041 				      lll->memq_tx.tail, NULL)) {
1042 				lll->latency_event = 0U;
1043 			} else if (lll->periph.latency_enabled) {
1044 				lll->latency_event = lll->latency;
1045 			}
1046 #endif /* CONFIG_BT_PERIPHERAL */
1047 		}
1048 
1049 		/* Reset connection failed to establish countdown */
1050 		conn->connect_expire = 0U;
1051 	}
1052 
1053 	/* Reset supervision countdown */
1054 	if (done->extra.crc_valid) {
1055 		conn->supervision_expire = 0U;
1056 	}
1057 
1058 	/* check connection failed to establish */
1059 	else if (conn->connect_expire) {
1060 		if (conn->connect_expire > elapsed_event) {
1061 			conn->connect_expire -= elapsed_event;
1062 		} else {
1063 			conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
1064 
1065 			return;
1066 		}
1067 	}
1068 
1069 	/* If the anchor point is not synchronized, start the supervision
1070 	 * timeout and break any latency.
1071 	 */
1072 	else {
1073 		/* Start supervision timeout, if not started already */
1074 		if (!conn->supervision_expire) {
1075 			const uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;
1076 
1077 			conn->supervision_expire = RADIO_CONN_EVENTS(
1078 				(conn->supervision_timeout * 10U * 1000U),
1079 				conn_interval_us);
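			/* Illustrative count (hypothetical values): a
			 * supervision_timeout of 100 (1 s in 10 ms units) and
			 * a 50 ms connection interval give
			 * 1000000 / 50000 = 20 connection events before
			 * BT_HCI_ERR_CONN_TIMEOUT is raised below.
			 */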
1080 		}
1081 	}
1082 
1083 	/* check supervision timeout */
1084 	force = 0U;
1085 	if (conn->supervision_expire) {
1086 		if (conn->supervision_expire > elapsed_event) {
1087 			conn->supervision_expire -= elapsed_event;
1088 
1089 			/* break latency */
1090 			lll->latency_event = 0U;
1091 
1092 			/* Force both central and peripheral when close to
1093 			 * supervision timeout.
1094 			 */
1095 			if (conn->supervision_expire <= 6U) {
1096 				force = 1U;
1097 			}
1098 #if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
1099 			/* use randomness to force peripheral role when anchor
1100 			 * points are being missed.
1101 			 */
1102 			else if (lll->role) {
1103 				if (latency_event) {
1104 					force = 1U;
1105 				} else {
1106 					force = conn->periph.force & 0x01;
1107 
1108 					/* rotate force bits */
1109 					conn->periph.force >>= 1U;
1110 					if (force) {
1111 						conn->periph.force |= BIT(31);
1112 					}
1113 				}
1114 			}
1115 #endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
1116 		} else {
1117 			conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);
1118 
1119 			return;
1120 		}
1121 	}
1122 
1123 	/* check procedure timeout */
1124 	uint8_t error_code;
1125 
1126 	if (-ETIMEDOUT == ull_cp_prt_elapse(conn, elapsed_event, &error_code)) {
1127 		conn_cleanup(conn, error_code);
1128 
1129 		return;
1130 	}
1131 
1132 #if defined(CONFIG_BT_CTLR_LE_PING)
1133 	/* check apto */
1134 	if (conn->apto_expire != 0U) {
1135 		if (conn->apto_expire > elapsed_event) {
1136 			conn->apto_expire -= elapsed_event;
1137 		} else {
1138 			struct node_rx_hdr *rx;
1139 
1140 			rx = ll_pdu_rx_alloc();
1141 			if (rx) {
1142 				conn->apto_expire = 0U;
1143 
1144 				rx->handle = lll->handle;
1145 				rx->type = NODE_RX_TYPE_APTO;
1146 
1147 				/* enqueue apto event into rx queue */
1148 				ll_rx_put_sched(rx->link, rx);
1149 			} else {
1150 				conn->apto_expire = 1U;
1151 			}
1152 		}
1153 	}
1154 
1155 	/* check appto */
1156 	if (conn->appto_expire != 0U) {
1157 		if (conn->appto_expire > elapsed_event) {
1158 			conn->appto_expire -= elapsed_event;
1159 		} else {
1160 			conn->appto_expire = 0U;
1161 
1162 			/* Initiate LE_PING procedure */
1163 			ull_cp_le_ping(conn);
1164 		}
1165 	}
1166 #endif /* CONFIG_BT_CTLR_LE_PING */
1167 
1168 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1169 	/* Check if the CTE_REQ procedure is periodic and counter has been started.
1170 	 * req_expire is set when: new CTE_REQ is started, after completion of last periodic run.
1171 	 */
1172 	if (conn->llcp.cte_req.req_interval != 0U && conn->llcp.cte_req.req_expire != 0U) {
1173 		if (conn->llcp.cte_req.req_expire > elapsed_event) {
1174 			conn->llcp.cte_req.req_expire -= elapsed_event;
1175 		} else {
1176 			uint8_t err;
1177 
1178 			/* Set req_expire to zero to mark that new periodic CTE_REQ was started.
1179 			 * The counter is re-started after completion of this run.
1180 			 */
1181 			conn->llcp.cte_req.req_expire = 0U;
1182 
1183 			err = ull_cp_cte_req(conn, conn->llcp.cte_req.min_cte_len,
1184 					     conn->llcp.cte_req.cte_type);
1185 
1186 			if (err == BT_HCI_ERR_CMD_DISALLOWED) {
1187 				/* Conditions have changed, e.g. the PHY was changed to CODED.
1188 				 * A new CTE REQ is not possible. Disable the periodic requests.
1189 				 */
1190 				ull_cp_cte_req_set_disable(conn);
1191 			}
1192 		}
1193 	}
1194 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1195 
1196 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1197 	/* generate RSSI event */
1198 	if (lll->rssi_sample_count == 0U) {
1199 		struct node_rx_pdu *rx;
1200 		struct pdu_data *pdu_data_rx;
1201 
1202 		rx = ll_pdu_rx_alloc();
1203 		if (rx) {
1204 			lll->rssi_reported = lll->rssi_latest;
1205 			lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
1206 
1207 			/* Prepare the rx packet structure */
1208 			rx->hdr.handle = lll->handle;
1209 			rx->hdr.type = NODE_RX_TYPE_RSSI;
1210 
1211 			/* prepare connection RSSI structure */
1212 			pdu_data_rx = (void *)rx->pdu;
1213 			pdu_data_rx->rssi = lll->rssi_reported;
1214 
1215 			/* enqueue connection RSSI structure into queue */
1216 			ll_rx_put_sched(rx->hdr.link, rx);
1217 		}
1218 	}
1219 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1220 
1221 	/* check if latency needs update */
1222 	lazy = 0U;
1223 	if ((force) || (latency_event != lll->latency_event)) {
1224 		lazy = lll->latency_event + 1U;
1225 	}
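	/* Note on the ticker convention assumed here: a lazy value of 0
	 * leaves the ticker's lazy count unchanged, while latency_event + 1
	 * requests that the ticker expire only once every latency_event + 1
	 * connection intervals.
	 */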
1226 
1227 #if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
1228 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) || defined(CONFIG_BT_CTLR_PHY)
1229 	if (lll->evt_len_upd) {
1230 		uint32_t ready_delay, rx_time, tx_time, ticks_slot, slot_us;
1231 
1232 		lll->evt_len_upd = 0;
1233 #if defined(CONFIG_BT_CTLR_PHY)
1234 		ready_delay = (lll->role) ?
1235 			lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8) :
1236 			lll_radio_tx_ready_delay_get(lll->phy_tx, lll->phy_flags);
1237 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1238 		tx_time = lll->dle.eff.max_tx_time;
1239 		rx_time = lll->dle.eff.max_rx_time;
1240 #else /* CONFIG_BT_CTLR_DATA_LENGTH */
1241 
1242 		tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1243 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
1244 		rx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
1245 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
1246 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1247 #else /* CONFIG_BT_CTLR_PHY */
1248 		ready_delay = (lll->role) ?
1249 			lll_radio_rx_ready_delay_get(0, 0) :
1250 			lll_radio_tx_ready_delay_get(0, 0);
1251 		tx_time = PDU_DC_MAX_US(lll->dle.eff.max_tx_octets, 0);
1252 		rx_time = PDU_DC_MAX_US(lll->dle.eff.max_rx_octets, 0);
1253 #endif /* CONFIG_BT_CTLR_PHY */
1254 
1255 		/* Calculate event time reservation */
1256 		slot_us = tx_time + rx_time;
1257 		slot_us += EVENT_IFS_US + (EVENT_CLOCK_JITTER_US << 1);
1258 		slot_us += ready_delay;
1259 
1260 		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
1261 		    !conn->lll.role) {
1262 			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1263 		}
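		/* Illustrative reservation (hypothetical values): with
		 * symmetric tx_time = rx_time = 1064 us (251 octets on the
		 * 2M PHY), the reservation is roughly 2 * 1064 + 150 us plus
		 * ready delay, clock jitter and any scheduling overhead,
		 * before being rounded up to whole ticker ticks below.
		 */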
1264 
1265 		ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1266 		if (ticks_slot > conn->ull.ticks_slot) {
1267 			ticks_slot_plus = ticks_slot - conn->ull.ticks_slot;
1268 		} else {
1269 			ticks_slot_minus = conn->ull.ticks_slot - ticks_slot;
1270 		}
1271 		conn->ull.ticks_slot = ticks_slot;
1272 	}
1273 #endif /* CONFIG_BT_CTLR_DATA_LENGTH || CONFIG_BT_CTLR_PHY */
1274 #else /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1275 	ticks_slot_plus = 0;
1276 	ticks_slot_minus = 0;
1277 #endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
1278 
1279 	/* update conn ticker */
1280 	if (ticks_drift_plus || ticks_drift_minus ||
1281 	    ticks_slot_plus || ticks_slot_minus ||
1282 	    lazy || force) {
1283 		uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
1284 		struct ll_conn *conn_ll = lll->hdr.parent;
1285 		uint32_t ticker_status;
1286 
1287 		/* The call to ticker_update can fail under a race condition
1288 		 * wherein the peripheral role is being stopped but, at the
1289 		 * same time, it is preempted by a peripheral event that gets
1290 		 * into the close state. Accept failure when the peripheral
1291 		 * role is being stopped.
1292 		 */
1293 		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
1294 					      TICKER_USER_ID_ULL_HIGH,
1295 					      ticker_id,
1296 					      ticks_drift_plus, ticks_drift_minus,
1297 					      ticks_slot_plus, ticks_slot_minus,
1298 					      lazy, force,
1299 					      ticker_update_conn_op_cb,
1300 					      conn_ll);
1301 		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1302 			  (ticker_status == TICKER_STATUS_BUSY) ||
1303 			  ((void *)conn_ll == ull_disable_mark_get()));
1304 	}
1305 }
1306 
1307 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1308 void ull_conn_lll_tx_demux_sched(struct lll_conn *lll)
1309 {
1310 	static memq_link_t link;
1311 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1312 
1313 	mfy.param = HDR_LLL2ULL(lll);
1314 
1315 	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1U, &mfy);
1316 }
1317 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
1318 
1319 void ull_conn_tx_demux(uint8_t count)
1320 {
1321 	do {
1322 		struct lll_tx *lll_tx;
1323 		struct ll_conn *conn;
1324 
1325 		lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
1326 		if (!lll_tx) {
1327 			break;
1328 		}
1329 
1330 		conn = ll_connected_get(lll_tx->handle);
1331 		if (conn) {
1332 			struct node_tx *tx = lll_tx->node;
1333 
1334 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1335 			if (empty_data_start_release(conn, tx)) {
1336 				goto ull_conn_tx_demux_release;
1337 			}
1338 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1339 
1340 			ull_tx_q_enqueue_data(&conn->tx_q, tx);
1341 		} else {
1342 			struct node_tx *tx = lll_tx->node;
1343 			struct pdu_data *p = (void *)tx->pdu;
1344 
1345 			p->ll_id = PDU_DATA_LLID_RESV;
1346 			ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
1347 		}
1348 
1349 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
1350 ull_conn_tx_demux_release:
1351 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
1352 
1353 		MFIFO_DEQUEUE(conn_tx);
1354 	} while (--count);
1355 }
1356 
1357 void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
1358 {
1359 	while (count--) {
1360 		struct node_tx *tx;
1361 		memq_link_t *link;
1362 
1363 		tx = tx_ull_dequeue(conn, NULL);
1364 		if (!tx) {
1365 			/* No more tx nodes available */
1366 			break;
1367 		}
1368 
1369 		link = mem_acquire(&mem_link_tx.free);
1370 		LL_ASSERT(link);
1371 
1372 		/* Enqueue towards LLL */
1373 		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1374 	}
1375 }
1376 
1377 void ull_conn_link_tx_release(void *link)
1378 {
1379 	mem_release(link, &mem_link_tx.free);
1380 }
1381 
1382 uint8_t ull_conn_ack_last_idx_get(void)
1383 {
1384 	return mfifo_conn_ack.l;
1385 }
1386 
1387 memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
1388 			       struct node_tx **tx)
1389 {
1390 	struct lll_tx *lll_tx;
1391 
1392 	lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
1393 	if (!lll_tx) {
1394 		return NULL;
1395 	}
1396 
1397 	*ack_last = mfifo_conn_ack.l;
1398 
1399 	*handle = lll_tx->handle;
1400 	*tx = lll_tx->node;
1401 
1402 	return (*tx)->link;
1403 }
1404 
1405 memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
1406 				       struct node_tx **tx)
1407 {
1408 	struct lll_tx *lll_tx;
1409 
1410 	lll_tx = mfifo_dequeue_get(mfifo_conn_ack.m, mfifo_conn_ack.s,
1411 				   mfifo_conn_ack.f, last);
1412 	if (!lll_tx) {
1413 		return NULL;
1414 	}
1415 
1416 	*handle = lll_tx->handle;
1417 	*tx = lll_tx->node;
1418 
1419 	return (*tx)->link;
1420 }
1421 
1422 void *ull_conn_ack_dequeue(void)
1423 {
1424 	return MFIFO_DEQUEUE(conn_ack);
1425 }
1426 
1427 void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
1428 {
1429 	struct lll_tx *lll_tx;
1430 	uint8_t idx;
1431 
1432 	idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
1433 	LL_ASSERT(lll_tx);
1434 
1435 	lll_tx->handle = handle;
1436 	lll_tx->node = tx;
1437 
1438 	MFIFO_ENQUEUE(conn_ack, idx);
1439 }
1440 
1441 void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
1442 {
1443 	struct pdu_data *pdu_tx;
1444 
1445 	pdu_tx = (void *)tx->pdu;
1446 	LL_ASSERT(pdu_tx->len);
1447 
1448 	if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1449 		if (handle != LLL_HANDLE_INVALID) {
1450 			struct ll_conn *conn = ll_conn_get(handle);
1451 
1452 			ull_cp_tx_ack(conn, tx);
1453 		}
1454 
1455 		/* release ctrl mem if points to itself */
1456 		if (link->next == (void *)tx) {
1457 			LL_ASSERT(link->next);
1458 
1459 			struct ll_conn *conn = ll_connected_get(handle);
1460 
1461 			ull_cp_release_tx(conn, tx);
1462 			return;
1463 		} else if (!tx) {
1464 			/* Tx Node re-used to enqueue new ctrl PDU */
1465 			return;
1466 		}
1467 		LL_ASSERT(!link->next);
1468 	} else if (handle == LLL_HANDLE_INVALID) {
1469 		pdu_tx->ll_id = PDU_DATA_LLID_RESV;
1470 	} else {
1471 		LL_ASSERT(handle != LLL_HANDLE_INVALID);
1472 	}
1473 
1474 	ll_tx_ack_put(handle, tx);
1475 }
1476 
1477 uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
1478 {
1479 	uint16_t max_tx_octets;
1480 
1481 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1482 #if defined(CONFIG_BT_CTLR_PHY)
1483 	switch (lll->phy_tx_time) {
1484 	default:
1485 	case PHY_1M:
1486 		/* 1M PHY, 1us = 1 bit, hence divide by 8.
1487 		 * Deduct 10 bytes for preamble (1), access address (4),
1488 		 * header (2), and CRC (3).
1489 		 */
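		/* Worked example (illustrative): an effective max_tx_time of
		 * 2088 us gives (2088 >> 3) - 10 = 251 payload octets; the
		 * MIC deduction and the cap against dle.eff.max_tx_octets
		 * further below then apply on top of this figure.
		 */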
1490 		max_tx_octets = (lll->dle.eff.max_tx_time >> 3) - 10;
1491 		break;
1492 
1493 	case PHY_2M:
1494 		/* 2M PHY, 1us = 2 bits, hence divide by 4.
1495 		 * Deduct 11 bytes for preamble (2), access address (4),
1496 		 * header (2), and CRC (3).
1497 		 */
1498 		max_tx_octets = (lll->dle.eff.max_tx_time >> 2) - 11;
1499 		break;
1500 
1501 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1502 	case PHY_CODED:
1503 		if (lll->phy_flags & 0x01) {
1504 			/* S8 Coded PHY, 8us = 1 bit, hence divide by
1505 			 * 64.
1506 			 * Subtract time for preamble (80), AA (256),
1507 			 * CI (16), TERM1 (24), CRC (192) and
1508 			 * TERM2 (24), total 592 us.
1509 			 * Subtract 2 bytes for header.
1510 			 */
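			/* Worked example (illustrative): an effective
			 * max_tx_time of 16784 us on S8 gives
			 * (16784 - 592) >> 6 = 253, minus 2 header octets,
			 * i.e. 251 payload octets.
			 */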
1511 			max_tx_octets = ((lll->dle.eff.max_tx_time - 592) >>
1512 					  6) - 2;
1513 		} else {
1514 			/* S2 Coded PHY, 2us = 1 bit, hence divide by
1515 			 * 16.
1516 			 * Subtract time for preamble (80), AA (256),
1517 			 * CI (16), TERM1 (24), CRC (48) and
1518 			 * TERM2 (6), total 430 us.
1519 			 * Subtract 2 bytes for header.
1520 			 */
1521 			max_tx_octets = ((lll->dle.eff.max_tx_time - 430) >>
1522 					  4) - 2;
1523 		}
1524 		break;
1525 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1526 	}
1527 
1528 #if defined(CONFIG_BT_CTLR_LE_ENC)
1529 	if (lll->enc_tx) {
1530 		/* deduct the MIC */
1531 		max_tx_octets -= 4U;
1532 	}
1533 #endif /* CONFIG_BT_CTLR_LE_ENC */
1534 
1535 	if (max_tx_octets > lll->dle.eff.max_tx_octets) {
1536 		max_tx_octets = lll->dle.eff.max_tx_octets;
1537 	}
1538 
1539 #else /* !CONFIG_BT_CTLR_PHY */
1540 	max_tx_octets = lll->dle.eff.max_tx_octets;
1541 #endif /* !CONFIG_BT_CTLR_PHY */
1542 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
1543 	max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1544 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
1545 	return max_tx_octets;
1546 }
1547 
1548 /**
1549  * @brief Initialize pdu_data members that are read only in lower link layer.
1550  *
1551  * @param pdu Pointer to pdu_data object to be initialized
1552  */
1553 void ull_pdu_data_init(struct pdu_data *pdu)
1554 {
1555 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1556 	pdu->cp = 0U;
1557 	pdu->octet3.resv[0] = 0U;
1558 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1559 }
1560 
1561 static int init_reset(void)
1562 {
1563 	/* Initialize conn pool. */
1564 	mem_init(conn_pool, sizeof(struct ll_conn),
1565 		 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
1566 
1567 	/* Initialize tx pool. */
1568 	mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONN_DATA_BUFFERS,
1569 		 &mem_conn_tx.free);
1570 
1571 	/* Initialize tx link pool. */
1572 	mem_init(mem_link_tx.pool, sizeof(memq_link_t),
1573 		 (CONN_DATA_BUFFERS +
1574 		  LLCP_TX_CTRL_BUF_COUNT),
1575 		 &mem_link_tx.free);
1576 
1577 	/* Initialize control procedure system. */
1578 	ull_cp_init();
1579 
1580 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1581 	/* Reset CPR mutex */
1582 	cpr_active_reset();
1583 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1584 
1585 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1586 	/* Initialize the DLE defaults */
1587 	default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1588 	default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
1589 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1590 
1591 #if defined(CONFIG_BT_CTLR_PHY)
1592 	/* Initialize the PHY defaults */
1593 	default_phy_tx = PHY_1M;
1594 	default_phy_rx = PHY_1M;
1595 
1596 #if defined(CONFIG_BT_CTLR_PHY_2M)
1597 	default_phy_tx |= PHY_2M;
1598 	default_phy_rx |= PHY_2M;
1599 #endif /* CONFIG_BT_CTLR_PHY_2M */
1600 
1601 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1602 	default_phy_tx |= PHY_CODED;
1603 	default_phy_rx |= PHY_CODED;
1604 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1605 #endif /* CONFIG_BT_CTLR_PHY */
1606 
1607 	return 0;
1608 }
1609 
1610 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
1611 static void tx_demux_sched(struct ll_conn *conn)
1612 {
1613 	static memq_link_t link;
1614 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};
1615 
1616 	mfy.param = conn;
1617 
1618 	mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1619 }
1620 #endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
1621 
1622 static void tx_demux(void *param)
1623 {
1624 	ull_conn_tx_demux(1);
1625 
1626 	ull_conn_tx_lll_enqueue(param, 1);
1627 }
1628 
1629 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *unused)
1630 {
1631 	struct node_tx *tx = NULL;
1632 
1633 	tx = ull_tx_q_dequeue(&conn->tx_q);
1634 	if (tx) {
1635 		struct pdu_data *pdu_tx;
1636 
1637 		pdu_tx = (void *)tx->pdu;
1638 		if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
1639 			/* Mark the tx node as belonging to the ctrl pool */
1640 			tx->next = tx;
1641 		} else {
1642 			/* Mark the tx node as belonging to the data pool */
1643 			tx->next = NULL;
1644 		}
1645 	}
1646 	return tx;
1647 }
1648 
1649 static void ticker_update_conn_op_cb(uint32_t status, void *param)
1650 {
1651 	/* Peripheral drift compensation succeeds, or it fails in a race
1652 	 * condition during disconnect or connection update (a race between
1653 	 * the ticker_update and ticker_stop calls).
1654 	 */
1655 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1656 		  param == ull_update_mark_get() ||
1657 		  param == ull_disable_mark_get());
1658 }
1659 
1660 static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1661 {
1662 	void *p;
1663 
1664 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1665 
1666 	p = ull_update_mark(param);
1667 	LL_ASSERT(p == param);
1668 }
1669 
1670 static void ticker_start_conn_op_cb(uint32_t status, void *param)
1671 {
1672 	void *p;
1673 
1674 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1675 
1676 	p = ull_update_unmark(param);
1677 	LL_ASSERT(p == param);
1678 }
1679 
1680 static void conn_setup_adv_scan_disabled_cb(void *param)
1681 {
1682 	struct node_rx_ftr *ftr;
1683 	struct node_rx_hdr *rx;
1684 	struct lll_conn *lll;
1685 
1686 	/* NOTE: LLL conn context SHALL be after lll_hdr in
1687 	 *       struct lll_adv and struct lll_scan.
1688 	 */
1689 	rx = param;
1690 	ftr = &(rx->rx_ftr);
1691 	lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1692 				     sizeof(struct lll_hdr)));
1693 
1694 	if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1695 		struct ull_hdr *hdr;
1696 
1697 		/* Prevent fast ADV re-scheduling from re-triggering */
1698 		hdr = HDR_LLL2ULL(ftr->param);
1699 		hdr->disabled_cb = NULL;
1700 	}
1701 
1702 	switch (lll->role) {
1703 #if defined(CONFIG_BT_CENTRAL)
1704 	case 0:
1705 		ull_central_setup(rx, ftr, lll);
1706 		break;
1707 #endif /* CONFIG_BT_CENTRAL */
1708 
1709 #if defined(CONFIG_BT_PERIPHERAL)
1710 	case 1:
1711 		ull_periph_setup(rx, ftr, lll);
1712 		break;
1713 #endif /* CONFIG_BT_PERIPHERAL */
1714 
1715 	default:
1716 		LL_ASSERT(0);
1717 		break;
1718 	}
1719 }
1720 
1721 static inline void disable(uint16_t handle)
1722 {
1723 	struct ll_conn *conn;
1724 	int err;
1725 
1726 	conn = ll_conn_get(handle);
1727 
1728 	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1729 					conn, &conn->lll);
1730 	LL_ASSERT(err == 0 || err == -EALREADY);
1731 
1732 	conn->lll.handle = LLL_HANDLE_INVALID;
1733 	conn->lll.link_tx_free = NULL;
1734 }
1735 
1736 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1737 static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1738 {
1739 	struct ll_conn_iso_stream *cis;
1740 
1741 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1742 	if (cis) {
1743 		struct node_rx_pdu *rx;
1744 		uint8_t reason;
1745 
1746 		/* More associated CISes - stop next */
1747 		rx = (void *)&conn->llcp_terminate.node_rx;
1748 		reason = *(uint8_t *)rx->pdu;
1749 
1750 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1751 				      reason);
1752 	} else {
1753 		/* No more CISes associated with conn - finalize */
1754 		conn_cleanup_finalize(conn);
1755 	}
1756 }
1757 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1758 
1759 static void conn_cleanup_finalize(struct ll_conn *conn)
1760 {
1761 	struct lll_conn *lll = &conn->lll;
1762 	uint32_t ticker_status;
1763 
1764 	ull_cp_state_set(conn, ULL_CP_DISCONNECTED);
1765 
1766 	/* Update tx buffer queue handling */
1767 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
1768 	ull_cp_update_tx_buffer_queue(conn);
1769 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
1770 	ull_cp_release_nodes(conn);
1771 
1772 	/* flush demux-ed Tx buffer still in ULL context */
1773 	tx_ull_flush(conn);
1774 
1775 	/* Stop Central or Peripheral role ticker */
1776 	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
1777 				    TICKER_USER_ID_ULL_HIGH,
1778 				    TICKER_ID_CONN_BASE + lll->handle,
1779 				    ticker_stop_op_cb, conn);
1780 	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
1781 		  (ticker_status == TICKER_STATUS_BUSY));
1782 
1783 	/* Invalidate the connection context */
1784 	lll->handle = LLL_HANDLE_INVALID;
1785 
1786 	/* Demux and flush Tx PDUs that remain enqueued in thread context */
1787 	ull_conn_tx_demux(UINT8_MAX);
1788 }
1789 
1790 static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
1791 {
1792 	struct node_rx_pdu *rx;
1793 
1794 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1795 	struct ll_conn_iso_stream *cis;
1796 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1797 
1798 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1799 	/* Reset CPR mutex */
1800 	cpr_active_check_and_reset(conn);
1801 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1802 
1803 	/* Only the termination structure is populated here, in the ULL
1804 	 * context; the actual enqueue happens in the LLL context, in
1805 	 * tx_lll_flush. Populating it here avoids having to pass the
1806 	 * reason value and the handle through the mayfly scheduling of
1807 	 * tx_lll_flush.
1808 	 */
1809 	rx = (void *)&conn->llcp_terminate.node_rx;
1810 	rx->hdr.handle = conn->lll.handle;
1811 	rx->hdr.type = NODE_RX_TYPE_TERMINATE;
1812 	*((uint8_t *)rx->pdu) = reason;
1813 
1814 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1815 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1816 	if (cis) {
1817 		/* Stop CIS and defer cleanup to after teardown. */
1818 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1819 				      reason);
1820 		return;
1821 	}
1822 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1823 
1824 	conn_cleanup_finalize(conn);
1825 }
1826 
1827 static void tx_ull_flush(struct ll_conn *conn)
1828 {
1829 	struct node_tx *tx;
1830 
1831 	ull_tx_q_resume_data(&conn->tx_q);
1832 
1833 	tx = tx_ull_dequeue(conn, NULL);
1834 	while (tx) {
1835 		memq_link_t *link;
1836 
1837 		link = mem_acquire(&mem_link_tx.free);
1838 		LL_ASSERT(link);
1839 
1840 		/* Enqueue towards LLL */
1841 		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
1842 
1843 		tx = tx_ull_dequeue(conn, NULL);
1844 	}
1845 }
1846 
1847 static void ticker_stop_op_cb(uint32_t status, void *param)
1848 {
1849 	static memq_link_t link;
1850 	static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
1851 	uint32_t ret;
1852 
1853 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1854 
1855 	/* Check if there are any pending LLL events that need to be aborted */
1856 	mfy.param = param;
1857 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1858 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
1859 	LL_ASSERT(!ret);
1860 }
1861 
static void conn_disable(void *param)
{
	struct ll_conn *conn;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	conn = param;
	hdr = &conn->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &conn->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&conn->lll);
	}
}

static void disabled_cb(void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
	uint32_t ret;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
			     TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!ret);
}

static void tx_lll_flush(void *param)
{
	struct node_rx_pdu *rx;
	struct lll_conn *lll;
	struct ll_conn *conn;
	struct node_tx *tx;
	memq_link_t *link;
	uint16_t handle;

	/* Get reference to ULL context */
	lll = param;
	conn = HDR_LLL2ULL(lll);
	handle = ll_conn_handle_get(conn);

	lll_conn_flush(handle, lll);

	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
			    (void **)&tx);
	while (link) {
		uint8_t idx;
		struct lll_tx *tx_buf;

		idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx_buf);
		LL_ASSERT(tx_buf);

		tx_buf->handle = LLL_HANDLE_INVALID;
		tx_buf->node = tx;

		/* TX node UPSTREAM, i.e. Tx node ack path */
		link->next = tx->next; /* Indicates ctrl pool or data pool */
		tx->next = link;

		MFIFO_ENQUEUE(conn_ack, idx);

		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
				    (void **)&tx);
	}

	/* Get the terminate structure reserved in the connection context.
	 * The terminate reason and connection handle should already be
	 * populated before this mayfly function was scheduled.
	 */
	rx = (void *)&conn->llcp_terminate.node_rx;
	LL_ASSERT(rx->hdr.link);
	link = rx->hdr.link;
	rx->hdr.link = NULL;

	/* Enqueue the terminate towards ULL context */
	ull_rx_put_sched(link, rx);
}

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
{
	struct pdu_data *p = (void *)tx->pdu;

	if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
		conn->start_empty = 1U;

		ll_tx_ack_put(conn->lll.handle, tx);

		return -EINVAL;
	} else if (p->len && conn->start_empty) {
		conn->start_empty = 0U;

		if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
			p->ll_id = PDU_DATA_LLID_DATA_START;
		}
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
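
/* Illustration of the conversion above (hypothetical host behaviour): if the
 * host submits an empty PDU_DATA_LLID_DATA_START fragment followed by a
 * PDU_DATA_LLID_DATA_CONTINUE fragment carrying the payload, the first call
 * acks and drops the empty start (returning -EINVAL so the caller skips
 * enqueueing it) and records start_empty; the next call then promotes the
 * continuation fragment's LLID to PDU_DATA_LLID_DATA_START so the peer still
 * receives a well-formed start fragment.
 */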

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
static uint8_t force_md_cnt_calc(struct lll_conn *lll_connection, uint32_t tx_rate)
{
	uint32_t time_incoming, time_outgoing;
	uint8_t force_md_cnt;
	uint8_t phy_flags;
	uint8_t mic_size;
	uint8_t phy;

#if defined(CONFIG_BT_CTLR_PHY)
	phy = lll_connection->phy_tx;
	phy_flags = lll_connection->phy_flags;
#else /* !CONFIG_BT_CTLR_PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif /* !CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_LE_ENC)
	mic_size = PDU_MIC_SIZE * lll_connection->enc_tx;
#else /* !CONFIG_BT_CTLR_LE_ENC */
	mic_size = 0U;
#endif /* !CONFIG_BT_CTLR_LE_ENC */

	time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
			1000000UL / tx_rate;
	time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
				  phy_flags) +
			PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
			(EVENT_IFS_US << 1);

	force_md_cnt = 0U;
	if (time_incoming > time_outgoing) {
		uint32_t delta;
		uint32_t time_keep_alive;

		delta = (time_incoming << 1) - time_outgoing;
		time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
				   EVENT_IFS_US) << 1;
		force_md_cnt = (delta + (time_keep_alive - 1)) /
			       time_keep_alive;
		LOG_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
		       "keepalive= %u, force_md_cnt = %u.",
		       time_incoming, time_outgoing, delta, time_keep_alive,
		       force_md_cnt);
	}

	return force_md_cnt;
}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
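
/* Rough worked example of the computation above, assuming hypothetical
 * values: LL_LENGTH_OCTETS_RX_MAX = 251, unencrypted 1M PHY (no MIC),
 * EVENT_IFS_US = 150 and a host-to-controller tx_rate of 500000 bps.
 * time_incoming = 251 * 8 * 1000000 / 500000 = 4016 us, and time_outgoing
 * is approximately 2088 + 80 + 300 = 2468 us, so delta = 8032 - 2468 =
 * 5564 us, time_keep_alive = (80 + 150) * 2 = 460 us, and
 * force_md_cnt = ceil(5564 / 460) = 13 forced MD events.
 */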

#if defined(CONFIG_BT_CTLR_LE_ENC)
/**
 * @brief Pause the data path of a rx queue.
 */
void ull_conn_pause_rx_data(struct ll_conn *conn)
{
	conn->pause_rx_data = 1U;
}

/**
 * @brief Resume the data path of a rx queue.
 */
void ull_conn_resume_rx_data(struct ll_conn *conn)
{
	conn->pause_rx_data = 0U;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

uint16_t ull_conn_event_counter(struct ll_conn *conn)
{
	struct lll_conn *lll;
	uint16_t event_counter;

	lll = &conn->lll;

	/* Calculate the current event counter. If the refcount is non-zero,
	 * prepare has been called and the LLL implementation has already
	 * calculated and incremented the event counter (RX path); in that
	 * case subtract one from the current value. Otherwise we are in the
	 * TX path and calculate the current event counter the same way LLL
	 * does, by taking the expected event counter value plus the
	 * accumulated latency.
	 */
	if (ull_ref_get(&conn->ull)) {
		/* We are in post-prepare (RX path). Event counter is already
		 * calculated and incremented by 1 for next event.
		 */
		event_counter = lll->event_counter - 1;
	} else {
		event_counter = lll->event_counter + lll->latency_prepare +
				conn->llcp.prep.lazy;
	}

	return event_counter;
}
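
/* A brief illustration with hypothetical values: with lll->event_counter at
 * 100, a call made while an event is in progress (ull_ref_get() != 0) yields
 * 100 - 1 = 99, since prepare has already advanced the counter to the next
 * event; a call made outside an event with latency_prepare = 2 and
 * llcp.prep.lazy = 1 yields 100 + 2 + 1 = 103.
 */
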
static void ull_conn_update_ticker(struct ll_conn *conn,
				   uint32_t ticks_win_offset,
				   uint32_t ticks_slot_overhead,
				   uint32_t periodic_us,
				   uint32_t ticks_at_expire)
{
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Disable the ticker job so that the stop and start operations are
	 * chained, avoiding the RTC being stopped if no tickers are active.
	 */
	uint32_t mayfly_was_enabled =
		mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW);

	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0U);
#endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */

	/* Start periph/central with new timings */
	uint8_t ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	uint32_t ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
				    ticker_id_conn, ticker_stop_conn_op_cb, (void *)conn);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));
	ticker_status = ticker_start(
		TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, ticker_id_conn, ticks_at_expire,
		ticks_win_offset, HAL_TICKER_US_TO_TICKS(periodic_us),
		HAL_TICKER_REMAINDER(periodic_us),
#if defined(CONFIG_BT_TICKER_LOW_LAT)
		TICKER_NULL_LAZY,
#else /* !CONFIG_BT_TICKER_LOW_LAT */
		TICKER_LAZY_MUST_EXPIRE_KEEP,
#endif /* CONFIG_BT_TICKER_LOW_LAT */
		(ticks_slot_overhead + conn->ull.ticks_slot),
#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
		conn->lll.role == BT_HCI_ROLE_PERIPHERAL ?
		ull_periph_ticker_cb : ull_central_ticker_cb,
#elif defined(CONFIG_BT_PERIPHERAL)
		ull_periph_ticker_cb,
#else
		ull_central_ticker_cb,
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CENTRAL */
		conn, ticker_start_conn_op_cb, (void *)conn);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Re-enable the ticker job, if it was disabled in this function */
	if (mayfly_was_enabled) {
		mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U);
	}
#endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
}

void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
				uint32_t win_offset_us, uint16_t interval, uint16_t latency,
				uint16_t timeout, uint16_t instant)
{
	struct lll_conn *lll;
	uint32_t ticks_win_offset = 0U;
	uint32_t ticks_slot_overhead;
	uint16_t conn_interval_old;
	uint16_t conn_interval_new;
	uint32_t conn_interval_us;
	uint32_t periodic_us;
	uint16_t latency_upd;
	uint16_t instant_latency;
	uint16_t event_counter;
	uint32_t ticks_at_expire;

	lll = &conn->lll;

	/* Calculate current event counter */
	event_counter = ull_conn_event_counter(conn);

	instant_latency = (event_counter - instant) & 0xFFFF;

	ticks_at_expire = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	/* restore to normal prepare */
	if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
		uint32_t ticks_prepare_to_start =
			MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_preempt_to_start);

		conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;

		ticks_at_expire -= (conn->ull.ticks_prepare_to_start - ticks_prepare_to_start);
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

	/* compensate for instant_latency due to laziness */
	conn_interval_old = instant_latency * lll->interval;
	latency_upd = conn_interval_old / interval;
	conn_interval_new = latency_upd * interval;
	if (conn_interval_new > conn_interval_old) {
		ticks_at_expire += HAL_TICKER_US_TO_TICKS((conn_interval_new - conn_interval_old) *
							  CONN_INT_UNIT_US);
	} else {
		ticks_at_expire -= HAL_TICKER_US_TO_TICKS((conn_interval_old - conn_interval_new) *
							  CONN_INT_UNIT_US);
	}

	lll->latency_prepare += conn->llcp.prep.lazy;
	lll->latency_prepare -= (instant_latency - latency_upd);
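
	/* Hypothetical example of the compensation above: with an old
	 * interval of 24 units, a new interval of 36 units and
	 * instant_latency = 2, conn_interval_old = 48 and latency_upd = 1,
	 * so conn_interval_new = 36; ticks_at_expire is then moved 12 units
	 * (15 ms) earlier and latency_prepare is reduced by
	 * instant_latency - latency_upd = 1.
	 */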

	/* calculate the offset */
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead =
			MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_prepare_to_start);
	} else {
		ticks_slot_overhead = 0U;
	}

	/* calculate the window widening and interval */
	conn_interval_us = interval * CONN_INT_UNIT_US;
	periodic_us = conn_interval_us;

	switch (lll->role) {
#if defined(CONFIG_BT_PERIPHERAL)
	case BT_HCI_ROLE_PERIPHERAL:
		lll->periph.window_widening_prepare_us -=
			lll->periph.window_widening_periodic_us * instant_latency;

		lll->periph.window_widening_periodic_us =
			DIV_ROUND_UP(((lll_clock_ppm_local_get() +
					   lll_clock_ppm_get(conn->periph.sca)) *
					  conn_interval_us), 1000000U);
		lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
		lll->periph.window_size_prepare_us = win_size * CONN_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		conn->periph.ticks_to_offset = 0U;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

		lll->periph.window_widening_prepare_us +=
			lll->periph.window_widening_periodic_us * latency_upd;
		if (lll->periph.window_widening_prepare_us > lll->periph.window_widening_max_us) {
			lll->periph.window_widening_prepare_us = lll->periph.window_widening_max_us;
		}

		ticks_at_expire -= HAL_TICKER_US_TO_TICKS(lll->periph.window_widening_periodic_us *
							  latency_upd);
		ticks_win_offset = HAL_TICKER_US_TO_TICKS((win_offset_us / CONN_INT_UNIT_US) *
							  CONN_INT_UNIT_US);
		periodic_us -= lll->periph.window_widening_periodic_us;
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case BT_HCI_ROLE_CENTRAL:
		ticks_win_offset = HAL_TICKER_US_TO_TICKS(win_offset_us);

		/* Workaround: the ticker_start function lacks a remainder
		 * parameter for the first interval; add one tick so that the
		 * ceiled value is used.
		 */
		ticks_win_offset += 1U;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		LL_ASSERT(0);
		break;
	}

	lll->interval = interval;
	lll->latency = latency;

	conn->supervision_timeout = timeout;
	ull_cp_prt_reload_set(conn, conn_interval_us);

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* APTO in no. of connection events */
	conn->apto_reload = RADIO_CONN_EVENTS((30U * 1000U * 1000U), conn_interval_us);
	/* Dispatch LE Ping PDU 6 connection events (that the peer would
	 * listen to) before the 30 s timeout.
	 * TODO: with peripheral latency, the interval the peer actually
	 * listens at can exceed 30 s.
	 */
	conn->appto_reload = (conn->apto_reload > (lll->latency + 6U)) ?
					   (conn->apto_reload - (lll->latency + 6U)) :
					   conn->apto_reload;
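
	/* Rough illustration with hypothetical values: with a 50 ms
	 * connection interval, apto_reload is roughly 30 s / 50 ms = 600
	 * events; with lll->latency = 0 the LE Ping PDU is queued at
	 * appto_reload = 600 - 6 = 594 events, i.e. about 300 ms before the
	 * authenticated payload timeout would expire.
	 */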
#endif /* CONFIG_BT_CTLR_LE_PING */

	if (is_cu_proc) {
		conn->supervision_expire = 0U;
	}

	/* Update ACL ticker */
	ull_conn_update_ticker(conn, ticks_win_offset, ticks_slot_overhead, periodic_us,
			       ticks_at_expire);
	/* Signal that the prepare needs to be canceled */
	conn->cancel_prepare = 1U;
}

#if defined(CONFIG_BT_PERIPHERAL)
void ull_conn_update_peer_sca(struct ll_conn *conn)
{
	struct lll_conn *lll;

	uint32_t conn_interval_us;
	uint32_t periodic_us;

	lll = &conn->lll;

	/* calculate the window widening and interval */
	conn_interval_us = lll->interval * CONN_INT_UNIT_US;
	periodic_us = conn_interval_us;

	lll->periph.window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(conn->periph.sca)) *
				  conn_interval_us), 1000000U);

	periodic_us -= lll->periph.window_widening_periodic_us;

	/* Update ACL ticker */
	ull_conn_update_ticker(conn, HAL_TICKER_US_TO_TICKS(periodic_us), 0, periodic_us,
				   conn->llcp.prep.ticks_at_expire);
}
#endif /* CONFIG_BT_PERIPHERAL */

void ull_conn_chan_map_set(struct ll_conn *conn, const uint8_t chm[5])
{
	struct lll_conn *lll = &conn->lll;

	memcpy(lll->data_chan_map, chm, sizeof(lll->data_chan_map));
	lll->data_chan_count = util_ones_count_get(lll->data_chan_map, sizeof(lll->data_chan_map));
}
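
/* A short illustration, assuming hypothetical channel maps: a map of
 * { 0xFF, 0xFF, 0xFF, 0xFF, 0x1F } marks all 37 data channels as used and
 * yields data_chan_count = 37, while { 0x00, 0xFF, 0xFF, 0xFF, 0x1F }
 * excludes channels 0..7 and yields data_chan_count = 29.
 */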

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static inline void dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
				    uint16_t *max_tx_time)
{
	uint8_t phy_select = PHY_1M;
	uint16_t rx_time = 0U;
	uint16_t tx_time = 0U;

#if defined(CONFIG_BT_CTLR_PHY)
	if (conn->llcp.fex.valid && feature_phy_coded(conn)) {
		/* If coded PHY is supported on the connection
		 * this will define the max times
		 */
		phy_select = PHY_CODED;
		/* If not, max times should be defined by 1M timing */
	}
#endif

	rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select);

#if defined(CONFIG_BT_CTLR_PHY)
	tx_time = MIN(conn->lll.dle.default_tx_time,
		      PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select));
#else /* !CONFIG_BT_CTLR_PHY */
	tx_time = PDU_DC_MAX_US(conn->lll.dle.default_tx_octets, phy_select);
#endif /* !CONFIG_BT_CTLR_PHY */

	/*
	 * see Vol. 6 Part B chapter 4.5.10
	 * minimum value for time is 328 us
	 */
	rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
	tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);

	*max_rx_time = rx_time;
	*max_tx_time = tx_time;
}

void ull_dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
				    uint16_t *max_tx_time)
{
	return dle_max_time_get(conn, max_rx_time, max_tx_time);
}

/*
 * TODO: this can probably be optimised, e.g. by creating a macro for the
 * ull_dle_update_eff function
 */
uint8_t ull_dle_update_eff(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	/* Note that we must use bitwise OR and not logical OR; logical OR
	 * would short-circuit and skip the Tx update when the Rx update
	 * already reported a change.
	 */
	dle_changed = ull_dle_update_eff_rx(conn);
	dle_changed |= ull_dle_update_eff_tx(conn);
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	if (dle_changed) {
		conn->lll.evt_len_upd = 1U;
	}
#endif

	return dle_changed;
}
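
/* As a rough example of the effective-value rule used below (assuming
 * hypothetical exchanged values): with local.max_rx_octets = 251 and
 * remote.max_tx_octets = 27, eff.max_rx_octets becomes
 * MAX(MIN(251, 27), 27) = 27; only after the peer advertises 251 in its
 * LL_LENGTH_REQ/RSP does the effective value grow to 251.
 */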

uint8_t ull_dle_update_eff_rx(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	const uint16_t eff_rx_octets =
		MAX(MIN(conn->lll.dle.local.max_rx_octets, conn->lll.dle.remote.max_tx_octets),
		    PDU_DC_PAYLOAD_SIZE_MIN);

#if defined(CONFIG_BT_CTLR_PHY)
	unsigned int min_eff_rx_time = (conn->lll.phy_rx == PHY_CODED) ?
			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;

	const uint16_t eff_rx_time =
		MAX(MIN(conn->lll.dle.local.max_rx_time, conn->lll.dle.remote.max_tx_time),
		    min_eff_rx_time);

	if (eff_rx_time != conn->lll.dle.eff.max_rx_time) {
		conn->lll.dle.eff.max_rx_time = eff_rx_time;
		dle_changed = 1U;
	}
#else
	conn->lll.dle.eff.max_rx_time = PDU_DC_MAX_US(eff_rx_octets, PHY_1M);
#endif

	if (eff_rx_octets != conn->lll.dle.eff.max_rx_octets) {
		conn->lll.dle.eff.max_rx_octets = eff_rx_octets;
		dle_changed = 1U;
	}
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	/* We delay the update of the event length until after the DLE
	 * procedure has finished.
	 */
	if (dle_changed) {
		conn->lll.evt_len_upd_delayed = 1;
	}
#endif

	return dle_changed;
}

uint8_t ull_dle_update_eff_tx(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	const uint16_t eff_tx_octets =
		MAX(MIN(conn->lll.dle.local.max_tx_octets, conn->lll.dle.remote.max_rx_octets),
		    PDU_DC_PAYLOAD_SIZE_MIN);

#if defined(CONFIG_BT_CTLR_PHY)
	unsigned int min_eff_tx_time = (conn->lll.phy_tx == PHY_CODED) ?
			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;

	const uint16_t eff_tx_time =
		MAX(MIN(conn->lll.dle.local.max_tx_time, conn->lll.dle.remote.max_rx_time),
		    min_eff_tx_time);

	if (eff_tx_time != conn->lll.dle.eff.max_tx_time) {
		conn->lll.dle.eff.max_tx_time = eff_tx_time;
		dle_changed = 1U;
	}
#else
	conn->lll.dle.eff.max_tx_time = PDU_DC_MAX_US(eff_tx_octets, PHY_1M);
#endif

	if (eff_tx_octets != conn->lll.dle.eff.max_tx_octets) {
		conn->lll.dle.eff.max_tx_octets = eff_tx_octets;
		dle_changed = 1U;
	}

#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	if (dle_changed) {
		conn->lll.evt_len_upd = 1U;
	}
	conn->lll.evt_len_upd |= conn->lll.evt_len_upd_delayed;
	conn->lll.evt_len_upd_delayed = 0;
#endif

	return dle_changed;
}

static void ull_len_data_length_trim(uint16_t *tx_octets, uint16_t *tx_time)
{
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	uint16_t tx_time_max =
			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
#else /* !CONFIG_BT_CTLR_PHY_CODED */
	uint16_t tx_time_max =
			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY_CODED */

	/* trim to supported values */
	if (*tx_octets > LL_LENGTH_OCTETS_TX_MAX) {
		*tx_octets = LL_LENGTH_OCTETS_TX_MAX;
	}

	if (*tx_time > tx_time_max) {
		*tx_time = tx_time_max;
	}
}

void ull_dle_local_tx_update(struct ll_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	/* Trim to supported values */
	ull_len_data_length_trim(&tx_octets, &tx_time);

	conn->lll.dle.default_tx_octets = tx_octets;

#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.default_tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */

	dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time, &conn->lll.dle.local.max_tx_time);
	conn->lll.dle.local.max_tx_octets = conn->lll.dle.default_tx_octets;
}

void ull_dle_init(struct ll_conn *conn, uint8_t phy)
{
#if defined(CONFIG_BT_CTLR_PHY)
	const uint16_t max_time_min = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy);
	const uint16_t max_time_max = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy);
#endif /* CONFIG_BT_CTLR_PHY */

	/* Clear DLE data set */
	memset(&conn->lll.dle, 0, sizeof(conn->lll.dle));
	/* See BT 5.2 Spec - Vol 6, Part B, Sect 4.5.10
	 * Default to locally max supported rx/tx length/time
	 */
	ull_dle_local_tx_update(conn, default_tx_octets, default_tx_time);

	conn->lll.dle.local.max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.local.max_rx_time = max_time_max;
#endif /* CONFIG_BT_CTLR_PHY */

	/* Default to minimum rx/tx data length/time */
	conn->lll.dle.remote.max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	conn->lll.dle.remote.max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;

#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.remote.max_tx_time = max_time_min;
	conn->lll.dle.remote.max_rx_time = max_time_min;
#endif /* CONFIG_BT_CTLR_PHY */

	/* Per Bluetooth Core Specification version 5.3, Vol 6, Part B,
	 * Section 4.5.10, ull_dle_update_eff can be called for
	 * initialisation.
	 */
	(void)ull_dle_update_eff(conn);

	/* Check whether the controller should perform a data length update after
	 * connection is established
	 */
#if defined(CONFIG_BT_CTLR_PHY)
	if ((conn->lll.dle.local.max_rx_time != max_time_min ||
	     conn->lll.dle.local.max_tx_time != max_time_min)) {
		conn->lll.dle.update = 1;
	} else
#endif
	{
		if (conn->lll.dle.local.max_tx_octets != PDU_DC_PAYLOAD_SIZE_MIN ||
		    conn->lll.dle.local.max_rx_octets != PDU_DC_PAYLOAD_SIZE_MIN) {
			conn->lll.dle.update = 1;
		}
	}
}

void ull_conn_default_tx_octets_set(uint16_t tx_octets)
{
	default_tx_octets = tx_octets;
}

void ull_conn_default_tx_time_set(uint16_t tx_time)
{
	default_tx_time = tx_time;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

uint8_t ull_conn_lll_phy_active(struct ll_conn *conn, uint8_t phys)
{
#if defined(CONFIG_BT_CTLR_PHY)
	if (!(phys & (conn->lll.phy_tx | conn->lll.phy_rx))) {
#else /* !CONFIG_BT_CTLR_PHY */
	if (!(phys & 0x01)) {
#endif /* !CONFIG_BT_CTLR_PHY */
		return 0;
	}
	return 1;
}
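
/* Usage sketch, assuming the Zephyr PHY bit values (PHY_1M = 0x01,
 * PHY_2M = 0x02): for a connection currently using 1M in both directions,
 * ull_conn_lll_phy_active(conn, PHY_2M) returns 0 while
 * ull_conn_lll_phy_active(conn, PHY_1M) returns 1. Without
 * CONFIG_BT_CTLR_PHY only the 1M bit is checked.
 */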

uint8_t ull_is_lll_tx_queue_empty(struct ll_conn *conn)
{
	return (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail, NULL) == NULL);
}