/*
 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ecb.h"
#include "hal/ccm.h"
#include "hal/ticker.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mfifo.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll/lll_vendor.h"

#include "ll_sw/ull_tx_queue.h"

#include "isoal.h"
#include "ull_iso_types.h"
#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"

#if defined(CONFIG_BT_CTLR_USER_EXT)
#include "ull_vendor.h"
#endif /* CONFIG_BT_CTLR_USER_EXT */

#include "ull_internal.h"
#include "ull_llcp_internal.h"
#include "ull_sched_internal.h"
#include "ull_chan_internal.h"
#include "ull_conn_internal.h"
#include "ull_peripheral_internal.h"
#include "ull_central_internal.h"

#include "ull_iso_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_peripheral_iso_internal.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "ull_adv_types.h"
#include "ull_adv_internal.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"
#include "ull_sync_types.h"
#include "lll_scan.h"
#include "ull_scan_types.h"
#include "ull_sync_internal.h"

#include "ll.h"
#include "ll_feat.h"
#include "ll_settings.h"

#include "ll_sw/ull_llcp.h"
#include "ll_sw/ull_llcp_features.h"

#include "hal/debug.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_ctlr_ull_conn);

static int init_reset(void);
#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void tx_demux_sched(struct ll_conn *conn);
#endif /* !CONFIG_BT_CTLR_LOW_LAT */
static void tx_demux(void *param);
static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);

static void ticker_update_conn_op_cb(uint32_t status, void *param);
static void ticker_stop_conn_op_cb(uint32_t status, void *param);
static void ticker_start_conn_op_cb(uint32_t status, void *param);

static void conn_setup_adv_scan_disabled_cb(void *param);
static inline void disable(uint16_t handle);
static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
static void conn_cleanup_finalize(struct ll_conn *conn);
static void tx_ull_flush(struct ll_conn *conn);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void conn_disable(void *param);
static void disabled_cb(void *param);
static void tx_lll_flush(void *param);

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* Connection context pointer used as CPR mutex to serialize connection
 * parameter request procedures across simultaneous connections so that
 * offsets exchanged with the peer do not get changed.
 */
struct ll_conn *conn_upd_curr;
#endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */

#if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
#define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
#endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */

#define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
				offsetof(struct pdu_data, lldata) + \
				(LL_LENGTH_OCTETS_TX_MAX + \
				BT_CTLR_USER_TX_BUFFER_OVERHEAD))
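
/* Illustrative sizing, not a new definition: with the common default of
 * LL_LENGTH_OCTETS_TX_MAX = 27 octets and BT_CTLR_USER_TX_BUFFER_OVERHEAD = 0,
 * each buffer holds the node_tx header, the PDU header up to lldata, and the
 * 27 payload octets, with MROUND() rounding the total up for alignment.
 */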

#define CONN_DATA_BUFFERS CONFIG_BT_BUF_ACL_TX_COUNT

static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONN_DATA_BUFFERS);
static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
		    (CONN_DATA_BUFFERS +
		     LLCP_TX_CTRL_BUF_COUNT));
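
/* The ack FIFO is sized larger than the data FIFO on purpose: every buffer
 * that can be in flight, data (CONN_DATA_BUFFERS) as well as LLCP control
 * (LLCP_TX_CTRL_BUF_COUNT), must find a free ack slot, so that the enqueue
 * in ull_conn_lll_ack_enqueue() below can never fail.
 */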

static struct {
	void *free;
	uint8_t pool[CONN_TX_BUF_SIZE * CONN_DATA_BUFFERS];
} mem_conn_tx;

static struct {
	void *free;
	uint8_t pool[sizeof(memq_link_t) *
		     (CONN_DATA_BUFFERS +
		      LLCP_TX_CTRL_BUF_COUNT)];
} mem_link_tx;

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static uint16_t default_tx_octets;
static uint16_t default_tx_time;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
static uint8_t default_phy_tx;
static uint8_t default_phy_rx;
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
static struct past_params default_past_params;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
static void *conn_free;

struct ll_conn *ll_conn_acquire(void)
{
	return mem_acquire(&conn_free);
}

void ll_conn_release(struct ll_conn *conn)
{
	mem_release(conn, &conn_free);
}

uint16_t ll_conn_handle_get(struct ll_conn *conn)
{
	return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
}

struct ll_conn *ll_conn_get(uint16_t handle)
{
	return mem_get(conn_pool, sizeof(struct ll_conn), handle);
}

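/* Note: a context is "connected" only while lll.handle equals its own handle;
 * disable() and conn_cleanup_finalize() park lll.handle at LLL_HANDLE_INVALID,
 * so this comparison filters out free and terminating contexts.
 */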
struct ll_conn *ll_connected_get(uint16_t handle)
{
	struct ll_conn *conn;

	if (handle >= CONFIG_BT_MAX_CONN) {
		return NULL;
	}

	conn = ll_conn_get(handle);
	if (conn->lll.handle != handle) {
		return NULL;
	}

	return conn;
}

uint16_t ll_conn_free_count_get(void)
{
	return mem_free_count_get(conn_free);
}

void *ll_tx_mem_acquire(void)
{
	return mem_acquire(&mem_conn_tx.free);
}

void ll_tx_mem_release(void *tx)
{
	mem_release(tx, &mem_conn_tx.free);
}

int ll_tx_mem_enqueue(uint16_t handle, void *tx)
{
#if defined(CONFIG_BT_CTLR_THROUGHPUT)
#define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
	static uint32_t tx_rate;
	static uint32_t tx_cnt;
#endif /* CONFIG_BT_CTLR_THROUGHPUT */
	struct lll_tx *lll_tx;
	struct ll_conn *conn;
	uint8_t idx;

	conn = ll_connected_get(handle);
	if (!conn) {
		return -EINVAL;
	}

	idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
	if (!lll_tx) {
		return -ENOBUFS;
	}

	lll_tx->handle = handle;
	lll_tx->node = tx;

	MFIFO_ENQUEUE(conn_tx, idx);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	if (ull_ref_get(&conn->ull)) {
#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
		if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
			uint8_t previous, force_md_cnt;

			force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
			previous = lll_conn_force_md_cnt_set(force_md_cnt);
			if (previous != force_md_cnt) {
				LOG_INF("force_md_cnt: old= %u, new= %u.", previous, force_md_cnt);
			}
		}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */

		tx_demux_sched(conn);

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
	} else {
		lll_conn_force_md_cnt_set(0U);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
	}
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

#if defined(CONFIG_BT_CTLR_THROUGHPUT)
	static uint32_t last_cycle_stamp;
	static uint32_t tx_len;
	struct pdu_data *pdu;
	uint32_t cycle_stamp;
	uint64_t delta;

	cycle_stamp = k_cycle_get_32();
	delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
	if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
		LOG_INF("incoming Tx: count= %u, len= %u, rate= %u bps.", tx_cnt, tx_len, tx_rate);

		last_cycle_stamp = cycle_stamp;
		tx_cnt = 0U;
		tx_len = 0U;
	}

	pdu = (void *)((struct node_tx *)tx)->pdu;
	tx_len += pdu->len;
	if (delta == 0) { /* Let's avoid a division by 0 if we happen to have a really fast HCI IF */
		delta = 1;
	}
	tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
	tx_cnt++;
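
	/* Since tx_len is in octets (hence the << 3 to bits), delta is in
	 * nanoseconds and BT_CTLR_THROUGHPUT_PERIOD is 1e9 ns, tx_rate works
	 * out in bits per second, e.g. 10000 octets in 1 s -> 80000 bps.
	 */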
#endif /* CONFIG_BT_CTLR_THROUGHPUT */

	return 0;
}
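
/* Typical producer flow for the three ll_tx_mem_*() calls above, as a sketch
 * (not code from this file): the caller owns the buffer until the enqueue
 * succeeds, so it must release it on failure.
 *
 *	void *tx = ll_tx_mem_acquire();
 *	// ... fill in the ACL PDU ...
 *	if (ll_tx_mem_enqueue(handle, tx)) {
 *		ll_tx_mem_release(tx);
 *	}
 */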

uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
		    uint16_t interval_max, uint16_t latency, uint16_t timeout, uint16_t *offset)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (cmd == 0U) {
		uint8_t err;

		err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout,
					 offset);
		if (err) {
			return err;
		}

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    conn->lll.role) {
			ull_periph_latency_cancel(conn, handle);
		}
	} else if (cmd == 2U) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		if (status == 0U) {
			ull_cp_conn_param_req_reply(conn);
		} else {
			ull_cp_conn_param_req_neg_reply(conn, status);
		}
		return BT_HCI_ERR_SUCCESS;
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
		/* CPR feature not supported */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	} else {
		return BT_HCI_ERR_UNKNOWN_CMD;
	}

	return 0;
}

uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/*
	 * Core Spec 5.2 Vol4: 7.8.20:
	 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
	 * for the specified Connection_Handle. The returned value indicates the state of
	 * the Channel_Map specified by the last transmitted or received Channel_Map
	 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
	 * Connection_Handle, regardless of whether the Central has received an
	 * acknowledgment.
	 */
	const uint8_t *pending_chm;

	pending_chm = ull_cp_chan_map_update_pending(conn);
	if (pending_chm) {
		memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
	} else {
		memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
uint8_t ll_req_peer_sca(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	return ull_cp_req_peer_sca(conn);
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

static bool is_valid_disconnect_reason(uint8_t reason)
{
	switch (reason) {
	case BT_HCI_ERR_AUTH_FAIL:
	case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
	case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
	case BT_HCI_ERR_REMOTE_POWER_OFF:
	case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
	case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
	case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
		return true;
	default:
		return false;
	}
}

uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
{
	struct ll_conn *conn;
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	struct ll_conn_iso_stream *cis;
#endif

	if (IS_ACL_HANDLE(handle)) {
		conn = ll_connected_get(handle);

		/* Is conn still connected? */
		if (!conn) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		if (!is_valid_disconnect_reason(reason)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		uint8_t err;

		err = ull_cp_terminate(conn, reason);
		if (err) {
			return err;
		}

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
			ull_periph_latency_cancel(conn, handle);
		}
		return 0;
	}
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	if (IS_CIS_HANDLE(handle)) {
		cis = ll_iso_stream_connected_get(handle);
		if (!cis) {
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
			/* CIS is not connected - get the unconnected instance */
			cis = ll_conn_iso_stream_get(handle);

			/* Sanity-check instance to make sure it's created but not connected */
			if (cis->group && cis->lll.handle == handle && !cis->established) {
				if (cis->group->state == CIG_STATE_CONFIGURABLE) {
					/* Disallow if CIG is still in configurable state */
					return BT_HCI_ERR_CMD_DISALLOWED;

				} else if (cis->group->state == CIG_STATE_INITIATING) {
					conn = ll_connected_get(cis->lll.acl_handle);

					/* CIS is not yet established - try to cancel procedure */
					if (ull_cp_cc_cancel(conn)) {
						/* Successfully canceled - complete disconnect */
						struct node_rx_pdu *node_terminate;

						node_terminate = ull_pdu_rx_alloc();
						LL_ASSERT(node_terminate);

						node_terminate->hdr.handle = handle;
						node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
						*((uint8_t *)node_terminate->pdu) =
							BT_HCI_ERR_LOCALHOST_TERM_CONN;

						ll_rx_put_sched(node_terminate->hdr.link,
							node_terminate);

						/* We're no longer initiating a connection */
						cis->group->state = CIG_STATE_CONFIGURABLE;

						/* This is now a successful disconnection */
						return BT_HCI_ERR_SUCCESS;
					}

					/* Procedure could not be canceled in the current
					 * state - let it run its course and enqueue a
					 * terminate procedure.
					 */
					return ull_cp_cis_terminate(conn, cis, reason);
				}
			}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
			/* Disallow if CIS is not connected */
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		conn = ll_connected_get(cis->lll.acl_handle);
		/* Disallow if ACL has disconnected */
		if (!conn) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		return ull_cp_cis_terminate(conn, cis, reason);
	}
#endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO) */

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
uint8_t ll_feature_req_send(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	uint8_t err;

	err = ull_cp_feature_exchange(conn, 1U);
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
	    conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */

uint8_t ll_version_ind_send(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	uint8_t err;

	err = ull_cp_version_exchange(conn);
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static bool ll_len_validate(uint16_t tx_octets, uint16_t tx_time)
{
	/* validate if within HCI allowed range */
	if (!IN_RANGE(tx_octets, PDU_DC_PAYLOAD_SIZE_MIN,
		      PDU_DC_PAYLOAD_SIZE_MAX)) {
		return false;
	}

	/* validate if within HCI allowed range */
	if (!IN_RANGE(tx_time, PDU_DC_PAYLOAD_TIME_MIN,
		      PDU_DC_PAYLOAD_TIME_MAX_CODED)) {
		return false;
	}

	return true;
}
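
/* With the Core Specification bounds behind PDU_DC_PAYLOAD_SIZE_MIN/MAX and
 * PDU_DC_PAYLOAD_TIME_MIN/MAX_CODED, this accepts 27..251 octets and
 * 328..17040 us, i.e. the valid HCI LE Set Data Length parameter ranges.
 */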

uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
			    uint16_t tx_time)
{
	struct ll_conn *conn;

	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    !ll_len_validate(tx_octets, tx_time)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (!feature_dle(conn)) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	uint8_t err;

	err = ull_cp_data_length_update(conn, tx_octets, tx_time);
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
{
	*max_tx_octets = default_tx_octets;
	*max_tx_time = default_tx_time;
}

uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
{
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
	    !ll_len_validate(max_tx_octets, max_tx_time)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	default_tx_octets = max_tx_octets;
	default_tx_time = max_tx_time;

	return 0;
}

void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
		       uint16_t *max_rx_octets, uint16_t *max_rx_time)
{
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
#define PHY (PHY_CODED)
#else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
#define PHY (PHY_1M)
#endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
	*max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
	*max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
	*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
	*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
#undef PHY
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* TODO: context safe read */
	*tx = conn->lll.phy_tx;
	*rx = conn->lll.phy_rx;

	return 0;
}

uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
{
	/* TODO: validate against supported phy */

	default_phy_tx = tx;
	default_phy_rx = rx;

	return 0;
}

uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	uint8_t err;

	err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI)
uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	*rssi = conn->lll.rssi_latest;

	return 0;
}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

#if defined(CONFIG_BT_CTLR_LE_PING)
uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
		*apto = conn->apto_reload * conn->lll.interval *
			CONN_INT_UNIT_US / (10U * USEC_PER_MSEC);
	} else {
		*apto = conn->apto_reload * (conn->lll.interval + 1U) *
			CONN_LOW_LAT_INT_UNIT_US / (10U * USEC_PER_MSEC);
	}

	return 0;
}

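/* The authenticated payload timeout travels over HCI in 10 ms units;
 * ll_apto_get() above converts the reload value (counted in connection
 * events) back to that unit and ll_apto_set() below does the inverse.
 * E.g. apto = 3000 (30 s) at a 100 ms connection interval yields an
 * apto_reload of 300 connection events.
 */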
uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
		conn->apto_reload =
			RADIO_CONN_EVENTS(apto * 10U * USEC_PER_MSEC,
					  conn->lll.interval *
					  CONN_INT_UNIT_US);
	} else {
		conn->apto_reload =
			RADIO_CONN_EVENTS(apto * 10U * USEC_PER_MSEC,
					  (conn->lll.interval + 1U) *
					  CONN_LOW_LAT_INT_UNIT_US);
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_LE_PING */

int ull_conn_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_conn_reset(void)
{
	uint16_t handle;
	int err;

#if defined(CONFIG_BT_CENTRAL)
	/* Reset initiator */
	(void)ull_central_reset();
#endif /* CONFIG_BT_CENTRAL */

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		disable(handle);
	}

	/* Re-initialize the Tx mfifo */
	MFIFO_INIT(conn_tx);

	/* Re-initialize the Tx Ack mfifo */
	MFIFO_INIT(conn_ack);

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

struct lll_conn *ull_conn_lll_get(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_conn_get(handle);

	return &conn->lll;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint16_t ull_conn_default_tx_octets_get(void)
{
	return default_tx_octets;
}

#if defined(CONFIG_BT_CTLR_PHY)
uint16_t ull_conn_default_tx_time_get(void)
{
	return default_tx_time;
}
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_conn_default_phy_tx_get(void)
{
	return default_phy_tx;
}

uint8_t ull_conn_default_phy_rx_get(void)
{
	return default_phy_rx;
}
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
void ull_conn_default_past_param_set(uint8_t mode, uint16_t skip, uint16_t timeout,
				     uint8_t cte_type)
{
	default_past_params.mode     = mode;
	default_past_params.skip     = skip;
	default_past_params.timeout  = timeout;
	default_past_params.cte_type = cte_type;
}

struct past_params ull_conn_default_past_param_get(void)
{
	return default_past_params;
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
			     uint8_t const *const own_id_addr,
			     uint8_t const peer_id_addr_type,
			     uint8_t const *const peer_id_addr)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		struct ll_conn *conn = ll_connected_get(handle);

		if (conn &&
		    conn->peer_id_addr_type == peer_id_addr_type &&
		    !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
		    conn->own_id_addr_type == own_id_addr_type &&
		    !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
			return true;
		}
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

void ull_conn_setup(memq_link_t *rx_link, struct node_rx_pdu *rx)
{
	struct node_rx_ftr *ftr;
	struct ull_hdr *hdr;

	/* Store the link in the node rx so that when done event is
	 * processed it can be used to enqueue node rx towards LL context
	 */
	rx->hdr.link = rx_link;

	/* NOTE: LLL conn context SHALL be after lll_hdr in
	 *       struct lll_adv and struct lll_scan.
	 */
	ftr = &(rx->rx_ftr);

	/* Check for reference count and decide to setup connection
	 * here or when done event arrives.
	 */
	hdr = HDR_LLL2ULL(ftr->param);
	if (ull_ref_get(hdr)) {
		/* Setup connection in ULL disabled callback,
		 * pass the node rx as disabled callback parameter.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = rx;
		hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
	} else {
		conn_setup_adv_scan_disabled_cb(rx);
	}
}

void ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
{
	struct pdu_data *pdu_rx;
	struct ll_conn *conn;

	conn = ll_connected_get((*rx)->hdr.handle);
	if (!conn) {
		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		return;
	}

	ull_cp_tx_ntf(conn);

	pdu_rx = (void *)(*rx)->pdu;

	switch (pdu_rx->ll_id) {
	case PDU_DATA_LLID_CTRL:
	{
		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		ull_cp_rx(conn, link, *rx);

		return;
	}

	case PDU_DATA_LLID_DATA_CONTINUE:
	case PDU_DATA_LLID_DATA_START:
#if defined(CONFIG_BT_CTLR_LE_ENC)
		if (conn->pause_rx_data) {
			conn->llcp_terminate.reason_final =
				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;

			/* Mark buffer for release */
			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
		}
#endif /* CONFIG_BT_CTLR_LE_ENC */
		break;

	case PDU_DATA_LLID_RESV:
	default:
#if defined(CONFIG_BT_CTLR_LE_ENC)
		if (conn->pause_rx_data) {
			conn->llcp_terminate.reason_final =
				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
		}
#endif /* CONFIG_BT_CTLR_LE_ENC */

		/* Invalid LL id, drop it. */

		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		break;
	}
}

int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire,
		  uint32_t remainder, uint16_t lazy)
{
	LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);

	conn->llcp.prep.ticks_at_expire = ticks_at_expire;
	conn->llcp.prep.remainder = remainder;
	conn->llcp.prep.lazy = lazy;

	ull_cp_run(conn);

	if (conn->cancel_prepare) {
		/* Reset signal */
		conn->cancel_prepare = 0U;

		/* Cancel prepare */
		return -ECANCELED;
	}

	/* Continue prepare */
	return 0;
}

void ull_conn_done(struct node_rx_event_done *done)
{
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	uint32_t ticks_slot_minus;
	uint32_t ticks_slot_plus;
	uint16_t latency_event;
	uint16_t elapsed_event;
	struct lll_conn *lll;
	struct ll_conn *conn;
	uint8_t reason_final;
	uint8_t force_lll;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	conn = CONTAINER_OF(done->param, struct ll_conn, ull);
	lll = &conn->lll;

	/* Skip if connection terminated by local host */
	if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
		return;
	}

	ull_cp_tx_ntf(conn);

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	ull_lp_past_conn_evt_done(conn, done);
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

#if defined(CONFIG_BT_CTLR_LE_ENC)
	/* Check authenticated payload expiry or MIC failure */
	switch (done->extra.mic_state) {
	case LLL_CONN_MIC_NONE:
#if defined(CONFIG_BT_CTLR_LE_PING)
		if (lll->enc_rx && lll->enc_tx) {
			uint16_t appto_reload_new;

			/* check for change in apto */
			appto_reload_new = (conn->apto_reload >
					    (lll->latency + 6)) ?
					   (conn->apto_reload -
					    (lll->latency + 6)) :
					   conn->apto_reload;
			if (conn->appto_reload != appto_reload_new) {
				conn->appto_reload = appto_reload_new;
				conn->apto_expire = 0U;
			}

			/* start authenticated payload (pre) timeout */
			if (conn->apto_expire == 0U) {
				conn->appto_expire = conn->appto_reload;
				conn->apto_expire = conn->apto_reload;
			}
		}
#endif /* CONFIG_BT_CTLR_LE_PING */
		break;

	case LLL_CONN_MIC_PASS:
#if defined(CONFIG_BT_CTLR_LE_PING)
		conn->appto_expire = conn->apto_expire = 0U;
#endif /* CONFIG_BT_CTLR_LE_PING */
		break;

	case LLL_CONN_MIC_FAIL:
		conn->llcp_terminate.reason_final =
			BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
		break;
	}
#endif /* CONFIG_BT_CTLR_LE_ENC */

	reason_final = conn->llcp_terminate.reason_final;
	if (reason_final) {
		conn_cleanup(conn, reason_final);

		return;
	}

	/* Events elapsed used in timeout checks below */
#if defined(CONFIG_BT_CTLR_CONN_META)
	/* If event has shallow expiry do not add latency, but rely on
	 * accumulated lazy count.
	 */
	latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
#else
	latency_event = lll->latency_event;
#endif

	/* Peripheral drift compensation calc and new latency or
	 * central terminate acked
	 */
	ticks_drift_plus = 0U;
	ticks_drift_minus = 0U;
	ticks_slot_plus = 0U;
	ticks_slot_minus = 0U;

	if (done->extra.trx_cnt) {
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
			if (!conn->periph.drift_skip) {
				ull_drift_ticks_get(done, &ticks_drift_plus,
						    &ticks_drift_minus);

				if (ticks_drift_plus || ticks_drift_minus) {
					conn->periph.drift_skip =
						ull_ref_get(&conn->ull);
				}
			} else {
				conn->periph.drift_skip--;
			}

			if (!ull_tx_q_peek(&conn->tx_q)) {
				ull_conn_tx_demux(UINT8_MAX);
			}

			if (ull_tx_q_peek(&conn->tx_q) ||
			    memq_peek(lll->memq_tx.head,
				      lll->memq_tx.tail, NULL)) {
				lll->latency_event = 0U;
			} else if (lll->periph.latency_enabled) {
				lll->latency_event = lll->latency;
			}
#endif /* CONFIG_BT_PERIPHERAL */
		}

		/* Reset connection failed to establish countdown */
		conn->connect_expire = 0U;
	} else {
#if defined(CONFIG_BT_PERIPHERAL)
		if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
			conn->periph.drift_skip = 0U;
		}
#endif /* CONFIG_BT_PERIPHERAL */
	}

	elapsed_event = latency_event + lll->lazy_prepare + 1U;
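
	/* E.g. with a peripheral latency of 4 applied and no skipped
	 * prepares, latency_event = 4 and lazy_prepare = 0, hence 5
	 * connection intervals have elapsed since the previous done event.
	 */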

	/* Reset supervision countdown */
	if (done->extra.crc_valid && !done->extra.is_aborted) {
		conn->supervision_expire = 0U;
	}

	/* check connection failed to establish */
	else if (conn->connect_expire) {
		if (conn->connect_expire > elapsed_event) {
			conn->connect_expire -= elapsed_event;
		} else {
			conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);

			return;
		}
	}

	/* if anchor point not sync-ed, start supervision timeout, and break
	 * latency if any.
	 */
	else {
		/* Start supervision timeout, if not started already */
		if (!conn->supervision_expire) {
			uint32_t conn_interval_us;

			if (conn->lll.interval >= BT_HCI_LE_INTERVAL_MIN) {
				conn_interval_us = conn->lll.interval *
						   CONN_INT_UNIT_US;
			} else {
				conn_interval_us = (conn->lll.interval + 1U) *
						   CONN_LOW_LAT_INT_UNIT_US;
			}

			conn->supervision_expire = RADIO_CONN_EVENTS(
				(conn->supervision_timeout * 10U * USEC_PER_MSEC),
				conn_interval_us);
		}
	}

	/* check supervision timeout */
	force = 0U;
	force_lll = 0U;
	if (conn->supervision_expire) {
		if (conn->supervision_expire > elapsed_event) {
			conn->supervision_expire -= elapsed_event;

			/* break latency */
			lll->latency_event = 0U;

			/* Force both central and peripheral when close to
			 * supervision timeout.
			 */
			if (conn->supervision_expire <= 6U) {
				force_lll = 1U;

				force = 1U;
			}
#if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
			/* use randomness to force peripheral role when anchor
			 * points are being missed.
			 */
			else if (lll->role) {
				if (latency_event) {
					force = 1U;
				} else {
					force = conn->periph.force & 0x01;

					/* rotate force bits */
					conn->periph.force >>= 1U;
					if (force) {
						conn->periph.force |= BIT(31);
					}
				}
			}
#endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
		} else {
			conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);

			return;
		}
	}

	lll->forced = force_lll;

	/* check procedure timeout */
	uint8_t error_code;

	if (-ETIMEDOUT == ull_cp_prt_elapse(conn, elapsed_event, &error_code)) {
		conn_cleanup(conn, error_code);

		return;
	}

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* check apto */
	if (conn->apto_expire != 0U) {
		if (conn->apto_expire > elapsed_event) {
			conn->apto_expire -= elapsed_event;
		} else {
			struct node_rx_hdr *rx;

			rx = ll_pdu_rx_alloc();
			if (rx) {
				conn->apto_expire = 0U;

				rx->handle = lll->handle;
				rx->type = NODE_RX_TYPE_APTO;

				/* enqueue apto event into rx queue */
				ll_rx_put_sched(rx->link, rx);
			} else {
				conn->apto_expire = 1U;
			}
		}
	}

	/* check appto */
	if (conn->appto_expire != 0U) {
		if (conn->appto_expire > elapsed_event) {
			conn->appto_expire -= elapsed_event;
		} else {
			conn->appto_expire = 0U;

			/* Initiate LE_PING procedure */
			ull_cp_le_ping(conn);
		}
	}
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	/* Check if the CTE_REQ procedure is periodic and counter has been started.
	 * req_expire is set when: new CTE_REQ is started, after completion of last periodic run.
	 */
	if (conn->llcp.cte_req.req_interval != 0U && conn->llcp.cte_req.req_expire != 0U) {
		if (conn->llcp.cte_req.req_expire > elapsed_event) {
			conn->llcp.cte_req.req_expire -= elapsed_event;
		} else {
			uint8_t err;

			/* Set req_expire to zero to mark that new periodic CTE_REQ was started.
			 * The counter is re-started after completion of this run.
			 */
			conn->llcp.cte_req.req_expire = 0U;

			err = ull_cp_cte_req(conn, conn->llcp.cte_req.min_cte_len,
					     conn->llcp.cte_req.cte_type);

			if (err == BT_HCI_ERR_CMD_DISALLOWED) {
				/* Conditions have changed, e.g. PHY was changed to CODED.
				 * New CTE REQ is not possible. Disable the periodic requests.
				 */
				ull_cp_cte_req_set_disable(conn);
			}
		}
	}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	/* generate RSSI event */
	if (lll->rssi_sample_count == 0U) {
		struct node_rx_pdu *rx;
		struct pdu_data *pdu_data_rx;

		rx = ll_pdu_rx_alloc();
		if (rx) {
			lll->rssi_reported = lll->rssi_latest;
			lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;

			/* Prepare the rx packet structure */
			rx->hdr.handle = lll->handle;
			rx->hdr.type = NODE_RX_TYPE_RSSI;

			/* prepare connection RSSI structure */
			pdu_data_rx = (void *)rx->pdu;
			pdu_data_rx->rssi = lll->rssi_reported;

			/* enqueue connection RSSI structure into queue */
			ll_rx_put_sched(rx->hdr.link, rx);
		}
	}
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

	/* check if latency needs update */
	lazy = 0U;
	if ((force) || (latency_event != lll->latency_event)) {
		lazy = lll->latency_event + 1U;
	}
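
	/* The +1 keeps a zero lazy meaning "no change" towards the ticker:
	 * e.g. when latency was just broken, lll->latency_event is 0 and
	 * lazy = 1 is passed, making the ticker expire at every connection
	 * interval again.
	 */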

#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
#if defined(CONFIG_BT_CTLR_DATA_LENGTH) || defined(CONFIG_BT_CTLR_PHY)
	if (lll->evt_len_upd) {
		uint32_t ready_delay, rx_time, tx_time, ticks_slot, slot_us;

		lll->evt_len_upd = 0;

#if defined(CONFIG_BT_CTLR_PHY)
		ready_delay = (lll->role) ?
			lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8) :
			lll_radio_tx_ready_delay_get(lll->phy_tx, lll->phy_flags);

#if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		tx_time = lll->dle.eff.max_tx_time;
		rx_time = lll->dle.eff.max_rx_time;

#else /* CONFIG_BT_CTLR_DATA_LENGTH */
		tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
		rx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, 0),
			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
		tx_time = PDU_MAX_US(0U, 0U, lll->phy_tx);
		rx_time = PDU_MAX_US(0U, 0U, lll->phy_rx);
#endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */

#else /* CONFIG_BT_CTLR_PHY */
		ready_delay = (lll->role) ?
			lll_radio_rx_ready_delay_get(0, 0) :
			lll_radio_tx_ready_delay_get(0, 0);
#if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
		tx_time = PDU_DC_MAX_US(lll->dle.eff.max_tx_octets, 0);
		rx_time = PDU_DC_MAX_US(lll->dle.eff.max_rx_octets, 0);

#else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
		tx_time = PDU_MAX_US(0U, 0U, PHY_1M);
		rx_time = PDU_MAX_US(0U, 0U, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
#endif /* CONFIG_BT_CTLR_PHY */

		/* Calculate event time reservation */
		slot_us = tx_time + rx_time;
		slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
		slot_us += ready_delay;

		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
		    !conn->lll.role) {
			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
		}

		ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
		if (ticks_slot > conn->ull.ticks_slot) {
			ticks_slot_plus = ticks_slot - conn->ull.ticks_slot;
		} else {
			ticks_slot_minus = conn->ull.ticks_slot - ticks_slot;
		}
		conn->ull.ticks_slot = ticks_slot;
	}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH || CONFIG_BT_CTLR_PHY */
#else /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
	ticks_slot_plus = 0;
	ticks_slot_minus = 0;
#endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */

	/* update conn ticker */
	if (ticks_drift_plus || ticks_drift_minus ||
	    ticks_slot_plus || ticks_slot_minus ||
	    lazy || force) {
		uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
		struct ll_conn *conn_ll = lll->hdr.parent;
		uint32_t ticker_status;

		/* Call to ticker_update can fail under the race
		 * condition wherein the peripheral role is being stopped
		 * but at the same time it is preempted by a peripheral
		 * event that gets into the close state. Accept failure
		 * when the peripheral role is being stopped.
		 */
		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      ticker_id,
					      ticks_drift_plus, ticks_drift_minus,
					      ticks_slot_plus, ticks_slot_minus,
					      lazy, force,
					      ticker_update_conn_op_cb,
					      conn_ll);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)conn_ll == ull_disable_mark_get()));
	}
}

#if defined(CONFIG_BT_CTLR_LOW_LAT)
void ull_conn_lll_tx_demux_sched(struct lll_conn *lll)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};

	mfy.param = HDR_LLL2ULL(lll);

	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1U, &mfy);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

void ull_conn_tx_demux(uint8_t count)
{
	do {
		struct lll_tx *lll_tx;
		struct ll_conn *conn;

		lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
		if (!lll_tx) {
			break;
		}

		conn = ll_connected_get(lll_tx->handle);
		if (conn) {
			struct node_tx *tx = lll_tx->node;

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
			if (empty_data_start_release(conn, tx)) {
				goto ull_conn_tx_demux_release;
			}
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

			ull_tx_q_enqueue_data(&conn->tx_q, tx);
		} else {
			struct node_tx *tx = lll_tx->node;
			struct pdu_data *p = (void *)tx->pdu;

			p->ll_id = PDU_DATA_LLID_RESV;
			ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
		}

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
ull_conn_tx_demux_release:
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

		MFIFO_DEQUEUE(conn_tx);
	} while (--count);
}

void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
{
	while (count--) {
		struct node_tx *tx;
		memq_link_t *link;

		tx = tx_ull_dequeue(conn, NULL);
		if (!tx) {
			/* No more tx nodes available */
			break;
		}

		link = mem_acquire(&mem_link_tx.free);
		LL_ASSERT(link);

		/* Enqueue towards LLL */
		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
	}
}
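
/* The mem_acquire() above is asserted rather than error-checked: the
 * mem_link_tx pool provides one link per tx node that can ever be in flight
 * (CONN_DATA_BUFFERS + LLCP_TX_CTRL_BUF_COUNT), so running out would
 * indicate a link leak.
 */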

void ull_conn_link_tx_release(void *link)
{
	mem_release(link, &mem_link_tx.free);
}

uint8_t ull_conn_ack_last_idx_get(void)
{
	return mfifo_fifo_conn_ack.l;
}

memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
			       struct node_tx **tx)
{
	struct lll_tx *lll_tx;

	lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
	if (!lll_tx) {
		return NULL;
	}

	*ack_last = mfifo_fifo_conn_ack.l;

	*handle = lll_tx->handle;
	*tx = lll_tx->node;

	return (*tx)->link;
}

memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
				       struct node_tx **tx)
{
	struct lll_tx *lll_tx;

	lll_tx = mfifo_dequeue_get(mfifo_fifo_conn_ack.m, mfifo_conn_ack.s,
				   mfifo_fifo_conn_ack.f, last);
	if (!lll_tx) {
		return NULL;
	}

	*handle = lll_tx->handle;
	*tx = lll_tx->node;

	return (*tx)->link;
}

void *ull_conn_ack_dequeue(void)
{
	return MFIFO_DEQUEUE(conn_ack);
}

void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
{
	struct lll_tx *lll_tx;
	uint8_t idx;

	idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
	LL_ASSERT(lll_tx);

	lll_tx->handle = handle;
	lll_tx->node = tx;

	MFIFO_ENQUEUE(conn_ack, idx);
}
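
/* The enqueue above is asserted because the conn_ack FIFO is dimensioned
 * with one slot per data and control buffer (see its MFIFO_DEFINE at the
 * top of this file), so it cannot overflow in normal operation.
 */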

void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
{
	struct pdu_data *pdu_tx;

	pdu_tx = (void *)tx->pdu;
	LL_ASSERT(pdu_tx->len);

	if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
		if (handle != LLL_HANDLE_INVALID) {
			struct ll_conn *conn = ll_conn_get(handle);

			ull_cp_tx_ack(conn, tx);
		}

		/* release ctrl mem if points to itself */
		if (link->next == (void *)tx) {
			LL_ASSERT(link->next);

			struct ll_conn *conn = ll_connected_get(handle);

			ull_cp_release_tx(conn, tx);
			return;
		} else if (!tx) {
			/* Tx Node re-used to enqueue new ctrl PDU */
			return;
		}
		LL_ASSERT(!link->next);
	} else if (handle == LLL_HANDLE_INVALID) {
		pdu_tx->ll_id = PDU_DATA_LLID_RESV;
	} else {
		LL_ASSERT(handle != LLL_HANDLE_INVALID);
	}

	ll_tx_ack_put(handle, tx);
}

uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
{
	uint16_t max_tx_octets;

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
	switch (lll->phy_tx_time) {
	default:
	case PHY_1M:
		/* 1M PHY, 1us = 1 bit, hence divide by 8.
		 * Deduct 10 bytes for preamble (1), access address (4),
		 * header (2), and CRC (3).
		 */
		max_tx_octets = (lll->dle.eff.max_tx_time >> 3) - 10;
		break;

	case PHY_2M:
		/* 2M PHY, 1us = 2 bits, hence divide by 4.
		 * Deduct 11 bytes for preamble (2), access address (4),
		 * header (2), and CRC (3).
		 */
		max_tx_octets = (lll->dle.eff.max_tx_time >> 2) - 11;
		break;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	case PHY_CODED:
		if (lll->phy_flags & 0x01) {
			/* S8 Coded PHY, 8us = 1 bit, hence divide by
			 * 64.
			 * Subtract time for preamble (80), AA (256),
			 * CI (16), TERM1 (24), CRC (192) and
			 * TERM2 (24), total 592 us.
			 * Subtract 2 bytes for header.
			 */
			max_tx_octets = ((lll->dle.eff.max_tx_time - 592) >>
					  6) - 2;
		} else {
			/* S2 Coded PHY, 2us = 1 bit, hence divide by
			 * 16.
			 * Subtract time for preamble (80), AA (256),
			 * CI (16), TERM1 (24), CRC (48) and
			 * TERM2 (6), total 430 us.
			 * Subtract 2 bytes for header.
			 */
			max_tx_octets = ((lll->dle.eff.max_tx_time - 430) >>
					  4) - 2;
		}
		break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	}
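
	/* Worked example of the arithmetic above: an effective max_tx_time
	 * of 1064 us gives (1064 >> 3) - 10 = 123 octets on the 1M PHY, but
	 * (1064 >> 2) - 11 = 255 octets on the 2M PHY; the result is further
	 * clamped to dle.eff.max_tx_octets below.
	 */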

#if defined(CONFIG_BT_CTLR_LE_ENC)
	if (lll->enc_tx) {
		/* deduct the MIC */
		max_tx_octets -= 4U;
	}
#endif /* CONFIG_BT_CTLR_LE_ENC */

	if (max_tx_octets > lll->dle.eff.max_tx_octets) {
		max_tx_octets = lll->dle.eff.max_tx_octets;
	}

#else /* !CONFIG_BT_CTLR_PHY */
	max_tx_octets = lll->dle.eff.max_tx_octets;
#endif /* !CONFIG_BT_CTLR_PHY */
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
	max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
	return max_tx_octets;
}

/**
 * @brief Initialize pdu_data members that are read only in lower link layer.
 *
 * @param pdu Pointer to pdu_data object to be initialized
 */
void ull_pdu_data_init(struct pdu_data *pdu)
{
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_TX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	pdu->cp = 0U;
	pdu->octet3.resv[0] = 0U;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_TX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
}

static int init_reset(void)
{
	/* Initialize conn pool. */
	mem_init(conn_pool, sizeof(struct ll_conn),
		 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);

	/* Initialize tx pool. */
	mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONN_DATA_BUFFERS,
		 &mem_conn_tx.free);

	/* Initialize tx link pool. */
	mem_init(mem_link_tx.pool, sizeof(memq_link_t),
		 (CONN_DATA_BUFFERS +
		  LLCP_TX_CTRL_BUF_COUNT),
		 &mem_link_tx.free);

	/* Initialize control procedure system. */
	ull_cp_init();

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Reset CPR mutex */
	cpr_active_reset();
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Initialize the DLE defaults */
	default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	/* Initialize the PHY defaults */
	default_phy_tx = PHY_1M;
	default_phy_rx = PHY_1M;

#if defined(CONFIG_BT_CTLR_PHY_2M)
	default_phy_tx |= PHY_2M;
	default_phy_rx |= PHY_2M;
#endif /* CONFIG_BT_CTLR_PHY_2M */

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	default_phy_tx |= PHY_CODED;
	default_phy_rx |= PHY_CODED;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	memset(&default_past_params, 0, sizeof(struct past_params));
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

	return 0;
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void tx_demux_sched(struct ll_conn *conn)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_demux};

	mfy.param = conn;

	mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
}
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

static void tx_demux(void *param)
{
	ull_conn_tx_demux(1);

	ull_conn_tx_lll_enqueue(param, 1);
}

static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *unused)
{
	struct node_tx *tx = NULL;

	tx = ull_tx_q_dequeue(&conn->tx_q);
	if (tx) {
		struct pdu_data *pdu_tx;

		pdu_tx = (void *)tx->pdu;
		if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
			/* Mark the tx node as belonging to the ctrl pool */
			tx->next = tx;
		} else {
			/* Mark the tx node as belonging to the data pool */
			tx->next = NULL;
		}
	}
	return tx;
}

static void ticker_update_conn_op_cb(uint32_t status, void *param)
{
	/* Peripheral drift compensation succeeds, or it fails in a race
	 * condition when disconnecting or during a connection update
	 * (race between the ticker_update and ticker_stop calls).
	 */
1750 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1751 		  param == ull_update_mark_get() ||
1752 		  param == ull_disable_mark_get());
1753 }
1754 
ticker_stop_conn_op_cb(uint32_t status,void * param)1755 static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1756 {
1757 	void *p;
1758 
1759 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1760 
1761 	p = ull_update_mark(param);
1762 	LL_ASSERT(p == param);
1763 }
1764 
ticker_start_conn_op_cb(uint32_t status,void * param)1765 static void ticker_start_conn_op_cb(uint32_t status, void *param)
1766 {
1767 	void *p;
1768 
1769 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1770 
1771 	p = ull_update_unmark(param);
1772 	LL_ASSERT(p == param);
1773 }
1774 
conn_setup_adv_scan_disabled_cb(void * param)1775 static void conn_setup_adv_scan_disabled_cb(void *param)
1776 {
1777 	struct node_rx_ftr *ftr;
1778 	struct node_rx_pdu *rx;
1779 	struct lll_conn *lll;
1780 
1781 	/* NOTE: LLL conn context SHALL be after lll_hdr in
1782 	 *       struct lll_adv and struct lll_scan.
1783 	 */
1784 	rx = param;
1785 	ftr = &(rx->rx_ftr);
1786 	lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1787 				     sizeof(struct lll_hdr)));
1788 
1789 	if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
1790 		struct ull_hdr *hdr;
1791 
1792 		/* Prevent fast ADV re-scheduling from re-triggering */
1793 		hdr = HDR_LLL2ULL(ftr->param);
1794 		hdr->disabled_cb = NULL;
1795 	}
1796 
1797 	switch (lll->role) {
1798 #if defined(CONFIG_BT_CENTRAL)
1799 	case 0:
1800 		ull_central_setup(rx, ftr, lll);
1801 		break;
1802 #endif /* CONFIG_BT_CENTRAL */
1803 
1804 #if defined(CONFIG_BT_PERIPHERAL)
1805 	case 1:
1806 		ull_periph_setup(rx, ftr, lll);
1807 		break;
1808 #endif /* CONFIG_BT_PERIPHERAL */
1809 
1810 	default:
1811 		LL_ASSERT(0);
1812 		break;
1813 	}
1814 }
1815 
disable(uint16_t handle)1816 static inline void disable(uint16_t handle)
1817 {
1818 	struct ll_conn *conn;
1819 	int err;
1820 
1821 	conn = ll_conn_get(handle);
1822 
1823 	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1824 					conn, &conn->lll);
1825 	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
1826 
1827 	conn->lll.handle = LLL_HANDLE_INVALID;
1828 	conn->lll.link_tx_free = NULL;
1829 }
1830 
1831 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
conn_cleanup_iso_cis_released_cb(struct ll_conn * conn)1832 static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1833 {
1834 	struct ll_conn_iso_stream *cis;
1835 
1836 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1837 	if (cis) {
1838 		struct node_rx_pdu *rx;
1839 		uint8_t reason;
1840 
1841 		/* More associated CISes - stop next */
1842 		rx = (void *)&conn->llcp_terminate.node_rx;
1843 		reason = *(uint8_t *)rx->pdu;
1844 
1845 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
1846 				      reason);
1847 	} else {
1848 		/* No more CISes associated with conn - finalize */
1849 		conn_cleanup_finalize(conn);
1850 	}
1851 }
1852 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1853 
static void conn_cleanup_finalize(struct ll_conn *conn)
{
	struct lll_conn *lll = &conn->lll;
	uint32_t ticker_status;

	ull_cp_state_set(conn, ULL_CP_DISCONNECTED);

	/* Update tx buffer queue handling */
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	ull_cp_update_tx_buffer_queue(conn);
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	ull_cp_release_nodes(conn);

	/* flush demux-ed Tx buffer still in ULL context */
	tx_ull_flush(conn);

	/* Stop Central or Peripheral role ticker */
	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    TICKER_ID_CONN_BASE + lll->handle,
				    ticker_stop_op_cb, conn);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

	/* Invalidate the connection context */
	lll->handle = LLL_HANDLE_INVALID;

	/* Demux and flush Tx PDUs that remain enqueued in thread context */
	ull_conn_tx_demux(UINT8_MAX);
}

static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
{
	struct node_rx_pdu *rx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	struct ll_conn_iso_stream *cis;
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Reset CPR mutex */
	cpr_active_check_and_reset(conn);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

	/* Only the termination structure is populated here, in the ULL
	 * context; the actual enqueue happens in the LLL context, in
	 * tx_lll_flush. This avoids having to pass the reason value and
	 * handle through the mayfly scheduling of tx_lll_flush.
	 */
	rx = (void *)&conn->llcp_terminate.node_rx.rx;
	rx->hdr.handle = conn->lll.handle;
	rx->hdr.type = NODE_RX_TYPE_TERMINATE;
	*((uint8_t *)rx->pdu) = reason;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
	if (cis) {
		/* Stop CIS and defer cleanup to after teardown. */
		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb,
				      reason);
		return;
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */

	conn_cleanup_finalize(conn);
}

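/* Requeue any Tx nodes still demux-ed in the ULL context onto the LLL Tx
 * memq; during teardown these are then released through the flush path in
 * tx_lll_flush.
 */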
static void tx_ull_flush(struct ll_conn *conn)
{
	struct node_tx *tx;

	ull_tx_q_resume_data(&conn->tx_q);

	tx = tx_ull_dequeue(conn, NULL);
	while (tx) {
		memq_link_t *link;

		link = mem_acquire(&mem_link_tx.free);
		LL_ASSERT(link);

		/* Enqueue towards LLL */
		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);

		tx = tx_ull_dequeue(conn, NULL);
	}
}

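/* Role ticker stopped: continue teardown in conn_disable, scheduled via a
 * mayfly into the ULL high priority context, where pending LLL events can
 * be aborted.
 */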
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check for any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}

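/* Abort any LLL events still in the pipeline; once the reference count
 * reaches zero, disabled_cb continues with the Tx flush.
 */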
static void conn_disable(void *param)
{
	struct ll_conn *conn;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	conn = param;
	hdr = &conn->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &conn->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&conn->lll);
	}
}

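/* All LLL events for the connection are done; schedule the Tx flush in the
 * LLL context.
 */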
static void disabled_cb(void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
	uint32_t ret;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
			     TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!ret);
}

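/* Flush the LLL Tx memq, returning all nodes through the ack path with an
 * invalid handle, then enqueue the reserved terminate node towards ULL.
 */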
static void tx_lll_flush(void *param)
{
	struct node_rx_pdu *rx;
	struct lll_conn *lll;
	struct ll_conn *conn;
	struct node_tx *tx;
	memq_link_t *link;
	uint16_t handle;

	/* Get reference to ULL context */
	lll = param;
	conn = HDR_LLL2ULL(lll);
	handle = ll_conn_handle_get(conn);

	lll_conn_flush(handle, lll);

	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
			    (void **)&tx);
	while (link) {
		uint8_t idx;
		struct lll_tx *tx_buf;

		idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx_buf);
		LL_ASSERT(tx_buf);

		tx_buf->handle = LLL_HANDLE_INVALID;
		tx_buf->node = tx;

		/* TX node UPSTREAM, i.e. Tx node ack path */
		link->next = tx->next; /* Indicates ctrl pool or data pool */
		tx->next = link;

		MFIFO_ENQUEUE(conn_ack, idx);

		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
				    (void **)&tx);
	}

	/* Get the terminate structure reserved in the connection context.
	 * The terminate reason and connection handle should already be
	 * populated before this mayfly function was scheduled.
	 */
	rx = (void *)&conn->llcp_terminate.node_rx;
	LL_ASSERT(rx->hdr.link);
	link = rx->hdr.link;
	rx->hdr.link = NULL;

	/* Enqueue the terminate towards ULL context */
	ull_rx_put_sched(link, rx);
}

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
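/* Absorb zero-length DATA_START PDUs: ack them immediately and promote the
 * next non-empty continuation fragment to DATA_START.
 */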
static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
{
	struct pdu_data *p = (void *)tx->pdu;

	if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
		conn->start_empty = 1U;

		ll_tx_ack_put(conn->lll.handle, tx);

		return -EINVAL;
	} else if (p->len && conn->start_empty) {
		conn->start_empty = 0U;

		if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
			p->ll_id = PDU_DATA_LLID_DATA_START;
		}
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
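/* Heuristic: estimate the number of connection events to force the MD bit
 * so that data arriving at tx_rate (bits per second) can be drained, based
 * on the maximum PDU airtime on the current PHY.
 */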
static uint8_t force_md_cnt_calc(struct lll_conn *lll_connection, uint32_t tx_rate)
{
	uint32_t time_incoming, time_outgoing;
	uint8_t force_md_cnt;
	uint8_t phy_flags;
	uint8_t mic_size;
	uint8_t phy;

#if defined(CONFIG_BT_CTLR_PHY)
	phy = lll_connection->phy_tx;
	phy_flags = lll_connection->phy_flags;
#else /* !CONFIG_BT_CTLR_PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif /* !CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_LE_ENC)
	mic_size = PDU_MIC_SIZE * lll_connection->enc_tx;
#else /* !CONFIG_BT_CTLR_LE_ENC */
	mic_size = 0U;
#endif /* !CONFIG_BT_CTLR_LE_ENC */

	time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
			1000000UL / tx_rate;
	time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
				  phy_flags) +
			PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
			(EVENT_IFS_US << 1);

	force_md_cnt = 0U;
	if (time_incoming > time_outgoing) {
		uint32_t delta;
		uint32_t time_keep_alive;

		delta = (time_incoming << 1) - time_outgoing;
		time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
				   EVENT_IFS_US) << 1;
		force_md_cnt = (delta + (time_keep_alive - 1)) /
			       time_keep_alive;
		LOG_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
		       "keepalive= %u, force_md_cnt = %u.",
		       time_incoming, time_outgoing, delta, time_keep_alive,
		       force_md_cnt);
	}

	return force_md_cnt;
}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */

#if defined(CONFIG_BT_CTLR_LE_ENC)
/**
 * @brief Pause the data path of a rx queue.
 */
void ull_conn_pause_rx_data(struct ll_conn *conn)
{
	conn->pause_rx_data = 1U;
}

/**
 * @brief Resume the data path of a rx queue.
 */
void ull_conn_resume_rx_data(struct ll_conn *conn)
{
	conn->pause_rx_data = 0U;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

uint16_t ull_conn_event_counter(struct ll_conn *conn)
{
	struct lll_conn *lll;
	uint16_t event_counter;

	lll = &conn->lll;

	/* Calculate the current event counter. If the refcount is non-zero,
	 * prepare has been called and the LLL implementation has calculated
	 * and incremented the event counter (RX path); in that case subtract
	 * one from the current event counter.
	 * Otherwise we are in the TX path, and we calculate the current
	 * event counter similarly to LLL, by taking the expected event
	 * counter value plus the accumulated latency.
	 */
	if (ull_ref_get(&conn->ull)) {
		/* We are in post-prepare (RX path). Event counter is already
		 * calculated and incremented by 1 for next event.
		 */
		event_counter = lll->event_counter - 1;
	} else {
		event_counter = lll->event_counter + lll->latency_prepare +
				conn->llcp.prep.lazy;
	}

	return event_counter;
}
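
/* Stop and restart the ACL connection ticker with new timing parameters,
 * chaining the stop and start operations while the ticker job is disabled.
 */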
static void ull_conn_update_ticker(struct ll_conn *conn,
				   uint32_t ticks_win_offset,
				   uint32_t ticks_slot_overhead,
				   uint32_t periodic_us,
				   uint32_t ticks_at_expire)
{
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* disable ticker job, in order to chain stop and start
	 * to avoid RTC being stopped if no tickers are active.
	 */
	uint32_t mayfly_was_enabled =
		mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW);

	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0U);
#endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */

	/* start periph/central with new timings */
	uint8_t ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	uint32_t ticker_status = ticker_stop_abs(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
						 ticker_id_conn, ticks_at_expire,
						 ticker_stop_conn_op_cb, (void *)conn);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));
	ticker_status = ticker_start(
		TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, ticker_id_conn, ticks_at_expire,
		ticks_win_offset, HAL_TICKER_US_TO_TICKS(periodic_us),
		HAL_TICKER_REMAINDER(periodic_us),
#if defined(CONFIG_BT_TICKER_LOW_LAT)
		TICKER_NULL_LAZY,
#else /* !CONFIG_BT_TICKER_LOW_LAT */
		TICKER_LAZY_MUST_EXPIRE_KEEP,
#endif /* CONFIG_BT_TICKER_LOW_LAT */
		(ticks_slot_overhead + conn->ull.ticks_slot),
#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
		conn->lll.role == BT_HCI_ROLE_PERIPHERAL ?
		ull_periph_ticker_cb : ull_central_ticker_cb,
#elif defined(CONFIG_BT_PERIPHERAL)
		ull_periph_ticker_cb,
#else
		ull_central_ticker_cb,
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CENTRAL */
		conn, ticker_start_conn_op_cb, (void *)conn);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, if disabled in this function */
	if (mayfly_was_enabled) {
		mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U);
	}
#endif /* CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO */
}

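/* Apply new connection parameters at the given instant: recompute the event
 * time reservation, window widening and prepare latency, then restart the
 * ACL ticker with the new timings.
 */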
void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
				uint32_t win_offset_us, uint16_t interval, uint16_t latency,
				uint16_t timeout, uint16_t instant)
{
	uint16_t conn_interval_unit_old;
	uint16_t conn_interval_unit_new;
	uint32_t ticks_win_offset = 0U;
	uint32_t conn_interval_old_us;
	uint32_t conn_interval_new_us;
	uint32_t ticks_slot_overhead;
	uint16_t conn_interval_old;
	uint16_t conn_interval_new;
	uint32_t conn_interval_us;
	uint32_t ticks_at_expire;
	uint16_t instant_latency;
	uint32_t ready_delay_us;
	uint16_t event_counter;
	uint32_t periodic_us;
	uint16_t latency_upd;
	struct lll_conn *lll;

	lll = &conn->lll;

	/* Calculate current event counter */
	event_counter = ull_conn_event_counter(conn);

	instant_latency = (event_counter - instant) & 0xFFFF;

	ticks_at_expire = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	/* restore to normal prepare */
	if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
		uint32_t ticks_prepare_to_start =
			MAX(conn->ull.ticks_active_to_start, conn->ull.ticks_preempt_to_start);

		conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;

		ticks_at_expire -= (conn->ull.ticks_prepare_to_start - ticks_prepare_to_start);
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

#if defined(CONFIG_BT_CTLR_PHY)
	ready_delay_us = lll_radio_tx_ready_delay_get(lll->phy_tx,
						      lll->phy_flags);
#else
	ready_delay_us = lll_radio_tx_ready_delay_get(0U, 0U);
#endif

	/* compensate for instant_latency due to laziness */
	if (lll->interval >= BT_HCI_LE_INTERVAL_MIN) {
		conn_interval_old = instant_latency * lll->interval;
		conn_interval_unit_old = CONN_INT_UNIT_US;
	} else {
		conn_interval_old = instant_latency * (lll->interval + 1U);
		conn_interval_unit_old = CONN_LOW_LAT_INT_UNIT_US;
	}

	if (interval >= BT_HCI_LE_INTERVAL_MIN) {
		uint16_t max_tx_time;
		uint16_t max_rx_time;
		uint32_t slot_us;

		conn_interval_new = interval;
		conn_interval_unit_new = CONN_INT_UNIT_US;
		lll->tifs_tx_us = EVENT_IFS_DEFAULT_US;
		lll->tifs_rx_us = EVENT_IFS_DEFAULT_US;
		lll->tifs_hcto_us = EVENT_IFS_DEFAULT_US;

#if defined(CONFIG_BT_CTLR_DATA_LENGTH) && \
	defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
		max_tx_time = lll->dle.eff.max_tx_time;
		max_rx_time = lll->dle.eff.max_rx_time;

#else /* !CONFIG_BT_CTLR_DATA_LENGTH ||
       * !CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE
       */
		max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
		max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#if defined(CONFIG_BT_CTLR_PHY)
		max_tx_time = MAX(max_tx_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
		max_rx_time = MAX(max_rx_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH ||
	* !CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE
	*/

		/* Calculate event time reservation */
		slot_us = max_tx_time + max_rx_time;
		slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
		slot_us += ready_delay_us;

		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX) ||
		    (lll->role == BT_HCI_ROLE_CENTRAL)) {
			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
		}

		conn->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	} else {
		conn_interval_new = interval + 1U;
		conn_interval_unit_new = CONN_LOW_LAT_INT_UNIT_US;
		lll->tifs_tx_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
		lll->tifs_rx_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
		lll->tifs_hcto_us = CONFIG_BT_CTLR_EVENT_IFS_LOW_LAT_US;
		/* Reserve only the processing overhead; on overlap, the
		 * is_abort_cb mechanism will ensure the event continues so
		 * as to not lose anchor point sync.
		 */
		conn->ull.ticks_slot =
			HAL_TICKER_US_TO_TICKS_CEIL(EVENT_OVERHEAD_START_US);
	}

	conn_interval_us = conn_interval_new * conn_interval_unit_new;
	periodic_us = conn_interval_us;

	conn_interval_old_us = conn_interval_old * conn_interval_unit_old;
	latency_upd = conn_interval_old_us / conn_interval_us;
	conn_interval_new_us = latency_upd * conn_interval_us;
	if (conn_interval_new_us > conn_interval_old_us) {
		ticks_at_expire += HAL_TICKER_US_TO_TICKS(
			conn_interval_new_us - conn_interval_old_us);
	} else {
		ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
			conn_interval_old_us - conn_interval_new_us);
	}

	lll->latency_prepare += conn->llcp.prep.lazy;
	lll->latency_prepare -= (instant_latency - latency_upd);

	/* calculate the offset */
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead =
			MAX(conn->ull.ticks_active_to_start,
			    conn->ull.ticks_prepare_to_start);
	} else {
		ticks_slot_overhead = 0U;
	}

	/* calculate the window widening and interval */
	switch (lll->role) {
#if defined(CONFIG_BT_PERIPHERAL)
	case BT_HCI_ROLE_PERIPHERAL:
		lll->periph.window_widening_prepare_us -=
			lll->periph.window_widening_periodic_us * instant_latency;

		lll->periph.window_widening_periodic_us =
			DIV_ROUND_UP(((lll_clock_ppm_local_get() +
					   lll_clock_ppm_get(conn->periph.sca)) *
					  conn_interval_us), 1000000U);
		lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
		lll->periph.window_size_prepare_us = win_size * CONN_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		conn->periph.ticks_to_offset = 0U;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

		lll->periph.window_widening_prepare_us +=
			lll->periph.window_widening_periodic_us * latency_upd;
		if (lll->periph.window_widening_prepare_us > lll->periph.window_widening_max_us) {
			lll->periph.window_widening_prepare_us = lll->periph.window_widening_max_us;
		}

		ticks_at_expire -= HAL_TICKER_US_TO_TICKS(lll->periph.window_widening_periodic_us *
							  latency_upd);
		ticks_win_offset = HAL_TICKER_US_TO_TICKS((win_offset_us / CONN_INT_UNIT_US) *
							  CONN_INT_UNIT_US);
		periodic_us -= lll->periph.window_widening_periodic_us;
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case BT_HCI_ROLE_CENTRAL:
		ticks_win_offset = HAL_TICKER_US_TO_TICKS(win_offset_us);

		/* Workaround: the ticker_start function lacks a remainder
		 * parameter for the first interval; add one tick so as to
		 * use the ceiled value.
		 */
		ticks_win_offset += 1U;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		LL_ASSERT(0);
		break;
	}

	lll->interval = interval;
	lll->latency = latency;

	conn->supervision_timeout = timeout;
	ull_cp_prt_reload_set(conn, conn_interval_us);

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* APTO in no. of connection events */
	conn->apto_reload = RADIO_CONN_EVENTS((30U * 1000U * 1000U), conn_interval_us);
	/* Dispatch the LE Ping PDU 6 connection events (that the peer would
	 * listen to) before the 30 s timeout.
	 * TODO: the interval the peer actually listens to is greater than
	 * 30 s due to latency.
	 */
	conn->appto_reload = (conn->apto_reload > (lll->latency + 6U)) ?
					   (conn->apto_reload - (lll->latency + 6U)) :
					   conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */

	if (is_cu_proc) {
		conn->supervision_expire = 0U;
	}

	/* Update ACL ticker */
	ull_conn_update_ticker(conn, ticks_win_offset, ticks_slot_overhead, periodic_us,
			       ticks_at_expire);
	/* Signal that the prepare needs to be canceled */
	conn->cancel_prepare = 1U;
}

#if defined(CONFIG_BT_PERIPHERAL)
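/* Recompute the peripheral window widening after a peer SCA update and
 * restart the ACL ticker with the adjusted periodic interval.
 */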
void ull_conn_update_peer_sca(struct ll_conn *conn)
{
	struct lll_conn *lll;

	uint32_t conn_interval_us;
	uint32_t periodic_us;

	lll = &conn->lll;

	/* calculate the window widening and interval */
	if (lll->interval >= BT_HCI_LE_INTERVAL_MIN) {
		conn_interval_us = lll->interval *
				   CONN_INT_UNIT_US;
	} else {
		conn_interval_us = (lll->interval + 1U) *
				   CONN_LOW_LAT_INT_UNIT_US;
	}
	periodic_us = conn_interval_us;

	lll->periph.window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(conn->periph.sca)) *
				  conn_interval_us), 1000000U);

	periodic_us -= lll->periph.window_widening_periodic_us;

	/* Update ACL ticker */
	ull_conn_update_ticker(conn, HAL_TICKER_US_TO_TICKS(periodic_us), 0, periodic_us,
			       conn->llcp.prep.ticks_at_expire);
}
#endif /* CONFIG_BT_PERIPHERAL */

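/* Set the data channel map for the connection and recount the number of
 * used channels.
 */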
void ull_conn_chan_map_set(struct ll_conn *conn, const uint8_t chm[5])
{
	struct lll_conn *lll = &conn->lll;

	memcpy(lll->data_chan_map, chm, sizeof(lll->data_chan_map));
	lll->data_chan_count = util_ones_count_get(lll->data_chan_map, sizeof(lll->data_chan_map));
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
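/* Compute the maximum Rx/Tx times to use in Data Length Update, based on
 * the supported PHYs and the default Tx time/octets.
 */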
static inline void dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
				    uint16_t *max_tx_time)
{
	uint8_t phy_select = PHY_1M;
	uint16_t rx_time = 0U;
	uint16_t tx_time = 0U;

#if defined(CONFIG_BT_CTLR_PHY)
	if (conn->llcp.fex.valid && feature_phy_coded(conn)) {
		/* If coded PHY is supported on the connection
		 * it will define the max times
		 */
		phy_select = PHY_CODED;
		/* If not, max times should be defined by 1M timing */
	}
#endif

	rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select);

#if defined(CONFIG_BT_CTLR_PHY)
	tx_time = MIN(conn->lll.dle.default_tx_time,
		      PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy_select));
#else /* !CONFIG_BT_CTLR_PHY */
	tx_time = PDU_DC_MAX_US(conn->lll.dle.default_tx_octets, phy_select);
#endif /* !CONFIG_BT_CTLR_PHY */

	/* See Vol 6, Part B, Section 4.5.10: the minimum value for both
	 * times is 328 us.
	 */
	rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
	tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);

	*max_rx_time = rx_time;
	*max_tx_time = tx_time;
}

void ull_dle_max_time_get(struct ll_conn *conn, uint16_t *max_rx_time,
			  uint16_t *max_tx_time)
{
	dle_max_time_get(conn, max_rx_time, max_tx_time);
}

/*
 * TODO: this can probably be optimised, e.g. by creating a macro for the
 * ull_dle_update_eff function.
 */
uint8_t ull_dle_update_eff(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	/* Note that we must use bitwise OR and not logical OR here, so that
	 * both update functions always execute.
	 */
	dle_changed = ull_dle_update_eff_rx(conn);
	dle_changed |= ull_dle_update_eff_tx(conn);
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	if (dle_changed) {
		conn->lll.evt_len_upd = 1U;
	}
#endif

	return dle_changed;
}

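/* Recompute the effective maximum Rx octets/time from the local Rx and
 * remote Tx limits; returns non-zero when the effective values changed.
 */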
uint8_t ull_dle_update_eff_rx(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	const uint16_t eff_rx_octets =
		MAX(MIN(conn->lll.dle.local.max_rx_octets, conn->lll.dle.remote.max_tx_octets),
		    PDU_DC_PAYLOAD_SIZE_MIN);

#if defined(CONFIG_BT_CTLR_PHY)
	unsigned int min_eff_rx_time = (conn->lll.phy_rx == PHY_CODED) ?
			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;

	const uint16_t eff_rx_time =
		MAX(MIN(conn->lll.dle.local.max_rx_time, conn->lll.dle.remote.max_tx_time),
		    min_eff_rx_time);

	if (eff_rx_time != conn->lll.dle.eff.max_rx_time) {
		conn->lll.dle.eff.max_rx_time = eff_rx_time;
		dle_changed = 1U;
	}
#else
	conn->lll.dle.eff.max_rx_time = PDU_DC_MAX_US(eff_rx_octets, PHY_1M);
#endif

	if (eff_rx_octets != conn->lll.dle.eff.max_rx_octets) {
		conn->lll.dle.eff.max_rx_octets = eff_rx_octets;
		dle_changed = 1U;
	}
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	/* The update of the event length is delayed until after the DLE
	 * procedure is finished.
	 */
	if (dle_changed) {
		conn->lll.evt_len_upd_delayed = 1;
	}
#endif

	return dle_changed;
}

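/* Recompute the effective maximum Tx octets/time from the local Tx and
 * remote Rx limits; also commits any delayed event length update.
 */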
uint8_t ull_dle_update_eff_tx(struct ll_conn *conn)
{
	uint8_t dle_changed = 0U;

	const uint16_t eff_tx_octets =
		MAX(MIN(conn->lll.dle.local.max_tx_octets, conn->lll.dle.remote.max_rx_octets),
		    PDU_DC_PAYLOAD_SIZE_MIN);

#if defined(CONFIG_BT_CTLR_PHY)
	unsigned int min_eff_tx_time = (conn->lll.phy_tx == PHY_CODED) ?
			PDU_DC_PAYLOAD_TIME_MIN_CODED : PDU_DC_PAYLOAD_TIME_MIN;

	const uint16_t eff_tx_time =
		MAX(MIN(conn->lll.dle.local.max_tx_time, conn->lll.dle.remote.max_rx_time),
		    min_eff_tx_time);

	if (eff_tx_time != conn->lll.dle.eff.max_tx_time) {
		conn->lll.dle.eff.max_tx_time = eff_tx_time;
		dle_changed = 1U;
	}
#else
	conn->lll.dle.eff.max_tx_time = PDU_DC_MAX_US(eff_tx_octets, PHY_1M);
#endif

	if (eff_tx_octets != conn->lll.dle.eff.max_tx_octets) {
		conn->lll.dle.eff.max_tx_octets = eff_tx_octets;
		dle_changed = 1U;
	}

#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
	if (dle_changed) {
		conn->lll.evt_len_upd = 1U;
	}
	conn->lll.evt_len_upd |= conn->lll.evt_len_upd_delayed;
	conn->lll.evt_len_upd_delayed = 0;
#endif

	return dle_changed;
}

static void ull_len_data_length_trim(uint16_t *tx_octets, uint16_t *tx_time)
{
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	uint16_t tx_time_max =
			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
#else /* !CONFIG_BT_CTLR_PHY_CODED */
	uint16_t tx_time_max =
			PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY_CODED */

	/* trim to supported values */
	if (*tx_octets > LL_LENGTH_OCTETS_TX_MAX) {
		*tx_octets = LL_LENGTH_OCTETS_TX_MAX;
	}

	if (*tx_time > tx_time_max) {
		*tx_time = tx_time_max;
	}
}

void ull_dle_local_tx_update(struct ll_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	/* Trim to supported values */
	ull_len_data_length_trim(&tx_octets, &tx_time);

	conn->lll.dle.default_tx_octets = tx_octets;

#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.default_tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */

	dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time, &conn->lll.dle.local.max_tx_time);
	conn->lll.dle.local.max_tx_octets = conn->lll.dle.default_tx_octets;
}

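/* Initialise the DLE data set for a new connection on the given PHY and
 * determine whether a Data Length Update should be performed once the
 * connection is established.
 */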
void ull_dle_init(struct ll_conn *conn, uint8_t phy)
{
#if defined(CONFIG_BT_CTLR_PHY)
	const uint16_t max_time_min = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy);
	const uint16_t max_time_max = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, phy);
#endif /* CONFIG_BT_CTLR_PHY */

	/* Clear DLE data set */
	memset(&conn->lll.dle, 0, sizeof(conn->lll.dle));
	/* See Bluetooth Core Specification v5.2, Vol 6, Part B, Section
	 * 4.5.10. Default to the locally supported maximum rx/tx
	 * length/time.
	 */
	ull_dle_local_tx_update(conn, default_tx_octets, default_tx_time);

	conn->lll.dle.local.max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.local.max_rx_time = max_time_max;
#endif /* CONFIG_BT_CTLR_PHY */

	/* Default to minimum rx/tx data length/time */
	conn->lll.dle.remote.max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	conn->lll.dle.remote.max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;

#if defined(CONFIG_BT_CTLR_PHY)
	conn->lll.dle.remote.max_tx_time = max_time_min;
	conn->lll.dle.remote.max_rx_time = max_time_min;
#endif /* CONFIG_BT_CTLR_PHY */

	/* Per Bluetooth Core Specification v5.3, Vol 6, Part B, Section
	 * 4.5.10, ull_dle_update_eff can be called for initialisation.
	 */
	(void)ull_dle_update_eff(conn);

	/* Check whether the controller should perform a data length update
	 * after the connection is established
	 */
#if defined(CONFIG_BT_CTLR_PHY)
	if ((conn->lll.dle.local.max_rx_time != max_time_min ||
	     conn->lll.dle.local.max_tx_time != max_time_min)) {
		conn->lll.dle.update = 1;
	} else
#endif
	{
		if (conn->lll.dle.local.max_tx_octets != PDU_DC_PAYLOAD_SIZE_MIN ||
		    conn->lll.dle.local.max_rx_octets != PDU_DC_PAYLOAD_SIZE_MIN) {
			conn->lll.dle.update = 1;
		}
	}
}

void ull_conn_default_tx_octets_set(uint16_t tx_octets)
{
	default_tx_octets = tx_octets;
}

void ull_conn_default_tx_time_set(uint16_t tx_time)
{
	default_tx_time = tx_time;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
static bool ticker_op_id_match_func(uint8_t ticker_id, uint32_t ticks_slot,
				    uint32_t ticks_to_expire, void *op_context)
{
	ARG_UNUSED(ticks_slot);
	ARG_UNUSED(ticks_to_expire);

	uint8_t match_id = *(uint8_t *)op_context;

	return ticker_id == match_id;
}

static void ticker_get_offset_op_cb(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;
}

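/* Busy-wait on ticker_next_slot_get_ext to resolve the time until the given
 * ticker expires, returning the offset in microseconds with the remainder
 * jitter compensated.
 */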
static uint32_t get_ticker_offset(uint8_t ticker_id, uint16_t *lazy)
{
	uint32_t volatile ret_cb;
	uint32_t ticks_to_expire;
	uint32_t ticks_current;
	uint32_t sync_remainder_us;
	uint32_t remainder = 0U;
	uint32_t start_us;
	uint32_t ret;
	uint8_t id;

	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;

	ret_cb = TICKER_STATUS_BUSY;

	ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW,
				       &id, &ticks_current, &ticks_to_expire, &remainder,
				       lazy, ticker_op_id_match_func, &ticker_id,
				       ticker_get_offset_op_cb, (void *)&ret_cb);

	if (ret == TICKER_STATUS_BUSY) {
		while (ret_cb == TICKER_STATUS_BUSY) {
			ticker_job_sched(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW);
		}
	}

	LL_ASSERT(ret_cb == TICKER_STATUS_SUCCESS);

	/* Reduce by one tick for a negative remainder and return a positive
	 * remainder value.
	 */
	hal_ticker_remove_jitter(&ticks_to_expire, &remainder);
	sync_remainder_us = remainder;

	/* Add one tick for a negative remainder and return a positive
	 * remainder value.
	 */
	hal_ticker_add_jitter(&ticks_to_expire, &remainder);
	start_us = remainder;

	return ull_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_to_expire),
				       (sync_remainder_us - start_us));
}

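/* Mayfly: compute the ticker offset and periodic advertising event counters
 * needed for the Periodic Advertising Sync Transfer (PAST) procedure, from
 * either the local advertising sync set or a synchronized sync set, and
 * hand the result back to the LLCP.
 */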
static void mfy_past_sender_offset_get(void *param)
{
	uint16_t last_pa_event_counter;
	uint32_t ticker_offset_us;
	uint16_t pa_event_counter;
	uint8_t adv_sync_handle;
	uint16_t sync_handle;
	struct ll_conn *conn;
	uint16_t lazy;

	conn = param;

	/* Get handle to look for */
	ull_lp_past_offset_get_calc_params(conn, &adv_sync_handle, &sync_handle);

	if (adv_sync_handle == BT_HCI_ADV_HANDLE_INVALID &&
	    sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* Procedure must have been aborted, do nothing */
		return;
	}

	if (adv_sync_handle != BT_HCI_ADV_HANDLE_INVALID) {
		const struct ll_adv_sync_set *adv_sync = ull_adv_sync_get(adv_sync_handle);

		LL_ASSERT(adv_sync);

		ticker_offset_us = get_ticker_offset(TICKER_ID_ADV_SYNC_BASE + adv_sync_handle,
						     &lazy);

		pa_event_counter = adv_sync->lll.event_counter;
		last_pa_event_counter = pa_event_counter - 1;
	} else {
		const struct ll_sync_set *sync = ull_sync_is_enabled_get(sync_handle);
		uint32_t window_widening_event_us;
		uint32_t interval_us;

		/* Assert before dereferencing the sync set */
		LL_ASSERT(sync);

		interval_us = sync->interval * PERIODIC_INT_UNIT_US;

		ticker_offset_us = get_ticker_offset(TICKER_ID_SCAN_SYNC_BASE + sync_handle,
						     &lazy);

		if (lazy && ticker_offset_us > interval_us) {
			/* Figure out how many events we have actually skipped */
			lazy = lazy - (ticker_offset_us / interval_us);

			/* Correct offset to point to next event */
			ticker_offset_us = ticker_offset_us % interval_us;
		}

		/* Calculate window widening for next event */
		window_widening_event_us = sync->lll.window_widening_event_us +
					   sync->lll.window_widening_periodic_us * (lazy + 1U);

		/* Correct for window widening */
		ticker_offset_us += window_widening_event_us;

		pa_event_counter = sync->lll.event_counter + lazy;

		last_pa_event_counter = pa_event_counter - 1 - lazy;

		/* Handle unsuccessful events */
		if (sync->timeout_expire) {
			last_pa_event_counter -= sync->timeout_reload - sync->timeout_expire;
		}
	}

	ull_lp_past_offset_calc_reply(conn, ticker_offset_us, pa_event_counter,
				      last_pa_event_counter);
}

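/* Request the PAST sender offset calculation to be performed in the ULL low
 * priority context.
 */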
void ull_conn_past_sender_offset_request(struct ll_conn *conn)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_past_sender_offset_get};
	uint32_t ret;

	mfy.param = conn;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
			     &mfy);
	LL_ASSERT(!ret);
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

uint8_t ull_conn_lll_phy_active(struct ll_conn *conn, uint8_t phys)
{
#if defined(CONFIG_BT_CTLR_PHY)
	if (!(phys & (conn->lll.phy_tx | conn->lll.phy_rx))) {
#else /* !CONFIG_BT_CTLR_PHY */
	if (!(phys & 0x01)) {
#endif /* !CONFIG_BT_CTLR_PHY */
		return 0;
	}
	return 1;
}

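/* Return non-zero when no Tx node remains queued towards LLL for the
 * connection.
 */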
uint8_t ull_is_lll_tx_queue_empty(struct ll_conn *conn)
{
	return (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail, NULL) == NULL);
}