1 /*
2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #if defined(CONFIG_BT_CTLR_RX_PDU_META)
8 #include "lll_meta.h"
9 #endif /* CONFIG_BT_CTLR_RX_PDU_META */
10 
/* Ticker instance and user identifiers. The controller uses a single
 * ticker instance; each ticker user maps onto a mayfly call id so that
 * ticker operations are serviced in the corresponding execution context.
 */
#define TICKER_INSTANCE_ID_CTLR 0
#define TICKER_USER_ID_LLL      MAYFLY_CALL_ID_0
#define TICKER_USER_ID_ULL_HIGH MAYFLY_CALL_ID_1
#define TICKER_USER_ID_ULL_LOW  MAYFLY_CALL_ID_2
#define TICKER_USER_ID_THREAD   MAYFLY_CALL_ID_PROGRAM

/* Maximum number of entries in the event prepare pipeline */
#define EVENT_PIPELINE_MAX 7
/* Number of memq links used for event done signalling; not needed when
 * ULL runs in low latency (single execution context) configuration.
 */
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
#define EVENT_DONE_LINK_CNT 0
#else
#define EVENT_DONE_LINK_CNT 1
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

/* Advertising, scan and connection interval units, in microseconds */
#define ADV_INT_UNIT_US  625U
#define SCAN_INT_UNIT_US 625U
#define CONN_INT_UNIT_US 1250U

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
#define XON_BITMASK BIT(31) /* XTAL has been retained from previous prepare */
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
31 
/* Number of advertising set instances: the Kconfig value when advertising
 * sets are configurable, one (legacy) set otherwise, zero without the
 * broadcaster role.
 */
#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_SET)
#define BT_CTLR_ADV_SET CONFIG_BT_CTLR_ADV_SET
#else /* CONFIG_BT_CTLR_ADV_SET */
#define BT_CTLR_ADV_SET 1
#endif /* CONFIG_BT_CTLR_ADV_SET */
#else /* !CONFIG_BT_BROADCASTER */
#define BT_CTLR_ADV_SET 0
#endif /* !CONFIG_BT_BROADCASTER */

/* Number of scan set instances: two when extended scanning can use both
 * 1M and Coded PHY, one otherwise, zero without the observer role.
 */
#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
#define BT_CTLR_SCAN_SET 2
#else /* !CONFIG_BT_CTLR_PHY_CODED */
#define BT_CTLR_SCAN_SET 1
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
#else /* !CONFIG_BT_CTLR_ADV_EXT */
#define BT_CTLR_SCAN_SET 1
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
#else /* !CONFIG_BT_OBSERVER */
#define BT_CTLR_SCAN_SET 0
#endif /* !CONFIG_BT_OBSERVER */
55 
/* Ticker identifiers, allocated in a fixed order so that each role owns a
 * contiguous id range delimited by its _BASE/_LAST pair, sized by the
 * Kconfig-selected number of instances for that role.
 */
enum {
	TICKER_ID_LLL_PREEMPT = 0,

#if defined(CONFIG_BT_BROADCASTER)
	TICKER_ID_ADV_STOP,
	TICKER_ID_ADV_BASE,
#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT)
	TICKER_ID_ADV_LAST = ((TICKER_ID_ADV_BASE) + (BT_CTLR_ADV_SET) - 1),
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	TICKER_ID_ADV_AUX_BASE,
	TICKER_ID_ADV_AUX_LAST = ((TICKER_ID_ADV_AUX_BASE) +
				  (CONFIG_BT_CTLR_ADV_AUX_SET) - 1),
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	TICKER_ID_ADV_SYNC_BASE,
	TICKER_ID_ADV_SYNC_LAST = ((TICKER_ID_ADV_SYNC_BASE) +
				   (CONFIG_BT_CTLR_ADV_SYNC_SET) - 1),
#if defined(CONFIG_BT_CTLR_ADV_ISO)
	TICKER_ID_ADV_ISO_BASE,
	TICKER_ID_ADV_ISO_LAST = ((TICKER_ID_ADV_ISO_BASE) +
				  (CONFIG_BT_CTLR_ADV_ISO_SET) - 1),
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_AUX_SET > 0 */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_HCI_MESH_EXT */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	TICKER_ID_SCAN_STOP,
	TICKER_ID_SCAN_BASE,
	TICKER_ID_SCAN_LAST = ((TICKER_ID_SCAN_BASE) + (BT_CTLR_SCAN_SET) - 1),
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	TICKER_ID_SCAN_AUX_BASE,
	TICKER_ID_SCAN_AUX_LAST = ((TICKER_ID_SCAN_AUX_BASE) +
				   (CONFIG_BT_CTLR_SCAN_AUX_SET) - 1),
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	TICKER_ID_SCAN_SYNC_BASE,
	TICKER_ID_SCAN_SYNC_LAST = ((TICKER_ID_SCAN_SYNC_BASE) +
				    (CONFIG_BT_PER_ADV_SYNC_MAX) - 1),
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	TICKER_ID_SCAN_SYNC_ISO_BASE,
	TICKER_ID_SCAN_SYNC_ISO_LAST = ((TICKER_ID_SCAN_SYNC_ISO_BASE) +
					(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET) - 1),
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
	TICKER_ID_CONN_BASE,
	TICKER_ID_CONN_LAST = ((TICKER_ID_CONN_BASE) + (CONFIG_BT_MAX_CONN) -
			       1),
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	TICKER_ID_CONN_ISO_BASE,
	TICKER_ID_CONN_ISO_LAST = ((TICKER_ID_CONN_ISO_BASE) +
				   (CONFIG_BT_CTLR_CONN_ISO_GROUPS) - 1),
	TICKER_ID_CONN_ISO_RESUME_BASE,
	TICKER_ID_CONN_ISO_RESUME_LAST = ((TICKER_ID_CONN_ISO_RESUME_BASE) +
					  (CONFIG_BT_CTLR_CONN_ISO_GROUPS) - 1),
#endif /* CONFIG_BT_CTLR_CONN_ISO */

	/* Proprietary (user extension) ticker id range; kept at the end so
	 * standard ids above keep stable values.
	 */
#if defined(CONFIG_BT_CTLR_USER_EXT) && \
	(CONFIG_BT_CTLR_USER_TICKER_ID_RANGE > 0)
	TICKER_ID_USER_BASE,
	TICKER_ID_USER_LAST = (TICKER_ID_USER_BASE +
			       CONFIG_BT_CTLR_USER_TICKER_ID_RANGE - 1),
#endif /* CONFIG_BT_CTLR_USER_EXT */

	TICKER_ID_MAX,
};
129 
/* Without extended advertising or mesh extension there is only one
 * advertising set, and TICKER_ID_ADV_LAST was not emitted as an enum
 * entry above; alias it to TICKER_ID_ADV_BASE instead.
 */
#if defined(CONFIG_BT_BROADCASTER) && !defined(CONFIG_BT_CTLR_ADV_EXT) && \
	!defined(CONFIG_BT_HCI_MESH_EXT)
#define TICKER_ID_ADV_LAST TICKER_ID_ADV_BASE
#endif

/* First ticker id used by ULL contexts (ids below belong to LLL) */
#define TICKER_ID_ULL_BASE ((TICKER_ID_LLL_PREEMPT) + 1)
136 
/* Outcome of an LLL event, reported at event done (see
 * event_done_extra::result under CONFIG_BT_CTLR_JIT_SCHEDULING).
 */
enum done_result {
	DONE_COMPLETED, /* Event ran to completion */
	DONE_ABORTED,   /* Event was aborted */
	DONE_LATE       /* Event was too late to be executed */
};
142 
/* Common header embedded in every ULL context */
struct ull_hdr {
	uint8_t volatile ref;  /* Number of ongoing (between Prepare and Done)
				* events
				*/

	/* Event parameters */
	/* TODO: The intention is to use the greater of the
	 *       ticks_prepare_to_start or ticks_active_to_start as the prepare
	 *       offset. At the prepare tick generate a software interrupt
	 *       servicable by application as the per role configurable advance
	 *       radio event notification, usable for data acquisitions.
	 *       ticks_preempt_to_start is the per role dynamic preempt offset,
	 *       which shall be based on role's preparation CPU usage
	 *       requirements.
	 */
	struct {
		/* All values below are in ticker ticks */
		uint32_t ticks_active_to_start;
		uint32_t ticks_prepare_to_start;
		uint32_t ticks_preempt_to_start;
		uint32_t ticks_slot; /* Air-time reservation for the event */
	};

	/* ULL context disabled callback and its parameter */
	void (*disabled_cb)(void *param);
	void *disabled_param;
};
169 
/* Common header embedded in every LLL context */
struct lll_hdr {
	void *parent; /* Back-pointer to the enclosing ULL context */
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint8_t score;   /* Scheduling score, updated via lll_done_score() */
	uint8_t latency; /* Accumulated event latency */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
};

/* Get the parent (ULL) context from an LLL context pointer */
#define HDR_LLL2ULL(p) (((struct lll_hdr *)(p))->parent)
179 
/* Parameters passed to an LLL prepare callback for one radio event */
struct lll_prepare_param {
	uint32_t ticks_at_expire; /* Ticker expiry, in ticker ticks */
	uint32_t remainder;       /* Sub-tick remainder of the expiry */
	uint16_t lazy;            /* Number of skipped ticker expiries */
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	int8_t  prio;             /* Event priority for JIT scheduling */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	uint8_t force;            /* Force execution of the event */
	void *param;              /* Role-specific LLL context */
};

/* Prepare an LLL event; returns 0 or an error/defer status */
typedef int (*lll_prepare_cb_t)(struct lll_prepare_param *prepare_param);
/* Decide whether `curr` event is aborted in favor of `next`; may supply a
 * resume callback through `resume_cb`.
 */
typedef int (*lll_is_abort_cb_t)(void *next, void *curr,
				 lll_prepare_cb_t *resume_cb);
/* Abort an LLL event identified by its prepare parameters or context */
typedef void (*lll_abort_cb_t)(struct lll_prepare_param *prepare_param,
			       void *param);
196 
/* Entry in the prepare pipeline, binding prepare parameters to the
 * role-specific prepare/is_abort/abort callbacks.
 */
struct lll_event {
	struct lll_prepare_param prepare_param;
	lll_prepare_cb_t         prepare_cb;
	lll_is_abort_cb_t        is_abort_cb;
	lll_abort_cb_t           abort_cb;
	uint8_t                  is_resume:1;  /* Enqueued as a resume event */
	uint8_t                  is_aborted:1; /* Event has been aborted */
};
205 
/* Expands to one user-defined node rx type per index, used with
 * UTIL_LISTIFY below to generate NODE_RX_TYPE_0..NODE_RX_TYPE_(n-1).
 */
#define DEFINE_NODE_RX_USER_TYPE(i, _) NODE_RX_TYPE_##i,

enum node_rx_type {
	/* Unused */
	NODE_RX_TYPE_NONE = 0x00,
	/* Signals release of node */
	NODE_RX_TYPE_RELEASE,
	/* Signals completion of RX event */
	NODE_RX_TYPE_EVENT_DONE,
	/* Signals arrival of RX Data Channel payload */
	NODE_RX_TYPE_DC_PDU,
	/* Signals arrival of isochronous payload */
	NODE_RX_TYPE_ISO_PDU,
	/* Advertisement report from scanning */
	NODE_RX_TYPE_REPORT,
	/* Extended advertising reports (per PHY) and auxiliary PDU events */
	NODE_RX_TYPE_EXT_1M_REPORT,
	NODE_RX_TYPE_EXT_2M_REPORT,
	NODE_RX_TYPE_EXT_CODED_REPORT,
	NODE_RX_TYPE_EXT_AUX_REPORT,
	NODE_RX_TYPE_EXT_AUX_RELEASE,
	NODE_RX_TYPE_EXT_SCAN_TERMINATE,
	/* Periodic advertising sync and ISO (BIG) related events */
	NODE_RX_TYPE_SYNC,
	NODE_RX_TYPE_SYNC_REPORT,
	NODE_RX_TYPE_SYNC_LOST,
	NODE_RX_TYPE_SYNC_CHM_COMPLETE,
	NODE_RX_TYPE_SYNC_ISO,
	NODE_RX_TYPE_SYNC_ISO_LOST,
	NODE_RX_TYPE_EXT_ADV_TERMINATE,
	NODE_RX_TYPE_BIG_COMPLETE,
	NODE_RX_TYPE_BIG_TERMINATE,
	/* Connection establishment and connection-related events */
	NODE_RX_TYPE_SCAN_REQ,
	NODE_RX_TYPE_CONNECTION,
	NODE_RX_TYPE_TERMINATE,
	NODE_RX_TYPE_CONN_UPDATE,
	NODE_RX_TYPE_ENC_REFRESH,
	NODE_RX_TYPE_APTO,
	NODE_RX_TYPE_CHAN_SEL_ALGO,
	NODE_RX_TYPE_PHY_UPDATE,
	NODE_RX_TYPE_RSSI,
	/* Profiling, indications, CIS, mesh and CTE IQ sample events */
	NODE_RX_TYPE_PROFILE,
	NODE_RX_TYPE_ADV_INDICATION,
	NODE_RX_TYPE_SCAN_INDICATION,
	NODE_RX_TYPE_CIS_REQUEST,
	NODE_RX_TYPE_CIS_ESTABLISHED,
	NODE_RX_TYPE_MESH_ADV_CPLT,
	NODE_RX_TYPE_MESH_REPORT,
	NODE_RX_TYPE_IQ_SAMPLE_REPORT,

#if defined(CONFIG_BT_CTLR_USER_EXT)
	/* No entries shall be added after the NODE_RX_TYPE_USER_START/END */
	NODE_RX_TYPE_USER_START,
	UTIL_LISTIFY(CONFIG_BT_CTLR_USER_EVT_RANGE, DEFINE_NODE_RX_USER_TYPE, _)
	NODE_RX_TYPE_USER_END,
#endif /* CONFIG_BT_CTLR_USER_EXT */
};
261 
262 /* Footer of node_rx_hdr */
struct node_rx_ftr {
	union {
		void *param; /* Role/state specific context pointer */
		struct {     /* Extended advertising terminate event info */
			uint8_t  status;
			uint8_t  num_events;
			uint16_t conn_handle;
		} param_adv_term;
	};
	union {
		void *extra;   /* Used as next pointer for extended PDU
				* chaining, to reserve node_rx for CSA#2 event
				* generation etc.
				*/
		void *aux_ptr;
		uint8_t aux_phy;
	};
	uint32_t ticks_anchor; /* Event anchor point, in ticker ticks */
	uint32_t radio_end_us; /* Radio end timestamp, in microseconds */
	uint8_t  rssi;
#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
	/* Auxiliary PDU scan bookkeeping flags.
	 * NOTE(review): aux_lll_sched presumably marks aux scan scheduled
	 * directly from LLL — confirm against scan aux implementation.
	 */
	uint8_t  aux_lll_sched:1;
	uint8_t  aux_w4next:1;
	uint8_t  aux_failed:1;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	uint8_t sync_status:2;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	uint8_t  phy_flags:1;
	uint8_t  scan_req:1;
	uint8_t  scan_rsp:1;
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
	uint8_t  direct:1; /* Directed advertising report */
#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */
#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t  lrpa_used:1; /* Local resolvable private address used */
	uint8_t  rl_idx;      /* Resolving list index */
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_HCI_MESH_EXT)
	uint8_t  chan_idx; /* Channel index the PDU was received on */
#endif /* CONFIG_BT_HCI_MESH_EXT */
};
306 
/* Meta-information for isochronous PDUs in node_rx_hdr */
struct node_rx_iso_meta {
	uint64_t payload_number : 39; /* cisPayloadNumber */
	uint32_t timestamp;           /* Time of reception */
	uint8_t  status;              /* Status of reception (OK/not OK) */
};

/* Define invalid/unassigned Controller state/role instance handle */
#define NODE_RX_HANDLE_INVALID 0xFFFF

/* Define invalid/unassigned Controller LLL context handle */
#define LLL_HANDLE_INVALID     0xFFFF
319 
/* Header of node_rx_pdu */
struct node_rx_hdr {
	/* The three members below are mutually exclusive uses of the same
	 * storage, depending on where the node currently sits in the
	 * LLL -> ULL -> HCI flow.
	 */
	union {
		void        *next;    /* For slist, by hci module */
		memq_link_t *link;    /* Supply memq_link from ULL to LLL */
		uint8_t     ack_last; /* Tx ack queue index at this node rx */
	};

	enum node_rx_type type;
	uint8_t           user_meta; /* User metadata */
	uint16_t          handle;    /* State/Role instance handle */

	/* Type-dependent footer/meta information */
	union {
		struct node_rx_ftr rx_ftr;
#if defined(CONFIG_BT_CTLR_SYNC_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
		struct node_rx_iso_meta rx_iso_meta;
#endif
#if defined(CONFIG_BT_CTLR_RX_PDU_META)
		lll_rx_pdu_meta_t  rx_pdu_meta;
#endif /* CONFIG_BT_CTLR_RX_PDU_META */
	};
};
342 
/* Template node rx type with memory aligned offset to PDU buffer.
 * NOTE: offset to memory aligned pdu buffer location is used to reference
 *       node rx type specific information, like, terminate or sync lost reason
 *       from a dedicated node rx structure storage location.
 */
struct node_rx_pdu {
	struct node_rx_hdr hdr;
	union {
		/* Zero-length array (GNU extension; a flexible array member
		 * is not permitted inside a union in standard C) providing a
		 * 4-byte aligned offset to the trailing PDU buffer.
		 */
		uint8_t    pdu[0] __aligned(4);
	};
};
354 
/* Discriminator values for event_done_extra::type, identifying which role
 * produced the event done information.
 */
enum {
	EVENT_DONE_EXTRA_TYPE_NONE,

#if defined(CONFIG_BT_CONN)
	EVENT_DONE_EXTRA_TYPE_CONN,
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
#if defined(CONFIG_BT_BROADCASTER)
	EVENT_DONE_EXTRA_TYPE_ADV,
	EVENT_DONE_EXTRA_TYPE_ADV_AUX,
#endif /* CONFIG_BT_BROADCASTER */
#endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	EVENT_DONE_EXTRA_TYPE_SCAN,
	EVENT_DONE_EXTRA_TYPE_SCAN_AUX,
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	EVENT_DONE_EXTRA_TYPE_SYNC,
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	EVENT_DONE_EXTRA_TYPE_CIS,
#endif /* CONFIG_BT_CTLR_CONN_ISO */

/* Following proprietary defines must be at end of enum range */
#if defined(CONFIG_BT_CTLR_USER_EXT)
	EVENT_DONE_EXTRA_TYPE_USER_START,
	EVENT_DONE_EXTRA_TYPE_USER_END = EVENT_DONE_EXTRA_TYPE_USER_START +
		CONFIG_BT_CTLR_USER_EVT_RANGE,
#endif /* CONFIG_BT_CTLR_USER_EXT */

};
391 
/* Timing measurements used for peripheral/sync anchor point drift
 * compensation; all values are in microseconds.
 */
struct event_done_extra_drift {
	uint32_t start_to_address_actual_us;
	uint32_t window_widening_event_us;
	uint32_t preamble_to_addr_us;
};
397 
/* Role-specific information carried along with an event done node */
struct event_done_extra {
	uint8_t type; /* One of EVENT_DONE_EXTRA_TYPE_* */
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint8_t result; /* One of enum done_result */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	union {
		struct {
			uint16_t trx_cnt;     /* Tx/Rx count in the event */
			uint8_t  crc_valid:1; /* A PDU with valid CRC received */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
			/* Used to inform ULL that periodic advertising sync scan should be
			 * terminated.
			 */
			uint8_t  sync_term:1;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
#if defined(CONFIG_BT_CTLR_LE_ENC)
			uint8_t  mic_state; /* MIC verification state */
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
			union {
				struct event_done_extra_drift drift;
			};
#endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
		};
	};
};
425 
/* Event done node, passed from LLL to ULL to signal event completion */
struct node_rx_event_done {
	struct node_rx_hdr      hdr;   /* Common rx node header */
	void                    *param; /* Context of the completed event */
	struct event_done_extra extra;  /* Role-specific done information */
};
431 
lll_hdr_init(void * lll,void * parent)432 static inline void lll_hdr_init(void *lll, void *parent)
433 {
434 	struct lll_hdr *hdr = lll;
435 
436 	hdr->parent = parent;
437 
438 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
439 	hdr->score = 0U;
440 	hdr->latency = 0U;
441 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
442 }
443 
/* Update scheduling bookkeeping for a finished event.
 * NOTE(review): presumably feeds lll_hdr::score/latency under JIT
 * scheduling — confirm in the LLL implementation.
 */
void lll_done_score(void *param, uint8_t result);

/* LLL subsystem lifetime and event control */
int lll_init(void);
int lll_reset(void);
void lll_resume(void *param);
void lll_disable(void *param);
void lll_done_sync(void);

/* Radio state queries and PHY-dependent ready delays/power levels */
uint32_t lll_radio_is_idle(void);
uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags);
uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags);
int8_t lll_radio_tx_pwr_min_get(void);
int8_t lll_radio_tx_pwr_max_get(void);
int8_t lll_radio_tx_pwr_floor(int8_t tx_pwr_lvl);

/* Random number retrieval: csrand vs rand pools, with _isr_ variants
 * callable from interrupt context.
 */
int lll_csrand_get(void *buf, size_t len);
int lll_csrand_isr_get(void *buf, size_t len);
int lll_rand_get(void *buf, size_t len);
int lll_rand_isr_get(void *buf, size_t len);
462 
/* Prepare pipeline: enqueue a prepare event, peek/iterate entries, and
 * drain the pipeline from the given caller context.
 */
struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
				      lll_abort_cb_t abort_cb,
				      struct lll_prepare_param *prepare_param,
				      lll_prepare_cb_t prepare_cb,
				      uint8_t is_resume);
void *ull_prepare_dequeue_get(void);
void *ull_prepare_dequeue_iter(uint8_t *idx);
void ull_prepare_dequeue(uint8_t caller_id);

/* Rx PDU buffer allocation (peek before commit), plus ISO variants */
void *ull_pdu_rx_alloc_peek(uint8_t count);
void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx);
void *ull_pdu_rx_alloc(void);
void *ull_iso_pdu_rx_alloc_peek(uint8_t count);
void *ull_iso_pdu_rx_alloc_peek_iter(uint8_t *idx);
void *ull_iso_pdu_rx_alloc(void);

/* Hand rx/done nodes from LLL to ULL and trigger their processing */
void ull_rx_put(memq_link_t *link, void *rx);
void ull_rx_put_done(memq_link_t *link, void *done);
void ull_rx_sched(void);
void ull_rx_sched_done(void);

/* Event done extra accessors and event done signalling */
struct event_done_extra *ull_event_done_extra_get(void);
struct event_done_extra *ull_done_extra_type_set(uint8_t type);
void *ull_event_done(void *param);
484 
/* Request preparation of an LLL event with the given callbacks/priority */
int lll_prepare(lll_is_abort_cb_t is_abort_cb,
		lll_abort_cb_t abort_cb,
		lll_prepare_cb_t prepare_cb, int8_t event_prio,
		struct lll_prepare_param *prepare_param);
/* Enqueue a resume callback to continue a previously preempted event */
int lll_resume_enqueue(lll_prepare_cb_t resume_cb, int resume_prio);
/* Resolve a prepare/resume request against the current pipeline state */
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue);
494