1 /*
2  * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stddef.h>
8 #include <stdbool.h>
9 #include <errno.h>
10 
11 #include <zephyr/kernel.h>
12 #include <soc.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/entropy.h>
15 #include <zephyr/bluetooth/hci_types.h>
16 
17 #include "hal/cpu.h"
18 #include "hal/ccm.h"
19 #include "hal/cntr.h"
20 #include "hal/ticker.h"
21 
22 #include "util/util.h"
23 #include "util/mem.h"
24 #include "util/mfifo.h"
25 #include "util/memq.h"
26 #include "util/mayfly.h"
27 #include "util/dbuf.h"
28 
29 #include "ticker/ticker.h"
30 
31 #include "pdu_df.h"
32 #include "lll/pdu_vendor.h"
33 #include "pdu.h"
34 
35 #include "lll.h"
36 #include "lll/lll_vendor.h"
37 #include "lll/lll_adv_types.h"
38 #include "lll_adv.h"
39 #include "lll/lll_adv_pdu.h"
40 #include "lll_chan.h"
41 #include "lll_scan.h"
42 #include "lll/lll_df_types.h"
43 #include "lll_sync.h"
44 #include "lll_sync_iso.h"
45 #include "lll_iso_tx.h"
46 #include "lll_conn.h"
47 #include "lll_conn_iso.h"
48 #include "lll_df.h"
49 
50 #include "ull_adv_types.h"
51 #include "ull_scan_types.h"
52 #include "ull_sync_types.h"
53 #include "ll_sw/ull_tx_queue.h"
54 #include "ull_conn_types.h"
55 #include "ull_filter.h"
56 #include "ull_df_types.h"
57 #include "ull_df_internal.h"
58 
59 #if defined(CONFIG_BT_CTLR_USER_EXT)
60 #include "ull_vendor.h"
61 #endif /* CONFIG_BT_CTLR_USER_EXT */
62 
63 #include "isoal.h"
64 #include "ll_feat_internal.h"
65 #include "ull_internal.h"
66 #include "ull_chan_internal.h"
67 #include "ull_iso_internal.h"
68 #include "ull_adv_internal.h"
69 #include "ull_scan_internal.h"
70 #include "ull_sync_internal.h"
71 #include "ull_sync_iso_internal.h"
72 #include "ull_central_internal.h"
73 #include "ull_iso_types.h"
74 #include "ull_conn_internal.h"
75 #include "ull_conn_iso_types.h"
76 #include "ull_central_iso_internal.h"
77 #include "ull_llcp.h"
78 
79 #include "ull_conn_iso_internal.h"
80 #include "ull_peripheral_iso_internal.h"
81 
82 #include "ll.h"
83 #include "ll_feat.h"
84 #include "ll_test.h"
85 #include "ll_settings.h"
86 
87 #include "hal/debug.h"
88 
89 #if defined(CONFIG_BT_BROADCASTER)
90 #define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
91 #if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
92 #define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
93 				 (TICKER_ID_ADV_AUX_BASE) + 1)
94 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
95 #define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
96 				  (TICKER_ID_ADV_SYNC_BASE) + 1)
97 #if defined(CONFIG_BT_CTLR_ADV_ISO)
98 #define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
99 				  (TICKER_ID_ADV_ISO_BASE) + 1)
100 #else /* !CONFIG_BT_CTLR_ADV_ISO */
101 #define BT_ADV_ISO_TICKER_NODES 0
102 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
103 #else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
104 #define BT_ADV_SYNC_TICKER_NODES 0
105 #define BT_ADV_ISO_TICKER_NODES 0
106 #endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
107 #else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
108 #define BT_ADV_AUX_TICKER_NODES 0
109 #define BT_ADV_SYNC_TICKER_NODES 0
110 #define BT_ADV_ISO_TICKER_NODES 0
111 #endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
112 #else /* !CONFIG_BT_BROADCASTER */
113 #define BT_ADV_TICKER_NODES 0
114 #define BT_ADV_AUX_TICKER_NODES 0
115 #define BT_ADV_SYNC_TICKER_NODES 0
116 #define BT_ADV_ISO_TICKER_NODES 0
117 #endif /* !CONFIG_BT_BROADCASTER */
118 
119 #if defined(CONFIG_BT_OBSERVER)
120 #define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
121 #if defined(CONFIG_BT_CTLR_ADV_EXT)
122 #define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
123 				  (TICKER_ID_SCAN_AUX_BASE) + 1)
124 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
125 #define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
126 				   (TICKER_ID_SCAN_SYNC_BASE) + 1)
127 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
128 #define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
129 				       (TICKER_ID_SCAN_SYNC_ISO_BASE) + 1)
130 #else /* !CONFIG_BT_CTLR_SYNC_ISO */
131 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
132 #endif /* !CONFIG_BT_CTLR_SYNC_ISO */
133 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
134 #define BT_SCAN_SYNC_TICKER_NODES 0
135 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
136 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
137 #else /* !CONFIG_BT_CTLR_ADV_EXT */
138 #define BT_SCAN_AUX_TICKER_NODES 0
139 #define BT_SCAN_SYNC_TICKER_NODES 0
140 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
141 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
142 #else
143 #define BT_SCAN_TICKER_NODES 0
144 #define BT_SCAN_AUX_TICKER_NODES 0
145 #define BT_SCAN_SYNC_TICKER_NODES 0
146 #define BT_SCAN_SYNC_ISO_TICKER_NODES 0
147 #endif
148 
149 #if defined(CONFIG_BT_CONN)
150 #define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
151 #else
152 #define BT_CONN_TICKER_NODES 0
153 #endif
154 
155 #if defined(CONFIG_BT_CTLR_CONN_ISO)
156 #define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
157 			     (TICKER_ID_CONN_ISO_BASE) + 1 + \
158 			     (TICKER_ID_CONN_ISO_RESUME_LAST) - \
159 			     (TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
160 
161 #else
162 #define BT_CIG_TICKER_NODES 0
163 #endif
164 
165 #if defined(CONFIG_BT_CTLR_USER_EXT)
166 #define USER_TICKER_NODES         CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
167 #else
168 #define USER_TICKER_NODES         0
169 #endif
170 
171 
172 #if defined(CONFIG_BT_CTLR_COEX_TICKER)
173 #define COEX_TICKER_NODES             1
174 					/* No. of tickers reserved for coex drivers */
175 #else
176 #define COEX_TICKER_NODES             0
177 #endif
178 
179 
180 #if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
181 #define FLASH_TICKER_NODES             2 /* No. of tickers reserved for flash
182 					  * driver
183 					  */
184 #define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
185 					  * context operations
186 					  */
187 #define TICKER_USER_THREAD_FLASH_OPS   1 /* No. of additional ticker thread
188 					  * context operations
189 					  */
190 #else
191 #define FLASH_TICKER_NODES             0
192 #define TICKER_USER_ULL_HIGH_FLASH_OPS 0
193 #define TICKER_USER_THREAD_FLASH_OPS   0
194 #endif
195 
196 /* Define ticker nodes */
197 /* NOTE: FLASH_TICKER_NODES shall be placed after the Link Layer's list of
198  *       ticker id allocations; refer to ll_timeslice_ticker_id_get on how the
199  *       ticker id used by the flash driver is returned.
200  */
201 #define TICKER_NODES              (TICKER_ID_ULL_BASE + \
202 				   BT_ADV_TICKER_NODES + \
203 				   BT_ADV_AUX_TICKER_NODES + \
204 				   BT_ADV_SYNC_TICKER_NODES + \
205 				   BT_ADV_ISO_TICKER_NODES + \
206 				   BT_SCAN_TICKER_NODES + \
207 				   BT_SCAN_AUX_TICKER_NODES + \
208 				   BT_SCAN_SYNC_TICKER_NODES + \
209 				   BT_SCAN_SYNC_ISO_TICKER_NODES + \
210 				   BT_CONN_TICKER_NODES + \
211 				   BT_CIG_TICKER_NODES + \
212 				   USER_TICKER_NODES + \
213 				   FLASH_TICKER_NODES + \
214 				   COEX_TICKER_NODES)
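
/* Illustrative layout sketch (not an additional configuration): the flash and
 * coex ticker ids are allocated at the very end of the node array, after all
 * Link Layer ticker ids. Hence ll_timeslice_ticker_id_get() returns
 * (TICKER_NODES - FLASH_TICKER_NODES - COEX_TICKER_NODES) as the first flash
 * ticker id, and ll_coex_ticker_id_get() returns
 * (TICKER_NODES - COEX_TICKER_NODES) as the coex ticker id.
 */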
215 
216 /* When both central and peripheral are supported, one Rx node each will be
217  * needed by connectable advertising and by the initiator to generate the
218  * connection complete event, hence the count is set conditionally.
219  */
220 #if defined(CONFIG_BT_MAX_CONN)
221 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
222 #define BT_CTLR_MAX_CONNECTABLE (1U + MIN(((CONFIG_BT_MAX_CONN) - 1U), \
223 					  (BT_CTLR_ADV_SET)))
224 #else
225 #define BT_CTLR_MAX_CONNECTABLE MAX(1U, (BT_CTLR_ADV_SET))
226 #endif
227 #define BT_CTLR_MAX_CONN        CONFIG_BT_MAX_CONN
228 #else
229 #define BT_CTLR_MAX_CONNECTABLE 0
230 #define BT_CTLR_MAX_CONN        0
231 #endif
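
/* Worked example of the above (assumed example values, not Kconfig defaults):
 * with CONFIG_BT_MAX_CONN = 3 and BT_CTLR_ADV_SET = 1 on a build supporting
 * both central and peripheral roles,
 *   BT_CTLR_MAX_CONNECTABLE = 1 + MIN(3 - 1, 1) = 2,
 * i.e. one Rx node reserved for the initiator and one for connectable
 * advertising.
 */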
232 
233 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
234 #if defined(CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX)
235 /* Note: Need node for PDU and CTE sample */
236 #define BT_CTLR_ADV_EXT_RX_CNT  (CONFIG_BT_CTLR_SCAN_AUX_SET * \
237 				 CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
238 #else /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
239 #define BT_CTLR_ADV_EXT_RX_CNT  1
240 #endif /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
241 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
242 #define BT_CTLR_ADV_EXT_RX_CNT  0
243 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
244 
245 #if !defined(TICKER_USER_LLL_VENDOR_OPS)
246 #define TICKER_USER_LLL_VENDOR_OPS 0
247 #endif /* TICKER_USER_LLL_VENDOR_OPS */
248 
249 #if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
250 #define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
251 #endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
252 
253 #if !defined(TICKER_USER_ULL_LOW_VENDOR_OPS)
254 #define TICKER_USER_ULL_LOW_VENDOR_OPS 0
255 #endif /* TICKER_USER_ULL_LOW_VENDOR_OPS */
256 
257 #if !defined(TICKER_USER_THREAD_VENDOR_OPS)
258 #define TICKER_USER_THREAD_VENDOR_OPS 0
259 #endif /* TICKER_USER_THREAD_VENDOR_OPS */
260 
261 /* Define ticker user operations */
262 #if defined(CONFIG_BT_CTLR_LOW_LAT) && \
263 	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
264 /* NOTE: When ticker job is disabled inside radio events, all advertising,
265  *       scanning, and peripheral latency cancel ticker operations will be deferred,
266  *       requiring increased ticker thread context operation queue count.
267  */
268 #define TICKER_USER_THREAD_OPS   (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
269 				  BT_CTLR_MAX_CONN + \
270 				  TICKER_USER_THREAD_VENDOR_OPS + \
271 				  TICKER_USER_THREAD_FLASH_OPS + \
272 				  1)
273 #else /* !CONFIG_BT_CTLR_LOW_LAT */
274 /* NOTE: As ticker job is not disabled inside radio events, no need for extra
275  *       thread operations queue element for flash driver.
276  */
277 #define TICKER_USER_THREAD_OPS   (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
278 #endif /* !CONFIG_BT_CTLR_LOW_LAT */
279 
280 #define TICKER_USER_ULL_LOW_OPS  (1 + TICKER_USER_ULL_LOW_VENDOR_OPS + 1)
281 
282 /* NOTE: Extended Advertising needs one extra ticker operation being enqueued
283  *       for scheduling the auxiliary PDU reception while there can already
284  *       be three other operations being enqueued.
285  *
286  *       This value also covers the case where an initiator with 1M and Coded PHY
287  *       scan window is stopping the two scan tickers, stopping one scan stop
288  *       ticker and starting one new ticker for establishing an ACL connection.
289  */
290 #if defined(CONFIG_BT_CTLR_ADV_EXT)
291 #define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
292 				  TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
293 #else /* !CONFIG_BT_CTLR_ADV_EXT */
294 #define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
295 				  TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
296 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
297 
298 #define TICKER_USER_LLL_OPS      (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
299 
300 #define TICKER_USER_OPS           (TICKER_USER_LLL_OPS + \
301 				   TICKER_USER_ULL_HIGH_OPS + \
302 				   TICKER_USER_ULL_LOW_OPS + \
303 				   TICKER_USER_THREAD_OPS)
304 
305 /* Memory for ticker nodes/instances */
306 static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
307 
308 /* Memory for users/contexts operating on ticker module */
309 static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
310 
311 /* Memory for user/context simultaneous API operations */
312 static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
313 
314 /* Semaphore to wakeup thread on ticker API callback */
315 static struct k_sem sem_ticker_api_cb;
316 
317 /* Semaphore to wakeup thread on Rx-ed objects */
318 static struct k_sem *sem_recv;
319 
320 /* Declare prepare-event FIFO: mfifo_prep.
321  * Queue of struct lll_event.
322  */
323 static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
324 
325 /* Declare done-event RXFIFO. This is a composite pool-backed MFIFO for rx_nodes.
326  * The declaration constructs the following data structures:
327  * - mfifo_done:    FIFO with pointers to struct node_rx_event_done
328  * - mem_done:      Backing data pool for struct node_rx_event_done elements
329  * - mem_link_done: Pool of memq_link_t elements
330  *
331  * Queue of pointers to struct node_rx_event_done.
332  * The actual backing behind these pointers is mem_done.
333  *
334  * When there are radio events with time reservations lower than the preemption
335  * timeout of 1.5 ms, the pipeline has to account for the maximum radio events
336  * that can be enqueued during the preempt timeout duration. All these enqueued
337  * events could be aborted in case of late scheduling, needing as many done
338  * event buffers.
339  *
340  * During continuous scanning, there can be 1 active radio event, 1 scan resume
341  * and 1 new scan prepare. If there are peripheral prepares in addition, and due
342  * to late scheduling all these will abort needing 4 done buffers.
343  *
344  * If there are additional peripheral prepares enqueued, which are apart by
345  * their time reservations, these are not yet late and hence no more additional
346  * done buffers are needed.
347  *
348  * If Extended Scanning is supported, then an additional auxiliary scan event's
349  * prepare could be enqueued in the pipeline during the preemption duration.
350  *
351  * If Extended Scanning with Coded PHY is supported, then an additional 1 resume
352  * prepare could be enqueued in the pipeline during the preemption duration.
353  */
354 #if !defined(VENDOR_EVENT_DONE_MAX)
355 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
356 #if defined(CONFIG_BT_CTLR_PHY_CODED)
357 #define EVENT_DONE_MAX 6
358 #else /* !CONFIG_BT_CTLR_PHY_CODED */
359 #define EVENT_DONE_MAX 5
360 #endif /* !CONFIG_BT_CTLR_PHY_CODED */
361 #else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
362 #define EVENT_DONE_MAX 4
363 #endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
364 #else
365 #define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
366 #endif
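
/* Worked count behind the defaults above (a sketch of the reasoning in the
 * preceding comment): 1 active radio event + 1 scan resume + 1 new scan
 * prepare + 1 peripheral prepare = 4; plus 1 auxiliary scan prepare when
 * Extended Scanning is supported = 5; plus 1 resume prepare when Coded PHY
 * scanning is supported = 6.
 */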
367 
368 /* Maximum time allowed for completing synchronous LLL disabling via
369  * ull_disable.
370  */
371 #define ULL_DISABLE_TIMEOUT K_MSEC(1000)
372 
373 static RXFIFO_DEFINE(done, sizeof(struct node_rx_event_done),
374 		     EVENT_DONE_MAX, 0U);
375 
376 /* Minimum number of node rx for ULL to LL/HCI thread per connection.
377  * Increasing this by times the max. simultaneous connection count will permit
378  * simultaneous parallel PHY update or Connection Update procedures amongst
379  * active connections.
380  * Minimum node rx of 2 that can be reserved happens when:
381  *   Central and peripheral always use two new nodes for handling completion
382  *   notification: one for PHY Update complete and another for Data Length
383  *   Update complete.
384  */
385 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
386 #define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
387 #elif defined(CONFIG_BT_CONN)
388 #define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
389 #else
390 #define LL_PDU_RX_CNT 0
391 #endif
392 
393 /* No. of node rx for LLL to ULL.
394  * Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
395  */
396 #define PDU_RX_CNT    (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
397 
398 /* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
399  * Will be used below in allocating node rx pool.
400  */
401 #define RX_CNT        (PDU_RX_CNT + LL_PDU_RX_CNT)
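
/* Worked example (assumed example values, not Kconfig defaults): with
 * BT_CTLR_ADV_EXT_RX_CNT = 1 and CONFIG_BT_CTLR_RX_BUFFERS = 3,
 * PDU_RX_CNT = 3 + 1 + 3 = 7; with CONFIG_BT_CTLR_LLCP_CONN = 2 and both
 * Data Length Update and PHY Update supported, LL_PDU_RX_CNT = 2 * 2 = 4,
 * giving RX_CNT = 7 + 4 = 11.
 */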
402 
403 static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
404 
405 #if defined(CONFIG_BT_RX_USER_PDU_LEN)
406 #define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
407 #else
408 #define PDU_RX_USER_PDU_OCTETS_MAX 0
409 #endif
410 
411 #define PDU_ADV_SIZE  MAX(PDU_AC_LL_SIZE_MAX, \
412 			  (PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
413 
414 #define PDU_DATA_SIZE MAX((PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX), \
415 			  (PDU_BIS_LL_HEADER_SIZE + LL_BIS_OCTETS_RX_MAX))
416 
417 #define PDU_CTRL_SIZE (PDU_DC_LL_HEADER_SIZE + PDU_DC_CTRL_RX_SIZE_MAX)
418 
419 #define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
420 
421 #define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
422 					     MAX(MAX(PDU_ADV_SIZE, \
423 						     MAX(PDU_DATA_SIZE, \
424 							 PDU_CTRL_SIZE)), \
425 						 PDU_RX_USER_PDU_OCTETS_MAX))
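
/* Shape of the element size computation above (hypothetical octet counts,
 * purely for illustration): if NODE_RX_HEADER_SIZE were 40 octets and the
 * largest PDU variant 2 + 251 octets, each pool element would occupy
 * MROUND(40 + 253) bytes, i.e. the sum rounded up by MROUND() to keep pool
 * elements aligned.
 */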
426 
427 #if defined(CONFIG_BT_CTLR_ADV_ISO_SET)
428 #define BT_CTLR_ADV_ISO_SET CONFIG_BT_CTLR_ADV_ISO_SET
429 #else
430 #define BT_CTLR_ADV_ISO_SET 0
431 #endif
432 
433 #if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
434 #define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
435 #else
436 #define BT_CTLR_SCAN_SYNC_SET 0
437 #endif
438 
439 #if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
440 #define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
441 #else
442 #define BT_CTLR_SCAN_SYNC_ISO_SET 0
443 #endif
444 
445 #define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
446 			  (RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
447 			   BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
448 
449 /* Macros for encoding number of completed packets.
450  *
451  * If the pointer is numerically below 0x100, the pointer is treated as either
452  * data or control PDU.
453  *
454  * NOTE: For any architecture which would map RAM below address 0x100, this will
455  * not work.
456  */
457 #define IS_NODE_TX_PTR(_p) ((uint32_t)(_p) & ~0xFFUL)
458 #define IS_NODE_TX_DATA(_p) ((uint32_t)(_p) == 0x01UL)
459 #define IS_NODE_TX_CTRL(_p) ((uint32_t)(_p) == 0x02UL)
460 #define NODE_TX_DATA_SET(_p) ((_p) = (void *)0x01UL)
461 #define NODE_TX_CTRL_SET(_p) ((_p) = (void *)0x02UL)
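
/* Minimal usage sketch of the pointer-tagging macros above (hypothetical
 * local variable, for illustration only):
 *
 *   void *node = NULL;
 *
 *   NODE_TX_DATA_SET(node);                // node == (void *)0x01
 *   if (!IS_NODE_TX_PTR(node)) {           // numerically below 0x100
 *           if (IS_NODE_TX_DATA(node)) {
 *                   // account one completed data PDU, nothing to release
 *           }
 *   }
 */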
462 
463 /* Macros for encoding number of ISO SDU fragments in the enqueued TX node
464  * pointer. This is needed to ensure only a single release of the node and link
465  * in tx_cmplt_get, even when called several times. At all times, the number of
466  * fragments must be available for HCI complete-counting.
467  *
468  * If the pointer is numerically below 0x100, the pointer is treated as a one
469  * byte fragments count.
470  *
471  * NOTE: For any architecture which would map RAM below address 0x100, this will
472  * not work.
473  */
474 #define NODE_TX_FRAGMENTS_GET(_p) ((uint32_t)(_p) & 0xFFUL)
475 #define NODE_TX_FRAGMENTS_SET(_p, _cmplt) ((_p) = (void *)(uint32_t)(_cmplt))
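
/* Sketch of the fragments encoding (illustration only): once an ISO SDU's
 * last fragment is acknowledged, the enqueued pointer can be replaced by the
 * outstanding fragment count, so later passes of tx_cmplt_get() keep counting
 * HCI completions without releasing the node and link again:
 *
 *   NODE_TX_FRAGMENTS_SET(tx->node, 3U);              // 3 completions owed
 *   cmplt = NODE_TX_FRAGMENTS_GET(tx->node);          // cmplt == 3
 */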
476 
477 static struct {
478 	void *free;
479 	uint8_t pool[PDU_RX_POOL_SIZE];
480 } mem_pdu_rx;
481 
482 /* NOTE: Two memq_link structures are reserved in the case of periodic sync,
483  * one each for sync established and sync lost respectively. Whereas, in
484  * comparison, a connection uses the incoming Rx-ed CONNECT_IND PDU to
485  * piggyback generation of the connection complete event, and hence only one
486  * link is reserved for the generation of the disconnection event (which can
487  * happen due to supervision timeout and other reasons that don't have an
488  * incoming Rx-ed PDU).
489  */
490 #define LINK_RX_POOL_SIZE                                                      \
491 	(sizeof(memq_link_t) *                                                 \
492 	 (RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET +                    \
493 	  (BT_CTLR_ADV_ISO_SET * 2) + (BT_CTLR_SCAN_SYNC_SET * 2) +            \
494 	  (BT_CTLR_SCAN_SYNC_ISO_SET * 2) +                                    \
495 	  (IQ_REPORT_CNT)))
496 static struct {
497 	uint16_t quota_pdu; /* Number of un-utilized buffers */
498 
499 	void *free;
500 	uint8_t pool[LINK_RX_POOL_SIZE];
501 } mem_link_rx;
502 
503 static MEMQ_DECLARE(ull_rx);
504 static MEMQ_DECLARE(ll_rx);
505 
506 #if defined(CONFIG_BT_CONN)
507 static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
508 
509 static void *mark_update;
510 #endif /* CONFIG_BT_CONN */
511 
512 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
513 #if defined(CONFIG_BT_CONN)
514 #define BT_BUF_ACL_TX_COUNT CONFIG_BT_BUF_ACL_TX_COUNT
515 #else
516 #define BT_BUF_ACL_TX_COUNT 0
517 #endif /* CONFIG_BT_CONN */
518 
519 static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
520 		    BT_BUF_ACL_TX_COUNT + BT_CTLR_ISO_TX_BUFFERS);
521 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
522 
523 static void *mark_disable;
524 
525 static inline int init_reset(void);
526 static void perform_lll_reset(void *param);
527 static inline void *mark_set(void **m, void *param);
528 static inline void *mark_unset(void **m, void *param);
529 static inline void *mark_get(void *m);
530 static void rx_replenish_all(void);
531 #if defined(CONFIG_BT_CONN) || \
532 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
533 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
534 	defined(CONFIG_BT_CTLR_ADV_ISO)
535 static void rx_release_replenish(struct node_rx_hdr *rx);
536 static void rx_link_dequeue_release_quota_inc(memq_link_t *link);
537 #endif /* CONFIG_BT_CONN ||
538 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
539 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
540 	* CONFIG_BT_CTLR_ADV_ISO
541 	*/
542 static void rx_demux(void *param);
543 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
544 static void rx_demux_yield(void);
545 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
546 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
547 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
548 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
549 					memq_link_t *link,
550 					struct node_tx *node_tx);
551 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
552 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx_hdr);
553 static inline void rx_demux_event_done(memq_link_t *link,
554 				       struct node_rx_event_done *done);
555 static void ll_rx_link_quota_inc(void);
556 static void ll_rx_link_quota_dec(void);
557 static void disabled_cb(void *param);
558 
559 int ll_init(struct k_sem *sem_rx)
560 {
561 	static bool mayfly_initialized;
562 	int err;
563 
564 	/* Store the semaphore to be used to wakeup Thread context */
565 	sem_recv = sem_rx;
566 
567 	/* Initialize counter */
568 	/* TODO: Bind and use counter driver? */
569 	cntr_init();
570 
571 	/* Initialize mayfly. It may be done only once due to mayfly design.
572 	 *
573 	 * On init mayfly memq head and tail is assigned with a link instance
574 	 * that is used during enqueue operation. New link provided by enqueue
575 	 * is added as a tail and will be used in future enqueue. While dequeue,
576 	 * the link that was used for storage of the job is released and stored
577 	 * in a job it was related to. The job may store initial link. If mayfly
578 	 * is re-initialized but job objects were not re-initialized there is a
579 	 * risk that enqueued job will point to the same link as it is in a memq
580 	 * just after re-initialization. After enqueue operation with that link,
581 	 * head and tail still points to the same link object, so memq is
582 	 * considered as empty.
583 	 */
584 	if (!mayfly_initialized) {
585 		mayfly_init();
586 		mayfly_initialized = true;
587 	}
588 
589 
590 	/* Initialize Ticker */
591 	ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
592 	ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
593 	ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
594 	ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
595 
596 	err = ticker_init(TICKER_INSTANCE_ID_CTLR,
597 			  TICKER_NODES, &ticker_nodes[0],
598 			  MAYFLY_CALLER_COUNT, &ticker_users[0],
599 			  TICKER_USER_OPS, &ticker_user_ops[0],
600 			  hal_ticker_instance0_caller_id_get,
601 			  hal_ticker_instance0_sched,
602 			  hal_ticker_instance0_trigger_set);
603 	LL_ASSERT(!err);
604 
605 	/* Initialize semaphore for ticker API blocking wait */
606 	k_sem_init(&sem_ticker_api_cb, 0, 1);
607 
608 	/* Initialize LLL */
609 	err = lll_init();
610 	if (err) {
611 		return err;
612 	}
613 
614 	/* Initialize ULL internals */
615 	/* TODO: globals? */
616 
617 	/* Common to init and reset */
618 	err = init_reset();
619 	if (err) {
620 		return err;
621 	}
622 
623 #if defined(CONFIG_BT_BROADCASTER)
624 	err = lll_adv_init();
625 	if (err) {
626 		return err;
627 	}
628 
629 	err = ull_adv_init();
630 	if (err) {
631 		return err;
632 	}
633 #endif /* CONFIG_BT_BROADCASTER */
634 
635 #if defined(CONFIG_BT_OBSERVER)
636 	err = lll_scan_init();
637 	if (err) {
638 		return err;
639 	}
640 
641 	err = ull_scan_init();
642 	if (err) {
643 		return err;
644 	}
645 #endif /* CONFIG_BT_OBSERVER */
646 
647 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
648 	err = lll_sync_init();
649 	if (err) {
650 		return err;
651 	}
652 
653 	err = ull_sync_init();
654 	if (err) {
655 		return err;
656 	}
657 
658 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
659 	err = ull_sync_iso_init();
660 	if (err) {
661 		return err;
662 	}
663 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
664 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
665 
666 #if defined(CONFIG_BT_CONN)
667 	err = lll_conn_init();
668 	if (err) {
669 		return err;
670 	}
671 
672 	err = ull_conn_init();
673 	if (err) {
674 		return err;
675 	}
676 #endif /* CONFIG_BT_CONN */
677 
678 #if defined(CONFIG_BT_CTLR_DF)
679 	err = ull_df_init();
680 	if (err) {
681 		return err;
682 	}
683 #endif
684 
685 #if defined(CONFIG_BT_CTLR_ISO)
686 	err = ull_iso_init();
687 	if (err) {
688 		return err;
689 	}
690 #endif /* CONFIG_BT_CTLR_ISO */
691 
692 #if defined(CONFIG_BT_CTLR_CONN_ISO)
693 	err = ull_conn_iso_init();
694 	if (err) {
695 		return err;
696 	}
697 #endif /* CONFIG_BT_CTLR_CONN_ISO */
698 
699 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
700 	err = ull_peripheral_iso_init();
701 	if (err) {
702 		return err;
703 	}
704 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
705 
706 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
707 	err = ull_central_iso_init();
708 	if (err) {
709 		return err;
710 	}
711 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
712 
713 #if defined(CONFIG_BT_CTLR_ADV_ISO)
714 	err = ull_adv_iso_init();
715 	if (err) {
716 		return err;
717 	}
718 #endif /* CONFIG_BT_CTLR_ADV_ISO */
719 
720 #if defined(CONFIG_BT_CTLR_DF)
721 	err = lll_df_init();
722 	if (err) {
723 		return err;
724 	}
725 #endif
726 
727 #if defined(CONFIG_BT_CTLR_USER_EXT)
728 	err = ull_user_init();
729 	if (err) {
730 		return err;
731 	}
732 #endif /* CONFIG_BT_CTLR_USER_EXT */
733 
734 	/* reset filter accept list, resolving list and initialise RPA timeout */
735 	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
736 		ull_filter_reset(true);
737 	}
738 
739 #if defined(CONFIG_BT_CTLR_TEST)
740 	lll_chan_sel_2_ut();
741 #endif /* CONFIG_BT_CTLR_TEST */
742 
743 	return 0;
744 }
745 
746 int ll_deinit(void)
747 {
748 	ll_reset();
749 	return lll_deinit();
750 }
751 
752 void ll_reset(void)
753 {
754 	int err;
755 
756 	/* Note: The sequence of reset control flow is as follows:
757 	 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
758 	 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
759 	 *   variables, if any.
760 	 * - Reset ULL static variables (which otherwise are mem-zeroed only in
761 	 *   case of power-on reset, wherein the architecture startup mem-zeroes
762 	 *   .bss sections).
763 	 * - Initialize ULL context variable, similar to on-power-up.
764 	 */
765 
766 #if defined(CONFIG_BT_BROADCASTER)
767 #if defined(CONFIG_BT_CTLR_ADV_ISO)
768 	/* Reset adv iso sets */
769 	err = ull_adv_iso_reset();
770 	LL_ASSERT(!err);
771 #endif /* CONFIG_BT_CTLR_ADV_ISO */
772 
773 	/* Reset adv state */
774 	err = ull_adv_reset();
775 	LL_ASSERT(!err);
776 #endif /* CONFIG_BT_BROADCASTER */
777 
778 #if defined(CONFIG_BT_OBSERVER)
779 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
780 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
781 	/* Reset sync iso sets */
782 	err = ull_sync_iso_reset();
783 	LL_ASSERT(!err);
784 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
785 
786 	/* Reset periodic sync sets */
787 	err = ull_sync_reset();
788 	LL_ASSERT(!err);
789 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
790 
791 	/* Reset scan state */
792 	err = ull_scan_reset();
793 	LL_ASSERT(!err);
794 #endif /* CONFIG_BT_OBSERVER */
795 
796 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
797 	err = ull_peripheral_iso_reset();
798 	LL_ASSERT(!err);
799 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
800 
801 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
802 	err = ull_central_iso_reset();
803 	LL_ASSERT(!err);
804 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
805 
806 #if defined(CONFIG_BT_CTLR_CONN_ISO)
807 	err = ull_conn_iso_reset();
808 	LL_ASSERT(!err);
809 #endif /* CONFIG_BT_CTLR_CONN_ISO */
810 
811 #if defined(CONFIG_BT_CTLR_ISO)
812 	err = ull_iso_reset();
813 	LL_ASSERT(!err);
814 #endif /* CONFIG_BT_CTLR_ISO */
815 
816 #if defined(CONFIG_BT_CONN)
817 	/* Reset conn role */
818 	err = ull_conn_reset();
819 	LL_ASSERT(!err);
820 
821 	MFIFO_INIT(tx_ack);
822 #endif /* CONFIG_BT_CONN */
823 
824 	/* reset filter accept list and resolving list */
825 	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
826 		ull_filter_reset(false);
827 	}
828 
829 	/* Re-initialize ULL internals */
830 
831 	/* Re-initialize the prep mfifo */
832 	MFIFO_INIT(prep);
833 
834 	/* Re-initialize the free rx mfifo */
835 	MFIFO_INIT(pdu_rx_free);
836 
837 #if defined(CONFIG_BT_CONN)
838 	/* Re-initialize the free ll rx mfifo */
839 	MFIFO_INIT(ll_pdu_rx_free);
840 #endif /* CONFIG_BT_CONN */
841 
842 	/* Reset LLL via mayfly */
843 	{
844 		static memq_link_t link;
845 		static struct mayfly mfy = {0, 0, &link, NULL,
846 					    perform_lll_reset};
847 		uint32_t retval;
848 
849 		/* NOTE: If Zero Latency Interrupt is used, then LLL context
850 		 *       will be the highest priority IRQ in the system, hence
851 		 *       mayfly_enqueue will be done running the callee inline
852 		 *       (vector to the callee function) in this function. Else
853 		 *       we use semaphore to wait for perform_lll_reset to
854 		 *       complete.
855 		 */
856 
857 #if !defined(CONFIG_BT_CTLR_ZLI)
858 		struct k_sem sem;
859 
860 		k_sem_init(&sem, 0, 1);
861 		mfy.param = &sem;
862 #endif /* !CONFIG_BT_CTLR_ZLI */
863 
864 		retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
865 					TICKER_USER_ID_LLL, 0, &mfy);
866 		LL_ASSERT(!retval);
867 
868 #if !defined(CONFIG_BT_CTLR_ZLI)
869 		/* LLL reset must complete before returning - wait for
870 		 * reset completion in LLL mayfly thread
871 		 */
872 		k_sem_take(&sem, K_FOREVER);
873 #endif /* !CONFIG_BT_CTLR_ZLI */
874 	}
875 
876 #if defined(CONFIG_BT_BROADCASTER)
877 	/* Finalize after adv state LLL context reset */
878 	err = ull_adv_reset_finalize();
879 	LL_ASSERT(!err);
880 #endif /* CONFIG_BT_BROADCASTER */
881 
882 	/* Reset/End DTM Tx or Rx commands */
883 	if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
884 		uint16_t num_rx;
885 
886 		(void)ll_test_end(&num_rx);
887 		ARG_UNUSED(num_rx);
888 	}
889 
890 	/* Common to init and reset */
891 	err = init_reset();
892 	LL_ASSERT(!err);
893 
894 #if defined(CONFIG_BT_CTLR_DF)
895 	/* Direction Finding has to be reset after ull init_reset call because
896 	 *  it uses mem_link_rx for node_rx_iq_report. The mem_link_rx is reset
897 	 *  in common ull init_reset.
898 	 */
899 	err = ull_df_reset();
900 	LL_ASSERT(!err);
901 #endif
902 
903 #if defined(CONFIG_BT_CTLR_SET_HOST_FEATURE)
904 	ll_feat_reset();
905 #endif /* CONFIG_BT_CTLR_SET_HOST_FEATURE */
906 
907 	/* clear static random address */
908 	(void)ll_addr_set(1U, NULL);
909 }
910 
911 /**
912  * @brief Peek the next node_rx to send up to Host
913  * @details Tightly coupled with prio_recv_thread()
914  *   Execution context: Controller thread
915  *
916  * @param node_rx[out]   Pointer to rx node at head of queue
917  * @param handle[out]    Connection handle
918  * @return TX completed
919  */
920 uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
921 {
922 	struct node_rx_pdu *rx;
923 	memq_link_t *link;
924 	uint8_t cmplt = 0U;
925 
926 #if defined(CONFIG_BT_CONN) || \
927 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
928 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
929 	defined(CONFIG_BT_CTLR_ADV_ISO)
930 ll_rx_get_again:
931 #endif /* CONFIG_BT_CONN ||
932 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
933 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
934 	* CONFIG_BT_CTLR_ADV_ISO
935 	*/
936 
937 	*node_rx = NULL;
938 
939 	link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
940 	if (link) {
941 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
942 		cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f, rx->hdr.ack_last);
943 		if (!cmplt) {
944 			uint8_t f, cmplt_prev, cmplt_curr;
945 			uint16_t h;
946 
947 			cmplt_curr = 0U;
948 			f = mfifo_fifo_tx_ack.f;
949 			do {
950 				cmplt_prev = cmplt_curr;
951 				cmplt_curr = tx_cmplt_get(&h, &f,
952 							  mfifo_fifo_tx_ack.l);
953 			} while ((cmplt_prev != 0U) ||
954 				 (cmplt_prev != cmplt_curr));
955 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
956 
957 			if (0) {
958 #if defined(CONFIG_BT_CONN) || \
959 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
960 			/* Do not send up buffers to Host thread that are
961 			 * marked for release
962 			 */
963 			} else if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
964 				rx_link_dequeue_release_quota_inc(link);
965 				rx_release_replenish((struct node_rx_hdr *)rx);
966 
967 				goto ll_rx_get_again;
968 #endif /* CONFIG_BT_CONN ||
969 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
970 	*/
971 
972 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
973 			} else if (rx->hdr.type == NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE) {
974 				const uint8_t report_cnt = 1U;
975 
976 				(void)memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, NULL);
977 				ll_rx_link_release(link);
978 				ull_iq_report_link_inc_quota(report_cnt);
979 				ull_df_iq_report_mem_release(rx);
980 				ull_df_rx_iq_report_alloc(report_cnt);
981 
982 				goto ll_rx_get_again;
983 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
984 
985 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
986 			} else if (rx->hdr.type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
987 				rx_link_dequeue_release_quota_inc(link);
988 
989 				/* Remove Channel Map Update Indication from
990 				 * ACAD.
991 				 */
992 				ull_adv_sync_chm_complete(rx);
993 
994 				rx_release_replenish((struct node_rx_hdr *)rx);
995 
996 				goto ll_rx_get_again;
997 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
998 
999 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1000 			} else if (rx->hdr.type == NODE_RX_TYPE_BIG_CHM_COMPLETE) {
1001 				rx_link_dequeue_release_quota_inc(link);
1002 
1003 				/* Update Channel Map in BIGInfo present in
1004 				 * Periodic Advertising PDU.
1005 				 */
1006 				ull_adv_iso_chm_complete(rx);
1007 
1008 				rx_release_replenish((struct node_rx_hdr *)rx);
1009 
1010 				goto ll_rx_get_again;
1011 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1012 			}
1013 
1014 			*node_rx = rx;
1015 
1016 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1017 		}
1018 	} else {
1019 		cmplt = tx_cmplt_get(handle, &mfifo_fifo_tx_ack.f,
1020 				     mfifo_fifo_tx_ack.l);
1021 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1022 	}
1023 
1024 	return cmplt;
1025 }
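
/* Minimal sketch of how a receive thread is expected to drive ll_rx_get(),
 * ll_rx_dequeue() and ll_rx_mem_release() (hypothetical consumer shown for
 * illustration only; the actual consumer lives in the HCI driver glue and its
 * sem_prio_recv semaphore is the one registered via ll_init()):
 *
 *   k_sem_take(&sem_prio_recv, K_FOREVER);
 *
 *   cmplt = ll_rx_get(&node_rx, &handle);
 *   // ... report cmplt completed packets for handle to the Host ...
 *
 *   if (node_rx) {
 *           ll_rx_dequeue();                 // commit the peek above
 *           // ... process node_rx and send the event to the Host ...
 *           ll_rx_mem_release(&node_rx);     // return node(s) to the pool
 *   }
 */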
1026 
1027 /**
1028  * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
1029  * @details Execution context: Controller thread
1030  */
1031 void ll_rx_dequeue(void)
1032 {
1033 	struct node_rx_pdu *rx = NULL;
1034 	memq_link_t *link;
1035 
1036 	link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
1037 			    (void **)&rx);
1038 	LL_ASSERT(link);
1039 
1040 	ll_rx_link_release(link);
1041 
1042 	/* handle object specific clean up */
1043 	switch (rx->hdr.type) {
1044 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1045 #if defined(CONFIG_BT_OBSERVER)
1046 	case NODE_RX_TYPE_EXT_1M_REPORT:
1047 	case NODE_RX_TYPE_EXT_2M_REPORT:
1048 	case NODE_RX_TYPE_EXT_CODED_REPORT:
1049 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1050 	case NODE_RX_TYPE_SYNC_REPORT:
1051 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1052 	{
1053 		struct node_rx_pdu *rx_curr;
1054 		struct pdu_adv *adv;
1055 
1056 		adv = (struct pdu_adv *)rx->pdu;
1057 		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
1058 			break;
1059 		}
1060 
1061 		rx_curr = rx->rx_ftr.extra;
1062 		while (rx_curr) {
1063 			memq_link_t *link_free;
1064 
1065 			link_free = rx_curr->hdr.link;
1066 			rx_curr = rx_curr->rx_ftr.extra;
1067 
1068 			ll_rx_link_release(link_free);
1069 		}
1070 	}
1071 	break;
1072 
1073 	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1074 	{
1075 		ull_scan_term_dequeue(rx->hdr.handle);
1076 	}
1077 	break;
1078 #endif /* CONFIG_BT_OBSERVER */
1079 
1080 #if defined(CONFIG_BT_BROADCASTER)
1081 	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1082 	{
1083 		struct ll_adv_set *adv;
1084 		struct lll_adv_aux *lll_aux;
1085 
1086 		adv = ull_adv_set_get(rx->hdr.handle);
1087 		LL_ASSERT(adv);
1088 
1089 		lll_aux = adv->lll.aux;
1090 		if (lll_aux) {
1091 			struct ll_adv_aux_set *aux;
1092 
1093 			aux = HDR_LLL2ULL(lll_aux);
1094 
1095 			aux->is_started = 0U;
1096 		}
1097 
1098 #if defined(CONFIG_BT_PERIPHERAL)
1099 		struct lll_conn *lll_conn = adv->lll.conn;
1100 
1101 		if (!lll_conn) {
1102 			adv->is_enabled = 0U;
1103 
1104 			break;
1105 		}
1106 
1107 		LL_ASSERT(!lll_conn->link_tx_free);
1108 
1109 		memq_link_t *memq_link = memq_deinit(&lll_conn->memq_tx.head,
1110 						     &lll_conn->memq_tx.tail);
1111 		LL_ASSERT(memq_link);
1112 
1113 		lll_conn->link_tx_free = memq_link;
1114 
1115 		struct ll_conn *conn = HDR_LLL2ULL(lll_conn);
1116 
1117 		ll_conn_release(conn);
1118 		adv->lll.conn = NULL;
1119 
1120 		ll_rx_release(adv->node_rx_cc_free);
1121 		adv->node_rx_cc_free = NULL;
1122 
1123 		ll_rx_link_release(adv->link_cc_free);
1124 		adv->link_cc_free = NULL;
1125 #endif /* CONFIG_BT_PERIPHERAL */
1126 
1127 		adv->is_enabled = 0U;
1128 	}
1129 	break;
1130 #endif /* CONFIG_BT_BROADCASTER */
1131 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1132 
1133 #if defined(CONFIG_BT_CONN)
1134 	case NODE_RX_TYPE_CONNECTION:
1135 	{
1136 		struct node_rx_cc *cc = (void *)rx->pdu;
1137 		struct node_rx_ftr *ftr = &(rx->rx_ftr);
1138 
1139 		if (0) {
1140 
1141 #if defined(CONFIG_BT_PERIPHERAL)
1142 		} else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
1143 			struct ll_adv_set *adv;
1144 			struct lll_adv *lll;
1145 
1146 			/* Get reference to ULL context */
1147 			lll = ftr->param;
1148 			adv = HDR_LLL2ULL(lll);
1149 
1150 			if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1151 				struct lll_conn *conn_lll;
1152 				struct ll_conn *conn;
1153 				memq_link_t *memq_link;
1154 
1155 				conn_lll = lll->conn;
1156 				LL_ASSERT(conn_lll);
1157 				lll->conn = NULL;
1158 
1159 				LL_ASSERT(!conn_lll->link_tx_free);
1160 				memq_link = memq_deinit(&conn_lll->memq_tx.head,
1161 							&conn_lll->memq_tx.tail);
1162 				LL_ASSERT(memq_link);
1163 				conn_lll->link_tx_free = memq_link;
1164 
1165 				conn = HDR_LLL2ULL(conn_lll);
1166 				ll_conn_release(conn);
1167 			} else {
1168 				/* Release un-utilized node rx */
1169 				if (adv->node_rx_cc_free) {
1170 					void *rx_free;
1171 
1172 					rx_free = adv->node_rx_cc_free;
1173 					adv->node_rx_cc_free = NULL;
1174 
1175 					ll_rx_release(rx_free);
1176 				}
1177 			}
1178 
1179 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1180 			if (lll->aux) {
1181 				struct ll_adv_aux_set *aux;
1182 
1183 				aux = HDR_LLL2ULL(lll->aux);
1184 				aux->is_started = 0U;
1185 			}
1186 
1187 			/* If Extended Advertising Commands are used, reset
1188 			 * is_enabled when the advertising set terminated event
1189 			 * is dequeued. Otherwise, when legacy advertising
1190 			 * commands are used, reset is_enabled here.
1191 			 */
1192 			if (!lll->node_rx_adv_term) {
1193 				adv->is_enabled = 0U;
1194 			}
1195 #else /* !CONFIG_BT_CTLR_ADV_EXT */
1196 			adv->is_enabled = 0U;
1197 #endif /* !CONFIG_BT_CTLR_ADV_EXT */
1198 
1199 #else /* !CONFIG_BT_PERIPHERAL */
1200 			ARG_UNUSED(cc);
1201 #endif /* !CONFIG_BT_PERIPHERAL */
1202 
1203 #if defined(CONFIG_BT_CENTRAL)
1204 		} else {
1205 			struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);
1206 
1207 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
1208 			struct ll_scan_set *scan_other =
1209 				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
1210 
1211 			if (scan_other) {
1212 				if (scan_other == scan) {
1213 					scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
1214 				}
1215 
1216 				if (scan_other) {
1217 					scan_other->lll.conn = NULL;
1218 					scan_other->is_enabled = 0U;
1219 				}
1220 			}
1221 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1222 
1223 			scan->lll.conn = NULL;
1224 			scan->is_enabled = 0U;
1225 #else /* !CONFIG_BT_CENTRAL */
1226 		} else {
1227 			LL_ASSERT(0);
1228 #endif /* !CONFIG_BT_CENTRAL */
1229 		}
1230 
1231 		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
1232 			uint8_t bm;
1233 
1234 			/* FIXME: use the correct adv and scan set to get
1235 			 * enabled status bitmask
1236 			 */
1237 			bm = (IS_ENABLED(CONFIG_BT_OBSERVER)?(ull_scan_is_enabled(0) << 1):0) |
1238 			     (IS_ENABLED(CONFIG_BT_BROADCASTER)?ull_adv_is_enabled(0):0);
1239 
1240 			if (!bm) {
1241 				ull_filter_adv_scan_state_cb(0);
1242 			}
1243 		}
1244 	}
1245 	break;
1246 
1247 	case NODE_RX_TYPE_TERMINATE:
1248 	case NODE_RX_TYPE_DC_PDU:
1249 #endif /* CONFIG_BT_CONN */
1250 
1251 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1252 	case NODE_RX_TYPE_BIG_COMPLETE:
1253 	case NODE_RX_TYPE_BIG_TERMINATE:
1254 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1255 
1256 #if defined(CONFIG_BT_OBSERVER)
1257 	case NODE_RX_TYPE_REPORT:
1258 
1259 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1260 		/* fall through */
1261 	case NODE_RX_TYPE_SYNC:
1262 	case NODE_RX_TYPE_SYNC_LOST:
1263 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1264 		/* fall through */
1265 	case NODE_RX_TYPE_SYNC_ISO:
1266 	case NODE_RX_TYPE_SYNC_ISO_LOST:
1267 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1268 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1269 #endif /* CONFIG_BT_OBSERVER */
1270 
1271 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1272 	case NODE_RX_TYPE_SCAN_REQ:
1273 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1274 
1275 #if defined(CONFIG_BT_CONN)
1276 	case NODE_RX_TYPE_CONN_UPDATE:
1277 	case NODE_RX_TYPE_ENC_REFRESH:
1278 
1279 #if defined(CONFIG_BT_CTLR_LE_PING)
1280 	case NODE_RX_TYPE_APTO:
1281 #endif /* CONFIG_BT_CTLR_LE_PING */
1282 
1283 	case NODE_RX_TYPE_CHAN_SEL_ALGO:
1284 
1285 #if defined(CONFIG_BT_CTLR_PHY)
1286 	case NODE_RX_TYPE_PHY_UPDATE:
1287 #endif /* CONFIG_BT_CTLR_PHY */
1288 
1289 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1290 	case NODE_RX_TYPE_RSSI:
1291 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1292 #endif /* CONFIG_BT_CONN */
1293 
1294 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1295 	case NODE_RX_TYPE_PROFILE:
1296 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1297 
1298 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1299 	case NODE_RX_TYPE_ADV_INDICATION:
1300 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1301 
1302 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1303 	case NODE_RX_TYPE_SCAN_INDICATION:
1304 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1305 
1306 #if defined(CONFIG_BT_HCI_MESH_EXT)
1307 	case NODE_RX_TYPE_MESH_ADV_CPLT:
1308 	case NODE_RX_TYPE_MESH_REPORT:
1309 #endif /* CONFIG_BT_HCI_MESH_EXT */
1310 
1311 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1312 	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1313 		__fallthrough;
1314 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1315 
1316 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1317 	case NODE_RX_TYPE_CIS_REQUEST:
1318 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1319 
1320 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1321 	case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1322 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1323 
1324 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1325 	case NODE_RX_TYPE_CIS_ESTABLISHED:
1326 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1327 
1328 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1329 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1330 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1331 
1332 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
1333 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1334 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1335 
1336 #if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1337 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1338 #endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
1339 
1340 	/* Ensure that at least one 'case' statement is present for this
1341 	 * code block.
1342 	 */
1343 	case NODE_RX_TYPE_NONE:
1344 		LL_ASSERT(rx->hdr.type != NODE_RX_TYPE_NONE);
1345 		break;
1346 
1347 	default:
1348 		LL_ASSERT(0);
1349 		break;
1350 	}
1351 
1352 	/* FIXME: clean up when porting Mesh Ext. */
1353 	if (0) {
1354 #if defined(CONFIG_BT_HCI_MESH_EXT)
1355 	} else if (rx->hdr.type == NODE_RX_TYPE_MESH_ADV_CPLT) {
1356 		struct ll_adv_set *adv;
1357 		struct ll_scan_set *scan;
1358 
1359 		adv = ull_adv_is_enabled_get(0);
1360 		LL_ASSERT(adv);
1361 		adv->is_enabled = 0U;
1362 
1363 		scan = ull_scan_is_enabled_get(0);
1364 		LL_ASSERT(scan);
1365 
1366 		scan->is_enabled = 0U;
1367 
1368 		ll_adv_scan_state_cb(0);
1369 #endif /* CONFIG_BT_HCI_MESH_EXT */
1370 	}
1371 }
1372 
1373 void ll_rx_mem_release(void **node_rx)
1374 {
1375 	struct node_rx_pdu *rx;
1376 
1377 	rx = *node_rx;
1378 	while (rx) {
1379 		struct node_rx_pdu *rx_free;
1380 
1381 		rx_free = rx;
1382 		rx = rx->hdr.next;
1383 
1384 		switch (rx_free->hdr.type) {
1385 #if defined(CONFIG_BT_BROADCASTER)
1386 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1387 		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
1388 			ll_rx_release(rx_free);
1389 			break;
1390 
1391 #if defined(CONFIG_BT_CTLR_ADV_ISO)
1392 		case NODE_RX_TYPE_BIG_COMPLETE:
1393 			/* Nothing to release */
1394 			break;
1395 
1396 		case NODE_RX_TYPE_BIG_TERMINATE:
1397 		{
1398 			struct ll_adv_iso_set *adv_iso = rx_free->rx_ftr.param;
1399 
1400 			ull_adv_iso_stream_release(adv_iso);
1401 		}
1402 		break;
1403 #endif /* CONFIG_BT_CTLR_ADV_ISO */
1404 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1405 #endif /* CONFIG_BT_BROADCASTER */
1406 
1407 #if defined(CONFIG_BT_OBSERVER)
1408 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1409 		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
1410 		{
1411 			ll_rx_release(rx_free);
1412 		}
1413 		break;
1414 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1415 #endif /* CONFIG_BT_OBSERVER */
1416 
1417 #if defined(CONFIG_BT_CONN)
1418 		case NODE_RX_TYPE_CONNECTION:
1419 		{
1420 			struct node_rx_cc *cc =
1421 				(void *)rx_free->pdu;
1422 
1423 			if (0) {
1424 
1425 #if defined(CONFIG_BT_PERIPHERAL)
1426 			} else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
1427 				ll_rx_release(rx_free);
1428 
1429 				break;
1430 #endif /* !CONFIG_BT_PERIPHERAL */
1431 
1432 #if defined(CONFIG_BT_CENTRAL)
1433 			} else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1434 				ull_central_cleanup(rx_free);
1435 
1436 #if defined(CONFIG_BT_CTLR_PRIVACY)
1437 #if defined(CONFIG_BT_BROADCASTER)
1438 				if (!ull_adv_is_enabled_get(0))
1439 #endif /* CONFIG_BT_BROADCASTER */
1440 				{
1441 					ull_filter_adv_scan_state_cb(0);
1442 				}
1443 #endif /* CONFIG_BT_CTLR_PRIVACY */
1444 				break;
1445 #endif /* CONFIG_BT_CENTRAL */
1446 
1447 			} else {
1448 				LL_ASSERT(!cc->status);
1449 			}
1450 		}
1451 
1452 		__fallthrough;
1453 		case NODE_RX_TYPE_DC_PDU:
1454 #endif /* CONFIG_BT_CONN */
1455 
1456 #if defined(CONFIG_BT_OBSERVER)
1457 		case NODE_RX_TYPE_REPORT:
1458 
1459 #if defined(CONFIG_BT_CTLR_ADV_EXT)
1460 			__fallthrough;
1461 		case NODE_RX_TYPE_EXT_1M_REPORT:
1462 		case NODE_RX_TYPE_EXT_2M_REPORT:
1463 		case NODE_RX_TYPE_EXT_CODED_REPORT:
1464 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1465 		case NODE_RX_TYPE_SYNC_REPORT:
1466 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1467 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1468 #endif /* CONFIG_BT_OBSERVER */
1469 
1470 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
1471 		case NODE_RX_TYPE_SCAN_REQ:
1472 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
1473 
1474 #if defined(CONFIG_BT_CONN)
1475 		case NODE_RX_TYPE_CONN_UPDATE:
1476 		case NODE_RX_TYPE_ENC_REFRESH:
1477 
1478 #if defined(CONFIG_BT_CTLR_LE_PING)
1479 		case NODE_RX_TYPE_APTO:
1480 #endif /* CONFIG_BT_CTLR_LE_PING */
1481 
1482 		case NODE_RX_TYPE_CHAN_SEL_ALGO:
1483 
1484 #if defined(CONFIG_BT_CTLR_PHY)
1485 		case NODE_RX_TYPE_PHY_UPDATE:
1486 #endif /* CONFIG_BT_CTLR_PHY */
1487 
1488 #if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
1489 		case NODE_RX_TYPE_RSSI:
1490 #endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
1491 #endif /* CONFIG_BT_CONN */
1492 
1493 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
1494 		case NODE_RX_TYPE_PROFILE:
1495 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
1496 
1497 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
1498 		case NODE_RX_TYPE_ADV_INDICATION:
1499 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
1500 
1501 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
1502 		case NODE_RX_TYPE_SCAN_INDICATION:
1503 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
1504 
1505 #if defined(CONFIG_BT_HCI_MESH_EXT)
1506 		case NODE_RX_TYPE_MESH_ADV_CPLT:
1507 		case NODE_RX_TYPE_MESH_REPORT:
1508 #endif /* CONFIG_BT_HCI_MESH_EXT */
1509 
1510 #if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
1511 		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
1512 #endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */
1513 
1514 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1515 		case NODE_RX_TYPE_CIS_REQUEST:
1516 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
1517 
1518 #if defined(CONFIG_BT_CTLR_CONN_ISO)
1519 		case NODE_RX_TYPE_CIS_ESTABLISHED:
1520 #endif /* CONFIG_BT_CTLR_CONN_ISO */
1521 
1522 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1523 		case NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE:
1524 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1525 
1526 #if defined(CONFIG_BT_CTLR_ISO)
1527 		case NODE_RX_TYPE_ISO_PDU:
1528 #endif
1529 
1530 		/* Ensure that at least one 'case' statement is present for this
1531 		 * code block.
1532 		 */
1533 		case NODE_RX_TYPE_NONE:
1534 			LL_ASSERT(rx_free->hdr.type != NODE_RX_TYPE_NONE);
1535 			ll_rx_link_quota_inc();
1536 			ll_rx_release(rx_free);
1537 			break;
1538 
1539 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1540 		case NODE_RX_TYPE_SYNC:
1541 		{
1542 			struct node_rx_sync *se =
1543 				(void *)rx_free->pdu;
1544 			uint8_t status = se->status;
1545 
1546 			/* Below status codes use node_rx_sync_estab, hence
1547 			 * release the node_rx memory and release sync context
1548 			 * if sync establishment failed.
1549 			 */
1550 			if ((status == BT_HCI_ERR_SUCCESS) ||
1551 			    (status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) ||
1552 			    (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
1553 				struct ll_sync_set *sync;
1554 				struct ll_scan_set *scan;
1555 
1556 				/* pick the scan context before node_rx
1557 				 * release.
1558 				 */
1559 				scan = (void *)rx_free->rx_ftr.param;
1560 
1561 				ll_rx_release(rx_free);
1562 
1563 				/* pick the sync context before the scan
1564 				 * context's sync association is cleaned up.
1565 				 */
1566 				sync = scan->periodic.sync;
1567 
1568 				ull_sync_setup_reset(scan);
1569 
1570 				if (status != BT_HCI_ERR_SUCCESS) {
1571 					memq_link_t *link_sync_lost;
1572 
1573 					link_sync_lost =
1574 						sync->node_rx_lost.rx.hdr.link;
1575 					ll_rx_link_release(link_sync_lost);
1576 
1577 					ull_sync_release(sync);
1578 				}
1579 
1580 				break;
1581 			} else {
1582 				LL_ASSERT(status == BT_HCI_ERR_OP_CANCELLED_BY_HOST);
1583 
1584 				/* Fall through and release sync context */
1585 			}
1586 		}
1587 		/* Pass through */
1588 
1589 		case NODE_RX_TYPE_SYNC_LOST:
1590 		{
1591 			struct ll_sync_set *sync =
1592 				(void *)rx_free->rx_ftr.param;
1593 
1594 			ull_sync_release(sync);
1595 		}
1596 		break;
1597 
1598 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1599 		case NODE_RX_TYPE_SYNC_ISO:
1600 		{
1601 			struct node_rx_sync_iso *se =
1602 				(void *)rx_free->pdu;
1603 
1604 			if (!se->status) {
1605 				ll_rx_release(rx_free);
1606 
1607 				break;
1608 			}
1609 		}
1610 		/* Pass through */
1611 
1612 		case NODE_RX_TYPE_SYNC_ISO_LOST:
1613 		{
1614 			struct ll_sync_iso_set *sync_iso =
1615 				(void *)rx_free->rx_ftr.param;
1616 
1617 			ull_sync_iso_stream_release(sync_iso);
1618 		}
1619 		break;
1620 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1621 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1622 
1623 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
1624 	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
1625 		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
1626 		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
1627 		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
1628 		{
1629 			const uint8_t report_cnt = 1U;
1630 
1631 			ull_iq_report_link_inc_quota(report_cnt);
1632 			ull_df_iq_report_mem_release(rx_free);
1633 			ull_df_rx_iq_report_alloc(report_cnt);
1634 		}
1635 		break;
1636 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */
1637 
1638 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
1639 		case NODE_RX_TYPE_TERMINATE:
1640 		{
1641 			if (IS_ACL_HANDLE(rx_free->hdr.handle)) {
1642 				struct ll_conn *conn;
1643 				memq_link_t *link;
1644 
1645 				conn = ll_conn_get(rx_free->hdr.handle);
1646 
1647 				LL_ASSERT(!conn->lll.link_tx_free);
1648 				link = memq_deinit(&conn->lll.memq_tx.head,
1649 						&conn->lll.memq_tx.tail);
1650 				LL_ASSERT(link);
1651 				conn->lll.link_tx_free = link;
1652 
1653 				ll_conn_release(conn);
1654 			} else if (IS_CIS_HANDLE(rx_free->hdr.handle)) {
1655 				ll_rx_link_quota_inc();
1656 				ll_rx_release(rx_free);
1657 			}
1658 		}
1659 		break;
1660 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */
1661 
1662 		case NODE_RX_TYPE_EVENT_DONE:
1663 		default:
1664 			LL_ASSERT(0);
1665 			break;
1666 		}
1667 	}
1668 
1669 	*node_rx = rx;
1670 
1671 	rx_replenish_all();
1672 }
1673 
1674 static void ll_rx_link_quota_update(int8_t delta)
1675 {
1676 	LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
1677 	mem_link_rx.quota_pdu += delta;
1678 }
1679 
1680 static void ll_rx_link_quota_inc(void)
1681 {
1682 	ll_rx_link_quota_update(1);
1683 }
1684 
1685 static void ll_rx_link_quota_dec(void)
1686 {
1687 	ll_rx_link_quota_update(-1);
1688 }
1689 
1690 void *ll_rx_link_alloc(void)
1691 {
1692 	return mem_acquire(&mem_link_rx.free);
1693 }
1694 
1695 void ll_rx_link_release(memq_link_t *link)
1696 {
1697 	mem_release(link, &mem_link_rx.free);
1698 }
1699 
1700 void *ll_rx_alloc(void)
1701 {
1702 	return mem_acquire(&mem_pdu_rx.free);
1703 }
1704 
1705 void ll_rx_release(void *node_rx)
1706 {
1707 	mem_release(node_rx, &mem_pdu_rx.free);
1708 }
1709 
1710 void ll_rx_put(memq_link_t *link, void *rx)
1711 {
1712 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1713 	struct node_rx_hdr *rx_hdr = rx;
1714 
1715 	/* Serialize Tx ack with Rx enqueue by storing reference to
1716 	 * last element index in Tx ack FIFO.
1717 	 */
1718 	rx_hdr->ack_last = mfifo_fifo_tx_ack.l;
1719 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1720 
1721 	/* Enqueue the Rx object */
1722 	memq_enqueue(link, rx, &memq_ll_rx.tail);
1723 }
1724 
1725 /**
1726  * @brief Permit another loop in the controller thread (prio_recv_thread)
1727  * @details Execution context: ULL mayfly
1728  */
1729 void ll_rx_sched(void)
1730 {
1731 	/* sem_recv references the same semaphore (sem_prio_recv)
1732 	 * in prio_recv_thread
1733 	 */
1734 	k_sem_give(sem_recv);
1735 }
1736 
1737 void ll_rx_put_sched(memq_link_t *link, void *rx)
1738 {
1739 	ll_rx_put(link, rx);
1740 	ll_rx_sched();
1741 }
1742 
1743 #if defined(CONFIG_BT_CONN)
1744 void *ll_pdu_rx_alloc_peek(uint8_t count)
1745 {
1746 	if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
1747 		return NULL;
1748 	}
1749 
1750 	return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
1751 }
1752 
1753 void *ll_pdu_rx_alloc(void)
1754 {
1755 	return MFIFO_DEQUEUE(ll_pdu_rx_free);
1756 }
1757 #endif /* CONFIG_BT_CONN */
1758 
1759 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
1760 void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
1761 {
1762 	struct lll_tx *tx;
1763 	uint8_t idx;
1764 
1765 	idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
1766 	LL_ASSERT(tx);
1767 
1768 	tx->handle = handle;
1769 	tx->node = node_tx;
1770 
1771 	MFIFO_ENQUEUE(tx_ack, idx);
1772 }
1773 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
1774 
1775 void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
1776 				uint8_t * const ticker_id)
1777 {
1778 	*instance_index = TICKER_INSTANCE_ID_CTLR;
1779 	*ticker_id = (TICKER_NODES - FLASH_TICKER_NODES - COEX_TICKER_NODES);
1780 }
1781 
1782 void ll_coex_ticker_id_get(uint8_t * const instance_index,
1783 				uint8_t * const ticker_id)
1784 {
1785 	*instance_index = TICKER_INSTANCE_ID_CTLR;
1786 	*ticker_id = (TICKER_NODES - COEX_TICKER_NODES);
1787 }
1788 
1789 void ll_radio_state_abort(void)
1790 {
1791 	static memq_link_t link;
1792 	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1793 	uint32_t ret;
1794 
1795 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1796 			     &mfy);
1797 	LL_ASSERT(!ret);
1798 }
1799 
1800 uint32_t ll_radio_state_is_idle(void)
1801 {
1802 	return lll_radio_is_idle();
1803 }
1804 
1805 void ull_ticker_status_give(uint32_t status, void *param)
1806 {
1807 	*((uint32_t volatile *)param) = status;
1808 
1809 	k_sem_give(&sem_ticker_api_cb);
1810 }
1811 
1812 /**
1813  * @brief Take the ticker API semaphore (if applicable) and wait for operation
1814  *        complete.
1815  *
1816  * Waits for ticker operation to complete by taking ticker API semaphore,
1817  * unless the operation was executed inline due to same-priority caller/
1818  * callee id.
1819  *
1820  * In case of asynchronous ticker operation (caller priority !=
1821  * callee priority), the function grabs the semaphore and waits for
1822  * ull_ticker_status_give, which assigns the ret_cb variable and releases
1823  * the semaphore.
1824  *
1825  * In case of synchronous ticker operation, the result is already known at
1826  * entry, and semaphore is only taken if ret_cb has been updated. This is done
1827  * to balance take/give counts. If *ret_cb is still TICKER_STATUS_BUSY, but
1828  * ret is not, the ticker operation has failed early, and no callback will be
1829  * invoked. In this case the semaphore shall not be taken.
1830  *
1831  * @param ret    Return value from ticker API call:
1832  *               TICKER_STATUS_BUSY:    Ticker operation is queued
1833  *               TICKER_STATUS_SUCCESS: Operation completed OK
1834  *               TICKER_STATUS_FAILURE: Operation failed
1835  *
1836  * @param ret_cb Pointer to user data passed to ticker operation
1837  *               callback, which holds the operation result. Value
1838  *               upon entry:
1839  *               TICKER_STATUS_BUSY:    Ticker has not yet called CB
1840  *               TICKER_STATUS_SUCCESS: Operation completed OK via CB
1841  *               TICKER_STATUS_FAILURE: Operation failed via CB
1842  *
1843  *               NOTE: For correct operation, *ret_cb must be initialized
1844  *               to TICKER_STATUS_BUSY before initiating the ticker API call.
1845  *
1846  * @return uint32_t Returns result of completed ticker operation
1847  */
1848 uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
1849 {
1850 	if ((ret == TICKER_STATUS_BUSY) || (*ret_cb != TICKER_STATUS_BUSY)) {
1851 		/* Operation is either pending or completed via callback
1852 		 * prior to this function call. Take the semaphore and wait,
1853 		 * or take it to balance take/give counting.
1854 		 */
1855 		k_sem_take(&sem_ticker_api_cb, K_FOREVER);
1856 		return *ret_cb;
1857 	}
1858 
1859 	return ret;
1860 }
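
/* Illustrative sketch of the calling pattern documented above; it mirrors the
 * usage in ull_ticker_stop_with_mark() below and uses a hypothetical
 * ticker_handle.
 *
 *	uint32_t volatile ret_cb = TICKER_STATUS_BUSY;
 *	uint32_t ret;
 *
 *	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
 *			  ticker_handle, ull_ticker_status_give,
 *			  (void *)&ret_cb);
 *	ret = ull_ticker_status_take(ret, &ret_cb);
 *	if (ret != TICKER_STATUS_SUCCESS) {
 *		// ticker stop failed, either early or via the callback
 *	}
 */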
1861 
1862 void *ull_disable_mark(void *param)
1863 {
1864 	return mark_set(&mark_disable, param);
1865 }
1866 
1867 void *ull_disable_unmark(void *param)
1868 {
1869 	return mark_unset(&mark_disable, param);
1870 }
1871 
1872 void *ull_disable_mark_get(void)
1873 {
1874 	return mark_get(mark_disable);
1875 }
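
/* Illustrative sketch (not code from this file) of the mark protocol: marking
 * succeeds only when no other object is currently marked, and the caller
 * detects contention by comparing the returned pointer with its own object.
 * `obj` is a hypothetical object being disabled.
 *
 *	if (ull_disable_mark(obj) != obj) {
 *		return -ENOLCK;  // another object is already being disabled
 *	}
 *
 *	// ... stop tickers and disable the LLL context for obj ...
 *
 *	if (ull_disable_unmark(obj) != obj) {
 *		return -ENOLCK;  // mark was lost, treat as failure
 *	}
 */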
1876 
1877 /**
1878  * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
1879  *
1880  * @param ticker_handle The handle of the ticker.
1881  * @param param         The object to mark.
1882  * @param lll_disable   Optional object when calling @ref ull_disable
1883  *
1884  * @return 0 on success, otherwise a negative errno.
1885  */
1886 int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
1887 			      void *lll_disable)
1888 {
1889 	uint32_t volatile ret_cb;
1890 	uint32_t ret;
1891 	void *mark;
1892 	int err;
1893 
1894 	mark = ull_disable_mark(param);
1895 	if (mark != param) {
1896 		return -ENOLCK;
1897 	}
1898 
1899 	ret_cb = TICKER_STATUS_BUSY;
1900 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1901 			  ticker_handle, ull_ticker_status_give,
1902 			  (void *)&ret_cb);
1903 	ret = ull_ticker_status_take(ret, &ret_cb);
1904 	if (ret) {
1905 		mark = ull_disable_unmark(param);
1906 		if (mark != param) {
1907 			return -ENOLCK;
1908 		}
1909 
1910 		return -EALREADY;
1911 	}
1912 
1913 	err = ull_disable(lll_disable);
1914 
1915 	mark = ull_disable_unmark(param);
1916 	if (mark != param) {
1917 		return -ENOLCK;
1918 	}
1919 
1920 	if (err && (err != -EALREADY)) {
1921 		return err;
1922 	}
1923 
1924 	return 0;
1925 }
1926 
1927 #if defined(CONFIG_BT_CONN)
1928 void *ull_update_mark(void *param)
1929 {
1930 	return mark_set(&mark_update, param);
1931 }
1932 
1933 void *ull_update_unmark(void *param)
1934 {
1935 	return mark_unset(&mark_update, param);
1936 }
1937 
1938 void *ull_update_mark_get(void)
1939 {
1940 	return mark_get(mark_update);
1941 }
1942 #endif /* CONFIG_BT_CONN */
1943 
1944 int ull_disable(void *lll)
1945 {
1946 	static memq_link_t link;
1947 	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
1948 	struct ull_hdr *hdr;
1949 	struct k_sem sem;
1950 	uint32_t ret;
1951 
1952 	hdr = HDR_LLL2ULL(lll);
1953 	if (!ull_ref_get(hdr)) {
1954 		return -EALREADY;
1955 	}
1956 	cpu_dmb(); /* Ensure synchronized data access */
1957 
1958 	k_sem_init(&sem, 0, 1);
1959 
1960 	hdr->disabled_param = &sem;
1961 	hdr->disabled_cb = disabled_cb;
1962 
1963 	cpu_dmb(); /* Ensure synchronized data access */
1964 
1965 	/* ULL_HIGH can run after we have called `ull_ref_get` and it can
1966 	 * decrement the ref count. Hence, handle this race condition by
1967 	 * ensuring that `disabled_cb` has been set while the ref count is still
1968 	 * set.
1969 	 * No need to call `lll_disable` and take the semaphore thereafter if
1970 	 * reference count is zero.
1971 	 * If the `sem` is given when reference count was decremented, we do not
1972 	 * care.
1973 	 */
1974 	if (!ull_ref_get(hdr)) {
1975 		return -EALREADY;
1976 	}
1977 
1978 	mfy.param = lll;
1979 	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
1980 			     &mfy);
1981 	LL_ASSERT(!ret);
1982 
1983 	return k_sem_take(&sem, ULL_DISABLE_TIMEOUT);
1984 }
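
/* Illustrative sketch (not code from this file): a typical caller disables the
 * LLL context embedded in its role object and tolerates -EALREADY when no
 * event was outstanding. `conn` is a hypothetical role instance.
 *
 *	int err;
 *
 *	err = ull_disable(&conn->lll);
 *	LL_ASSERT(!err || (err == -EALREADY));
 */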
1985 
1986 void *ull_pdu_rx_alloc_peek(uint8_t count)
1987 {
1988 	if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
1989 		return NULL;
1990 	}
1991 
1992 	return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
1993 }
1994 
1995 void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
1996 {
1997 	return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
1998 }
1999 
2000 void *ull_pdu_rx_alloc(void)
2001 {
2002 	return MFIFO_DEQUEUE(pdu_rx_free);
2003 }
2004 
2005 void ull_rx_put(memq_link_t *link, void *rx)
2006 {
2007 #if defined(CONFIG_BT_CONN)
2008 	struct node_rx_hdr *rx_hdr = rx;
2009 
2010 	/* Serialize Tx ack with Rx enqueue by storing reference to
2011 	 * last element index in Tx ack FIFO.
2012 	 */
2013 	rx_hdr->ack_last = ull_conn_ack_last_idx_get();
2014 #endif /* CONFIG_BT_CONN */
2015 
2016 	/* Enqueue the Rx object */
2017 	memq_enqueue(link, rx, &memq_ull_rx.tail);
2018 }
2019 
2020 void ull_rx_sched(void)
2021 {
2022 	static memq_link_t link;
2023 	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2024 
2025 	/* Kick the ULL (using the mayfly, tailchain it) */
2026 	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
2027 }
2028 
2029 void ull_rx_put_sched(memq_link_t *link, void *rx)
2030 {
2031 	ull_rx_put(link, rx);
2032 	ull_rx_sched();
2033 }
2034 
2035 struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
2036 				      lll_abort_cb_t abort_cb,
2037 				      struct lll_prepare_param *prepare_param,
2038 				      lll_prepare_cb_t prepare_cb,
2039 				      uint8_t is_resume)
2040 {
2041 	struct lll_event *e;
2042 	uint8_t idx;
2043 
2044 	idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
2045 	if (!e) {
2046 		return NULL;
2047 	}
2048 
2049 	memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
2050 	e->prepare_cb = prepare_cb;
2051 	e->is_abort_cb = is_abort_cb;
2052 	e->abort_cb = abort_cb;
2053 	e->is_resume = is_resume;
2054 	e->is_aborted = 0U;
2055 
2056 	MFIFO_ENQUEUE(prep, idx);
2057 
2058 	return e;
2059 }
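
/* Illustrative sketch (not code from this file): a role's prepare path queues
 * an LLL event with its callbacks; resume events pass is_resume = 1U so that
 * ull_prepare_dequeue() can distinguish them while flushing the pipeline. The
 * role_* callbacks and prepare_param below are hypothetical.
 *
 *	struct lll_event *e;
 *
 *	e = ull_prepare_enqueue(role_is_abort_cb, role_abort_cb,
 *				&prepare_param, role_prepare_cb, 0U);
 *	LL_ASSERT(e);
 */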
2060 
2061 void *ull_prepare_dequeue_get(void)
2062 {
2063 	return MFIFO_DEQUEUE_GET(prep);
2064 }
2065 
2066 void *ull_prepare_dequeue_iter(uint8_t *idx)
2067 {
2068 	return MFIFO_DEQUEUE_ITER_GET(prep, idx);
2069 }
2070 
2071 void ull_prepare_dequeue(uint8_t caller_id)
2072 {
2073 	void *param_normal_head = NULL;
2074 	void *param_normal_next = NULL;
2075 	void *param_resume_head = NULL;
2076 	void *param_resume_next = NULL;
2077 	struct lll_event *next;
2078 	uint8_t loop;
2079 
2080 	/* Development assertion check to ensure the below loop processing
2081 	 * has a limit.
2082 	 *
2083 	 * Only 2 scanners and 1 advertiser (directed adv) get enqueued back:
2084 	 *
2085 	 * Already in queue max 7 (EVENT_PIPELINE_MAX):
2086 	 *  - 2 continuous scan prepare in queue (1M and Coded PHY)
2087 	 *  - 2 continuous scan resume in queue (1M and Coded PHY)
2088 	 *  - 1 directed adv prepare
2089 	 *  - 1 directed adv resume
2090 	 *  - 1 any other role with time reservation
2091 	 *
2092 	 * The loop removes the duplicates (scan and advertiser) with is_aborted
2093 	 * flag set in 7 iterations:
2094 	 *  - 1 scan prepare (1M)
2095 	 *  - 1 scan prepare (Coded PHY)
2096 	 *  - 1 directed adv prepare
2097 	 *
2098 	 * and has enqueued the following in these 7 iterations:
2099 	 *  - 1 scan resume (1M)
2100 	 *  - 1 scan resume (Coded PHY)
2101 	 *  - 1 directed adv resume
2102 	 *
2103 	 * Hence, it should be (EVENT_PIPELINE_MAX + 3U) iterations max.
2104 	 */
2105 	loop = (EVENT_PIPELINE_MAX + 3U);
2106 
2107 	next = ull_prepare_dequeue_get();
2108 	while (next) {
2109 		void *param = next->prepare_param.param;
2110 		uint8_t is_aborted = next->is_aborted;
2111 		uint8_t is_resume = next->is_resume;
2112 
2113 		/* Assert if we exceed iterations processing the prepare queue
2114 		 */
2115 		LL_ASSERT(loop);
2116 		loop--;
2117 
2118 		/* Let LLL invoke the `prepare` interface if radio not in active
2119 		 * use. Otherwise, enqueue at end of the prepare pipeline queue.
2120 		 */
2121 		if (!is_aborted) {
2122 			static memq_link_t link;
2123 			static struct mayfly mfy = {0, 0, &link, NULL,
2124 						    lll_resume};
2125 			uint32_t ret;
2126 
2127 			mfy.param = next;
2128 			ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
2129 					     &mfy);
2130 			LL_ASSERT(!ret);
2131 		}
2132 
2133 		MFIFO_DEQUEUE(prep);
2134 
2135 		/* Check for any more prepare elements in queue */
2136 		next = ull_prepare_dequeue_get();
2137 		if (!next) {
2138 			break;
2139 		}
2140 
2141 		/* A valid prepare element has its `prepare` invoked or was
2142 		 * enqueued back into prepare pipeline.
2143 		 */
2144 		if (!is_aborted) {
2145 			/* The prepare element was not a resume event; it would
2146 			 * use the radio or was enqueued back into the prepare
2147 			 * pipeline with a preempt timeout set.
2148 			 *
2149 			 * Remember the first encountered and the next element
2150 			 * in the prepare pipeline so that we do not infinitely
2151 			 * loop through the resume events in prepare pipeline.
2152 			 */
2153 			if (!is_resume) {
2154 				if (!param_normal_head) {
2155 					param_normal_head = param;
2156 				} else if (!param_normal_next) {
2157 					param_normal_next = param;
2158 				}
2159 			} else {
2160 				if (!param_resume_head) {
2161 					param_resume_head = param;
2162 				} else if (!param_resume_next) {
2163 					param_resume_next = param;
2164 				}
2165 			}
2166 
2167 			/* Stop traversing the prepare pipeline when we reach
2168 			 * back to the first or next event where we
2169 			 * initially started processing the prepare pipeline.
2170 			 */
2171 			if (!next->is_aborted &&
2172 			    ((!next->is_resume &&
2173 			      ((next->prepare_param.param ==
2174 				param_normal_head) ||
2175 			       (next->prepare_param.param ==
2176 				param_normal_next))) ||
2177 			     (next->is_resume &&
2178 			      !param_normal_next &&
2179 			      ((next->prepare_param.param ==
2180 				param_resume_head) ||
2181 			       (next->prepare_param.param ==
2182 				param_resume_next))))) {
2183 				break;
2184 			}
2185 		}
2186 	}
2187 }
2188 
2189 struct event_done_extra *ull_event_done_extra_get(void)
2190 {
2191 	struct node_rx_event_done *evdone;
2192 
2193 	evdone = MFIFO_DEQUEUE_PEEK(done);
2194 	if (!evdone) {
2195 		return NULL;
2196 	}
2197 
2198 	return &evdone->extra;
2199 }
2200 
2201 struct event_done_extra *ull_done_extra_type_set(uint8_t type)
2202 {
2203 	struct event_done_extra *extra;
2204 
2205 	extra = ull_event_done_extra_get();
2206 	if (!extra) {
2207 		return NULL;
2208 	}
2209 
2210 	extra->type = type;
2211 
2212 	return extra;
2213 }
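
/* Illustrative sketch (an assumption about a caller, not code from this file):
 * at the close of a connection event the done-event extra is tagged so that
 * rx_demux_event_done() routes it to ull_conn_done().
 *
 *	struct event_done_extra *extra;
 *
 *	extra = ull_done_extra_type_set(EVENT_DONE_EXTRA_TYPE_CONN);
 *	LL_ASSERT(extra);
 */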
2214 
2215 void *ull_event_done(void *param)
2216 {
2217 	struct node_rx_event_done *evdone;
2218 	memq_link_t *link;
2219 
2220 	/* Obtain new node that signals "Done of an RX-event".
2221 	 * Obtain this by dequeuing from the global 'mfifo_done' queue.
2222 	 * Note that 'mfifo_done' is a queue of pointers, not of
2223 	 * struct node_rx_event_done
2224 	 */
2225 	evdone = MFIFO_DEQUEUE(done);
2226 	if (!evdone) {
2227 		/* Not fatal if we cannot obtain a node, though
2228 		 * we will lose the packets in the software stack.
2229 		 * If this happens during Conn Upd, this could cause LSTO.
2230 		 */
2231 		return NULL;
2232 	}
2233 
2234 	link = evdone->hdr.link;
2235 	evdone->hdr.link = NULL;
2236 
2237 	evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
2238 	evdone->param = param;
2239 
2240 	ull_rx_put_sched(link, evdone);
2241 
2242 	return evdone;
2243 }
2244 
2245 #if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2246 /**
2247  * @brief Extract timing from completed event
2248  *
2249  * @param[in]  done               Done event containing fresh timing information
2250  * @param[out] ticks_drift_plus   Positive part of drift uncertainty window
2251  * @param[out] ticks_drift_minus  Negative part of drift uncertainty window
2252  */
2253 void ull_drift_ticks_get(struct node_rx_event_done *done,
2254 			 uint32_t *ticks_drift_plus,
2255 			 uint32_t *ticks_drift_minus)
2256 {
2257 	uint32_t start_to_address_expected_us;
2258 	uint32_t start_to_address_actual_us;
2259 	uint32_t window_widening_event_us;
2260 	uint32_t preamble_to_addr_us;
2261 
2262 	start_to_address_actual_us =
2263 		done->extra.drift.start_to_address_actual_us;
2264 	window_widening_event_us =
2265 		done->extra.drift.window_widening_event_us;
2266 	preamble_to_addr_us =
2267 		done->extra.drift.preamble_to_addr_us;
2268 
2269 	start_to_address_expected_us = EVENT_JITTER_US +
2270 				       EVENT_TICKER_RES_MARGIN_US +
2271 				       window_widening_event_us +
2272 				       preamble_to_addr_us;
2273 
2274 	if (start_to_address_actual_us <= start_to_address_expected_us) {
2275 		*ticks_drift_plus =
2276 			HAL_TICKER_US_TO_TICKS(window_widening_event_us);
2277 		*ticks_drift_minus =
2278 			HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
2279 					       start_to_address_actual_us));
2280 	} else {
2281 		*ticks_drift_plus =
2282 			HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
2283 		*ticks_drift_minus =
2284 			HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
2285 					       EVENT_TICKER_RES_MARGIN_US +
2286 					       preamble_to_addr_us);
2287 	}
2288 }
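
/* Illustrative worked example (values are assumptions, not taken from this
 * driver): with EVENT_JITTER_US = 16, EVENT_TICKER_RES_MARGIN_US = 2,
 * window_widening_event_us = 50 and preamble_to_addr_us = 40, the expected
 * start-to-address time is 16 + 2 + 50 + 40 = 108 us. If the access address
 * was actually detected 100 us after event start, the peer arrived early:
 * ticks_drift_plus covers the full window widening (50 us) and
 * ticks_drift_minus covers the remaining 108 - 100 = 8 us, both converted to
 * ticker ticks with HAL_TICKER_US_TO_TICKS().
 */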
2289 #endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
2290 
2291 static inline int init_reset(void)
2292 {
2293 	memq_link_t *link;
2294 
2295 	/* Initialize and allocate done pool */
2296 	RXFIFO_INIT_ALLOC(done);
2297 
2298 	/* Initialize rx pool. */
2299 	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2300 		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
2301 		 &mem_pdu_rx.free);
2302 
2303 	/* Initialize rx link pool. */
2304 	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
2305 		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
2306 		 &mem_link_rx.free);
2307 
2308 	/* Acquire a link to initialize ull rx memq */
2309 	link = mem_acquire(&mem_link_rx.free);
2310 	LL_ASSERT(link);
2311 
2312 	/* Initialize ull rx memq */
2313 	MEMQ_INIT(ull_rx, link);
2314 
2315 	/* Acquire a link to initialize ll rx memq */
2316 	link = mem_acquire(&mem_link_rx.free);
2317 	LL_ASSERT(link);
2318 
2319 	/* Initialize ll rx memq */
2320 	MEMQ_INIT(ll_rx, link);
2321 
2322 	/* Allocate rx free buffers */
2323 	mem_link_rx.quota_pdu = RX_CNT;
2324 	rx_replenish_all();
2325 
2326 #if (defined(CONFIG_BT_BROADCASTER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2327 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2328 	defined(CONFIG_BT_CTLR_SYNC_PERIODIC) || \
2329 	defined(CONFIG_BT_CONN)
2330 	/* Initialize channel map */
2331 	ull_chan_reset();
2332 #endif /* (CONFIG_BT_BROADCASTER && CONFIG_BT_CTLR_ADV_EXT) ||
2333 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2334 	* CONFIG_BT_CTLR_SYNC_PERIODIC ||
2335 	* CONFIG_BT_CONN
2336 	*/
2337 
2338 	return 0;
2339 }
2340 
2341 static void perform_lll_reset(void *param)
2342 {
2343 	int err;
2344 
2345 	/* Reset LLL */
2346 	err = lll_reset();
2347 	LL_ASSERT(!err);
2348 
2349 #if defined(CONFIG_BT_BROADCASTER)
2350 	/* Reset adv state */
2351 	err = lll_adv_reset();
2352 	LL_ASSERT(!err);
2353 #endif /* CONFIG_BT_BROADCASTER */
2354 
2355 #if defined(CONFIG_BT_OBSERVER)
2356 	/* Reset scan state */
2357 	err = lll_scan_reset();
2358 	LL_ASSERT(!err);
2359 #endif /* CONFIG_BT_OBSERVER */
2360 
2361 #if defined(CONFIG_BT_CONN)
2362 	/* Reset conn role */
2363 	err = lll_conn_reset();
2364 	LL_ASSERT(!err);
2365 #endif /* CONFIG_BT_CONN */
2366 
2367 #if defined(CONFIG_BT_CTLR_DF)
2368 	err = lll_df_reset();
2369 	LL_ASSERT(!err);
2370 #endif /* CONFIG_BT_CTLR_DF */
2371 
2372 #if !defined(CONFIG_BT_CTLR_ZLI)
2373 	k_sem_give(param);
2374 #endif /* !CONFIG_BT_CTLR_ZLI */
2375 }
2376 
2377 static inline void *mark_set(void **m, void *param)
2378 {
2379 	if (!*m) {
2380 		*m = param;
2381 	}
2382 
2383 	return *m;
2384 }
2385 
2386 static inline void *mark_unset(void **m, void *param)
2387 {
2388 	if (*m && *m == param) {
2389 		*m = NULL;
2390 
2391 		return param;
2392 	}
2393 
2394 	return NULL;
2395 }
2396 
2397 static inline void *mark_get(void *m)
2398 {
2399 	return m;
2400 }
2401 
2402 static void rx_replenish(uint8_t max)
2403 {
2404 	uint8_t idx;
2405 
2406 	if (max > mem_link_rx.quota_pdu) {
2407 		max = mem_link_rx.quota_pdu;
2408 	}
2409 
2410 	while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
2411 		memq_link_t *link;
2412 		struct node_rx_hdr *rx;
2413 
2414 		link = mem_acquire(&mem_link_rx.free);
2415 		if (!link) {
2416 			return;
2417 		}
2418 
2419 		rx = mem_acquire(&mem_pdu_rx.free);
2420 		if (!rx) {
2421 			ll_rx_link_release(link);
2422 			return;
2423 		}
2424 
2425 		rx->link = link;
2426 
2427 		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);
2428 
2429 		ll_rx_link_quota_dec();
2430 
2431 		max--;
2432 	}
2433 
2434 #if defined(CONFIG_BT_CONN)
2435 	if (!max) {
2436 		return;
2437 	}
2438 
2439 	/* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
2440 	 * Rx PDU queue has been filled.
2441 	 */
2442 	while (mem_link_rx.quota_pdu &&
2443 	       MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
2444 		memq_link_t *link;
2445 		struct node_rx_hdr *rx;
2446 
2447 		link = mem_acquire(&mem_link_rx.free);
2448 		if (!link) {
2449 			return;
2450 		}
2451 
2452 		rx = mem_acquire(&mem_pdu_rx.free);
2453 		if (!rx) {
2454 			ll_rx_link_release(link);
2455 			return;
2456 		}
2457 
2458 		link->mem = NULL;
2459 		rx->link = link;
2460 
2461 		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);
2462 
2463 		ll_rx_link_quota_dec();
2464 	}
2465 #endif /* CONFIG_BT_CONN */
2466 }
2467 
2468 static void rx_replenish_all(void)
2469 {
2470 	rx_replenish(UINT8_MAX);
2471 }
2472 
2473 #if defined(CONFIG_BT_CONN) || \
2474 	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
2475 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2476 	defined(CONFIG_BT_CTLR_ADV_ISO)
2477 
2478 static void rx_replenish_one(void)
2479 {
2480 	rx_replenish(1U);
2481 }
2482 
2483 static void rx_release_replenish(struct node_rx_hdr *rx)
2484 {
2485 	ll_rx_release(rx);
2486 	rx_replenish_one();
2487 }
2488 
2489 static void rx_link_dequeue_release_quota_inc(memq_link_t *link)
2490 {
2491 	(void)memq_dequeue(memq_ll_rx.tail,
2492 			   &memq_ll_rx.head, NULL);
2493 	ll_rx_link_release(link);
2494 	ll_rx_link_quota_inc();
2495 }
2496 #endif /* CONFIG_BT_CONN ||
2497 	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
2498 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2499 	* CONFIG_BT_CTLR_ADV_ISO
2500 	*/
2501 
2502 static void rx_demux(void *param)
2503 {
2504 	memq_link_t *link;
2505 
2506 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2507 	do {
2508 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2509 		struct node_rx_hdr *rx;
2510 
2511 		link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
2512 				 (void **)&rx);
2513 		if (link) {
2514 #if defined(CONFIG_BT_CONN)
2515 			struct node_tx *node_tx;
2516 			memq_link_t *link_tx;
2517 			uint16_t handle; /* Handle to Ack TX */
2518 #endif /* CONFIG_BT_CONN */
2519 
2520 			LL_ASSERT(rx);
2521 
2522 #if defined(CONFIG_BT_CONN)
2523 			link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
2524 							    &handle, &node_tx);
2525 			if (link_tx) {
2526 				rx_demux_conn_tx_ack(rx->ack_last, handle,
2527 						     link_tx, node_tx);
2528 			} else
2529 #endif /* CONFIG_BT_CONN */
2530 			{
2531 				rx_demux_rx(link, rx);
2532 			}
2533 
2534 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2535 			rx_demux_yield();
2536 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2537 
2538 #if defined(CONFIG_BT_CONN)
2539 		} else {
2540 			struct node_tx *node_tx;
2541 			uint8_t ack_last;
2542 			uint16_t handle;
2543 
2544 			link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2545 			if (link) {
2546 				rx_demux_conn_tx_ack(ack_last, handle,
2547 						      link, node_tx);
2548 
2549 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2550 				rx_demux_yield();
2551 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2552 
2553 			}
2554 #endif /* CONFIG_BT_CONN */
2555 		}
2556 
2557 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2558 	} while (link);
2559 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2560 }
2561 
2562 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2563 static void rx_demux_yield(void)
2564 {
2565 	static memq_link_t link;
2566 	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
2567 	struct node_rx_hdr *rx;
2568 	memq_link_t *link_peek;
2569 
2570 	link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
2571 	if (!link_peek) {
2572 #if defined(CONFIG_BT_CONN)
2573 		struct node_tx *node_tx;
2574 		uint8_t ack_last;
2575 		uint16_t handle;
2576 
2577 		link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
2578 		if (!link_peek) {
2579 			return;
2580 		}
2581 #else /* !CONFIG_BT_CONN */
2582 		return;
2583 #endif /* !CONFIG_BT_CONN */
2584 	}
2585 
2586 	/* Kick the ULL (using the mayfly, tailchain it) */
2587 	mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
2588 		       &mfy);
2589 }
2590 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2591 
2592 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
2593 static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
2594 {
2595 	struct lll_tx *tx;
2596 	uint8_t cmplt;
2597 	uint8_t next;
2598 
2599 	next = *first;
2600 	tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2601 				    mfifo_tx_ack.n, mfifo_fifo_tx_ack.f, last,
2602 				    &next);
2603 	if (!tx) {
2604 		return 0;
2605 	}
2606 
2607 	*handle = tx->handle;
2608 	cmplt = 0U;
2609 	do {
2610 		if (false) {
2611 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2612 	defined(CONFIG_BT_CTLR_CONN_ISO)
2613 		} else if (IS_CIS_HANDLE(tx->handle) ||
2614 			   IS_ADV_ISO_HANDLE(tx->handle)) {
2615 			struct node_tx_iso *tx_node;
2616 			uint8_t sdu_fragments;
2617 
2618 			/* NOTE: tx_cmplt_get() is permitted to be called
2619 			 *       multiple times before the tx_ack queue which is
2620 			 *       associated with Rx queue is changed by the
2621 			 *       dequeue of Rx node.
2622 			 *
2623 			 *       Tx node is released early without waiting for
2624 			 *       any dependency on Rx queue. Released Tx node
2625 			 *       reference is overloaded to store the Tx
2626 			 *       fragments count.
2627 			 *
2628 			 *       A hack is used here that depends on the fact
2629 			 *       that memory addresses have a value greater than
2630 			 *       0xFF, to determine if a node Tx has been
2631 			 *       released in a prior iteration of this function.
2632 			 */
2633 
2634 			/* We must count each SDU HCI fragment */
2635 			tx_node = tx->node;
2636 			if (IS_NODE_TX_PTR(tx_node)) {
2637 				/* We count each SDU fragment completed
2638 				 * by this PDU.
2639 				 */
2640 				sdu_fragments = tx_node->sdu_fragments;
2641 
2642 				/* Replace node reference with fragments
2643 				 * count
2644 				 */
2645 				NODE_TX_FRAGMENTS_SET(tx->node, sdu_fragments);
2646 
2647 				/* Release the node as it is a reference and not a
2648 				 * fragments count.
2649 				 */
2650 				ll_iso_link_tx_release(tx_node->link);
2651 				ll_iso_tx_mem_release(tx_node);
2652 			} else {
2653 				/* Get SDU fragments count from the encoded
2654 				 * node reference value.
2655 				 */
2656 				sdu_fragments = NODE_TX_FRAGMENTS_GET(tx_node);
2657 			}
2658 
2659 			/* Accumulate the tx acknowledgements */
2660 			cmplt += sdu_fragments;
2661 
2662 			goto next_ack;
2663 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2664 
2665 #if defined(CONFIG_BT_CONN)
2666 		} else {
2667 			struct node_tx *tx_node;
2668 			struct pdu_data *p;
2669 
2670 			/* NOTE: tx_cmplt_get() is permitted to be called
2671 			 *       multiple times before the tx_ack queue which is
2672 			 *       associated with Rx queue is changed by the
2673 			 *       dequeue of Rx node.
2674 			 *
2675 			 *       Tx node is released early without waiting for
2676 			 *       any dependency on Rx queue. Released Tx node
2677 			 *       reference is overloaded to store whether
2678 			 *       packet with data or control was released.
2679 			 *
2680 			 *       A hack is used here that depends on the fact
2681 			 *       that memory addresses have a value greater than
2682 			 *       0xFF, to determine if a node Tx has been
2683 			 *       released in a prior iteration of this function.
2684 			 */
2685 			tx_node = tx->node;
2686 			p = (void *)tx_node->pdu;
2687 			if (!tx_node ||
2688 			    (IS_NODE_TX_PTR(tx_node) &&
2689 			     (p->ll_id == PDU_DATA_LLID_DATA_START ||
2690 			      p->ll_id == PDU_DATA_LLID_DATA_CONTINUE)) ||
2691 			    (!IS_NODE_TX_PTR(tx_node) &&
2692 			     IS_NODE_TX_DATA(tx_node))) {
2693 				/* data packet, hence count num cmplt */
2694 				NODE_TX_DATA_SET(tx->node);
2695 				cmplt++;
2696 			} else {
2697 				/* ctrl packet or flushed, hence don't count num
2698 				 * cmplt
2699 				 */
2700 				NODE_TX_CTRL_SET(tx->node);
2701 			}
2702 
2703 			if (IS_NODE_TX_PTR(tx_node)) {
2704 				ll_tx_mem_release(tx_node);
2705 			}
2706 #endif /* CONFIG_BT_CONN */
2707 
2708 		}
2709 
2710 #if defined(CONFIG_BT_CTLR_ADV_ISO) || \
2711 	defined(CONFIG_BT_CTLR_CONN_ISO)
2712 next_ack:
2713 #endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
2714 
2715 		*first = next;
2716 		tx = mfifo_dequeue_iter_get(mfifo_fifo_tx_ack.m, mfifo_tx_ack.s,
2717 					    mfifo_tx_ack.n, mfifo_fifo_tx_ack.f,
2718 					    last, &next);
2719 	} while (tx && tx->handle == *handle);
2720 
2721 	return cmplt;
2722 }
2723 
2724 static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
2725 					memq_link_t *link,
2726 					struct node_tx *node_tx)
2727 {
2728 #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2729 	do {
2730 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2731 		/* Dequeue node */
2732 		ull_conn_ack_dequeue();
2733 
2734 		/* Process Tx ack */
2735 		ull_conn_tx_ack(handle, link, node_tx);
2736 
2737 		/* Release link mem */
2738 		ull_conn_link_tx_release(link);
2739 
2740 		/* check for more rx ack */
2741 		link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);
2742 
2743 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
2744 		if (!link)
2745 #else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2746 	} while (link);
2747 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
2748 
2749 		{
2750 			/* trigger thread to call ll_rx_get() */
2751 			ll_rx_sched();
2752 		}
2753 }
2754 #endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
2755 
2756 /**
2757  * @brief Dispatch rx objects
2758  * @details Rx objects are only peeked, not dequeued yet.
2759  *   Execution context: ULL high priority Mayfly
2760  */
2761 static inline void rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
2762 {
2763 	/* Demux Rx objects */
2764 	switch (rx->type) {
2765 	case NODE_RX_TYPE_EVENT_DONE:
2766 	{
2767 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2768 		rx_demux_event_done(link, (struct node_rx_event_done *)rx);
2769 	}
2770 	break;
2771 
2772 #if defined(CONFIG_BT_OBSERVER)
2773 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2774 	case NODE_RX_TYPE_EXT_1M_REPORT:
2775 	case NODE_RX_TYPE_EXT_CODED_REPORT:
2776 	case NODE_RX_TYPE_EXT_AUX_REPORT:
2777 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2778 	case NODE_RX_TYPE_SYNC_REPORT:
2779 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2780 	{
2781 		struct pdu_adv *adv;
2782 
2783 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2784 
2785 		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
2786 		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
2787 			ll_rx_put_sched(link, rx);
2788 			break;
2789 		}
2790 
2791 		ull_scan_aux_setup(link, (struct node_rx_pdu *)rx);
2792 	}
2793 	break;
2794 
2795 	case NODE_RX_TYPE_EXT_AUX_RELEASE:
2796 	{
2797 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2798 		ull_scan_aux_release(link, (struct node_rx_pdu *)rx);
2799 	}
2800 	break;
2801 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2802 	case NODE_RX_TYPE_SYNC:
2803 	{
2804 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2805 		ull_sync_established_report(link, (struct node_rx_pdu *)rx);
2806 	}
2807 	break;
2808 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2809 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2810 #endif /* CONFIG_BT_OBSERVER */
2811 
2812 #if defined(CONFIG_BT_CTLR_CONN_ISO)
2813 	case NODE_RX_TYPE_CIS_ESTABLISHED:
2814 	{
2815 		struct ll_conn *conn;
2816 
2817 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2818 
2819 		conn = ll_conn_get(rx->handle);
2820 		if (ull_cp_cc_awaiting_established(conn)) {
2821 			ull_cp_cc_established(conn, BT_HCI_ERR_SUCCESS);
2822 		}
2823 
2824 		rx->type = NODE_RX_TYPE_RELEASE;
2825 		ll_rx_put_sched(link, rx);
2826 	}
2827 	break;
2828 #endif /* CONFIG_BT_CTLR_CONN_ISO */
2829 
2830 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
2831 	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
2832 	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
2833 	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
2834 	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
2835 	case NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE:
2836 	{
2837 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2838 		ll_rx_put_sched(link, rx);
2839 	}
2840 	break;
2841 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX || CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */
2842 
2843 #if defined(CONFIG_BT_CONN)
2844 	case NODE_RX_TYPE_CONNECTION:
2845 	{
2846 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2847 		ull_conn_setup(link, (struct node_rx_pdu *)rx);
2848 	}
2849 	break;
2850 
2851 	case NODE_RX_TYPE_DC_PDU:
2852 	{
2853 		ull_conn_rx(link, (struct node_rx_pdu **)&rx);
2854 
2855 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2856 
2857 		/* Only schedule node if not marked as retain by LLCP */
2858 		if (rx && rx->type != NODE_RX_TYPE_RETAIN) {
2859 			ll_rx_put_sched(link, rx);
2860 		}
2861 	}
2862 	break;
2863 
2864 	case NODE_RX_TYPE_TERMINATE:
2865 #endif /* CONFIG_BT_CONN */
2866 
2867 #if defined(CONFIG_BT_OBSERVER) || \
2868 	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
2869 	defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
2870 	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
2871 	defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
2872 	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
2873 	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
2874 	defined(CONFIG_BT_CONN)
2875 
2876 #if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
2877 	case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
2878 #endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
2879 
2880 #if defined(CONFIG_BT_CTLR_ADV_ISO)
2881 	case NODE_RX_TYPE_BIG_CHM_COMPLETE:
2882 	case NODE_RX_TYPE_BIG_TERMINATE:
2883 #endif /* CONFIG_BT_CTLR_ADV_ISO */
2884 
2885 #if defined(CONFIG_BT_OBSERVER)
2886 	case NODE_RX_TYPE_REPORT:
2887 #endif /* CONFIG_BT_OBSERVER */
2888 
2889 #if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
2890 	case NODE_RX_TYPE_SCAN_REQ:
2891 #endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
2892 
2893 #if defined(CONFIG_BT_CTLR_PROFILE_ISR)
2894 	case NODE_RX_TYPE_PROFILE:
2895 #endif /* CONFIG_BT_CTLR_PROFILE_ISR */
2896 
2897 #if defined(CONFIG_BT_CTLR_ADV_INDICATION)
2898 	case NODE_RX_TYPE_ADV_INDICATION:
2899 #endif /* CONFIG_BT_CTLR_ADV_INDICATION */
2900 
2901 #if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
2902 	case NODE_RX_TYPE_SCAN_INDICATION:
2903 #endif /* CONFIG_BT_CTLR_SCAN_INDICATION */
2904 
2905 	case NODE_RX_TYPE_RELEASE:
2906 	{
2907 		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
2908 		ll_rx_put_sched(link, rx);
2909 	}
2910 	break;
2911 #endif /* CONFIG_BT_OBSERVER ||
2912 	* CONFIG_BT_CTLR_ADV_PERIODIC ||
2913 	* CONFIG_BT_CTLR_BROADCAST_ISO ||
2914 	* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
2915 	* CONFIG_BT_CTLR_PROFILE_ISR ||
2916 	* CONFIG_BT_CTLR_ADV_INDICATION ||
2917 	* CONFIG_BT_CTLR_SCAN_INDICATION ||
2918 	* CONFIG_BT_CONN
2919 	*/
2920 
2921 	default:
2922 	{
2923 #if defined(CONFIG_BT_CTLR_USER_EXT)
2924 		/* Try proprietary demuxing */
2925 		rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
2926 					&memq_ull_rx.head);
2927 #else
2928 		LL_ASSERT(0);
2929 #endif /* CONFIG_BT_CTLR_USER_EXT */
2930 	}
2931 	break;
2932 	}
2933 }
2934 
2935 static inline void rx_demux_event_done(memq_link_t *link,
2936 				       struct node_rx_event_done *done)
2937 {
2938 	struct ull_hdr *ull_hdr;
2939 	void *release;
2940 
2941 	/* Decrement prepare reference if ULL will not resume */
2942 	ull_hdr = done->param;
2943 	if (ull_hdr) {
2944 		LL_ASSERT(ull_ref_get(ull_hdr));
2945 		ull_ref_dec(ull_hdr);
2946 	}
2947 
2948 	/* Process role dependent event done */
2949 	switch (done->extra.type) {
2950 #if defined(CONFIG_BT_CONN)
2951 	case EVENT_DONE_EXTRA_TYPE_CONN:
2952 		ull_conn_done(done);
2953 		break;
2954 #endif /* CONFIG_BT_CONN */
2955 
2956 #if defined(CONFIG_BT_BROADCASTER)
2957 #if defined(CONFIG_BT_CTLR_ADV_EXT) || \
2958 	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2959 	case EVENT_DONE_EXTRA_TYPE_ADV:
2960 		ull_adv_done(done);
2961 		break;
2962 
2963 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2964 	case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
2965 		ull_adv_aux_done(done);
2966 		break;
2967 
2968 #if defined(CONFIG_BT_CTLR_ADV_ISO)
2969 	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_COMPLETE:
2970 		ull_adv_iso_done_complete(done);
2971 		break;
2972 
2973 	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_TERMINATE:
2974 		ull_adv_iso_done_terminate(done);
2975 		break;
2976 #endif /* CONFIG_BT_CTLR_ADV_ISO */
2977 #endif /* CONFIG_BT_CTLR_ADV_EXT */
2978 #endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
2979 #endif /* CONFIG_BT_BROADCASTER */
2980 
2981 #if defined(CONFIG_BT_CTLR_ADV_EXT)
2982 #if defined(CONFIG_BT_OBSERVER)
2983 	case EVENT_DONE_EXTRA_TYPE_SCAN:
2984 		ull_scan_done(done);
2985 		break;
2986 
2987 	case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
2988 		ull_scan_aux_done(done);
2989 		break;
2990 
2991 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2992 	case EVENT_DONE_EXTRA_TYPE_SYNC:
2993 		ull_sync_done(done);
2994 		break;
2995 
2996 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
2997 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_ESTAB:
2998 		ull_sync_iso_estab_done(done);
2999 		break;
3000 
3001 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO:
3002 		ull_sync_iso_done(done);
3003 		break;
3004 
3005 	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_TERMINATE:
3006 		ull_sync_iso_done_terminate(done);
3007 		break;
3008 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
3009 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
3010 #endif /* CONFIG_BT_OBSERVER */
3011 #endif /* CONFIG_BT_CTLR_ADV_EXT */
3012 
3013 #if defined(CONFIG_BT_CTLR_CONN_ISO)
3014 	case EVENT_DONE_EXTRA_TYPE_CIS:
3015 		ull_conn_iso_done(done);
3016 		break;
3017 #endif /* CONFIG_BT_CTLR_CONN_ISO */
3018 
3019 #if defined(CONFIG_BT_CTLR_USER_EXT)
3020 	case EVENT_DONE_EXTRA_TYPE_USER_START
3021 		... EVENT_DONE_EXTRA_TYPE_USER_END:
3022 		ull_proprietary_done(done);
3023 		break;
3024 #endif /* CONFIG_BT_CTLR_USER_EXT */
3025 
3026 	case EVENT_DONE_EXTRA_TYPE_NONE:
3027 		/* ignore */
3028 		break;
3029 
3030 	default:
3031 		LL_ASSERT(0);
3032 		break;
3033 	}
3034 
3035 	/* Release done */
3036 	done->extra.type = 0U;
3037 	release = RXFIFO_RELEASE(done, link, done);
3038 	LL_ASSERT(release == done);
3039 
3040 #if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
3041 	/* dequeue prepare pipeline */
3042 	ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);
3043 
3044 	/* LLL done synchronize count */
3045 	lll_done_ull_inc();
3046 #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
3047 
3048 	/* If disable initiated, signal the semaphore */
3049 	if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
3050 		ull_hdr->disabled_cb(ull_hdr->disabled_param);
3051 	}
3052 }
3053 
3054 static void disabled_cb(void *param)
3055 {
3056 	k_sem_give(param);
3057 }
3058 
3059 /**
3060  * @brief   Support function for RXFIFO_ALLOC macro
3061  * @details This function allocates up to 'max' number of MFIFO elements by
3062  *          enqueuing pointers to memory elements with associated memq links.
3063  */
3064 void ull_rxfifo_alloc(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3065 		      void *mem_free, void *link_free, uint8_t max)
3066 {
3067 	uint8_t idx;
3068 
3069 	while ((max--) && mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3070 		memq_link_t *link;
3071 		struct node_rx_hdr *rx;
3072 
3073 		link = mem_acquire(link_free);
3074 		if (!link) {
3075 			break;
3076 		}
3077 
3078 		rx = mem_acquire(mem_free);
3079 		if (!rx) {
3080 			mem_release(link, link_free);
3081 			break;
3082 		}
3083 
3084 		link->mem = NULL;
3085 		rx->link = link;
3086 
3087 		mfifo_by_idx_enqueue(m, s, idx, rx, l);
3088 	}
3089 }
3090 
3091 /**
3092  * @brief   Support function for RXFIFO_RELEASE macro
3093  * @details This function releases a node by returning it to the FIFO.
3094  */
3095 void *ull_rxfifo_release(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
3096 			 memq_link_t *link, struct node_rx_hdr *rx)
3097 {
3098 	uint8_t idx;
3099 
3100 	if (!mfifo_enqueue_idx_get(n, f, *l, &idx)) {
3101 		return NULL;
3102 	}
3103 
3104 	rx->link = link;
3105 
3106 	mfifo_by_idx_enqueue(m, s, idx, rx, l);
3107 
3108 	return rx;
3109 }
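
/* Illustrative sketch (an assumption, not code from this file): these two
 * helpers back the RXFIFO_ALLOC() and RXFIFO_RELEASE() macros used with the
 * RXFIFO-defined pools, for example the `done` pool in this file; `done_node`
 * is a hypothetical node being recycled.
 *
 *	RXFIFO_ALLOC(done, UINT8_MAX);
 *
 *	release = RXFIFO_RELEASE(done, link, done_node);
 *	LL_ASSERT(release == done_node);
 */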
3110